summaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorDaniel Baumann <daniel.baumann@progress-linux.org>2024-04-13 11:41:39 +0000
committerDaniel Baumann <daniel.baumann@progress-linux.org>2024-04-13 11:41:39 +0000
commitfcfb5e62f95d625836328131cc5ca851182bcae4 (patch)
tree5309ef2284a82d61ece838d1dd1c97c09df152b8
parentAdding upstream version 1.1.1. (diff)
downloadicingadb-upstream.tar.xz
icingadb-upstream.zip
Adding upstream version 1.2.0.upstream/1.2.0upstream
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
-rw-r--r--.github/dependabot.yml14
-rw-r--r--.mailmap1
-rw-r--r--AUTHORS1
-rw-r--r--CHANGELOG.md34
-rw-r--r--README.md7
-rw-r--r--cmd/icingadb-migrate/convert.go33
-rw-r--r--cmd/icingadb-migrate/misc.go3
-rw-r--r--cmd/icingadb/main.go17
-rw-r--r--config.example.yml64
-rw-r--r--dependencies/pkg/mod/cache/download/filippo.io/edwards25519/@v/list1
-rw-r--r--dependencies/pkg/mod/cache/download/filippo.io/edwards25519/@v/v1.1.0.info1
-rw-r--r--dependencies/pkg/mod/cache/download/filippo.io/edwards25519/@v/v1.1.0.lock (renamed from dependencies/pkg/mod/cache/download/github.com/cespare/xxhash/v2/@v/v2.1.2.lock)0
-rw-r--r--dependencies/pkg/mod/cache/download/filippo.io/edwards25519/@v/v1.1.0.mod3
-rw-r--r--dependencies/pkg/mod/cache/download/filippo.io/edwards25519/@v/v1.1.0.zipbin0 -> 55809 bytes
-rw-r--r--dependencies/pkg/mod/cache/download/filippo.io/edwards25519/@v/v1.1.0.ziphash1
-rw-r--r--dependencies/pkg/mod/cache/download/github.com/benbjohnson/clock/@v/list1
-rw-r--r--dependencies/pkg/mod/cache/download/github.com/benbjohnson/clock/@v/v1.3.0.mod3
-rw-r--r--dependencies/pkg/mod/cache/download/github.com/cespare/xxhash/v2/@v/list2
-rw-r--r--dependencies/pkg/mod/cache/download/github.com/cespare/xxhash/v2/@v/v2.1.2.zipbin15284 -> 0 bytes
-rw-r--r--dependencies/pkg/mod/cache/download/github.com/cespare/xxhash/v2/@v/v2.1.2.ziphash1
-rw-r--r--dependencies/pkg/mod/cache/download/github.com/cespare/xxhash/v2/@v/v2.2.0.info1
-rw-r--r--dependencies/pkg/mod/cache/download/github.com/cespare/xxhash/v2/@v/v2.2.0.lock (renamed from dependencies/pkg/mod/cache/download/github.com/go-redis/redis/v8/@v/v8.11.5.lock)0
-rw-r--r--dependencies/pkg/mod/cache/download/github.com/cespare/xxhash/v2/@v/v2.2.0.zipbin0 -> 17183 bytes
-rw-r--r--dependencies/pkg/mod/cache/download/github.com/cespare/xxhash/v2/@v/v2.2.0.ziphash1
-rw-r--r--dependencies/pkg/mod/cache/download/github.com/cespare/xxhash/v2/@v/v2.3.0.info1
-rw-r--r--dependencies/pkg/mod/cache/download/github.com/cespare/xxhash/v2/@v/v2.3.0.mod (renamed from dependencies/pkg/mod/cache/download/github.com/cespare/xxhash/v2/@v/v2.1.2.mod)0
-rw-r--r--dependencies/pkg/mod/cache/download/github.com/fatih/color/@v/list2
-rw-r--r--dependencies/pkg/mod/cache/download/github.com/fatih/color/@v/v1.15.0.info1
-rw-r--r--dependencies/pkg/mod/cache/download/github.com/fatih/color/@v/v1.16.0.info1
-rw-r--r--dependencies/pkg/mod/cache/download/github.com/fatih/color/@v/v1.16.0.mod (renamed from dependencies/pkg/mod/cache/download/github.com/fatih/color/@v/v1.15.0.mod)4
-rw-r--r--dependencies/pkg/mod/cache/download/github.com/go-redis/redis/v8/@v/list1
-rw-r--r--dependencies/pkg/mod/cache/download/github.com/go-redis/redis/v8/@v/v8.11.5.info1
-rw-r--r--dependencies/pkg/mod/cache/download/github.com/go-redis/redis/v8/@v/v8.11.5.mod20
-rw-r--r--dependencies/pkg/mod/cache/download/github.com/go-redis/redis/v8/@v/v8.11.5.zipbin176032 -> 0 bytes
-rw-r--r--dependencies/pkg/mod/cache/download/github.com/go-redis/redis/v8/@v/v8.11.5.ziphash1
-rw-r--r--dependencies/pkg/mod/cache/download/github.com/go-sql-driver/mysql/@v/list2
-rw-r--r--dependencies/pkg/mod/cache/download/github.com/go-sql-driver/mysql/@v/v1.7.1.info1
-rw-r--r--dependencies/pkg/mod/cache/download/github.com/go-sql-driver/mysql/@v/v1.7.1.mod3
-rw-r--r--dependencies/pkg/mod/cache/download/github.com/go-sql-driver/mysql/@v/v1.7.1.zipbin119398 -> 0 bytes
-rw-r--r--dependencies/pkg/mod/cache/download/github.com/go-sql-driver/mysql/@v/v1.7.1.ziphash1
-rw-r--r--dependencies/pkg/mod/cache/download/github.com/go-sql-driver/mysql/@v/v1.8.1.info1
-rw-r--r--dependencies/pkg/mod/cache/download/github.com/go-sql-driver/mysql/@v/v1.8.1.lock (renamed from dependencies/pkg/mod/cache/download/github.com/go-sql-driver/mysql/@v/v1.7.1.lock)0
-rw-r--r--dependencies/pkg/mod/cache/download/github.com/go-sql-driver/mysql/@v/v1.8.1.mod5
-rw-r--r--dependencies/pkg/mod/cache/download/github.com/go-sql-driver/mysql/@v/v1.8.1.zipbin0 -> 129854 bytes
-rw-r--r--dependencies/pkg/mod/cache/download/github.com/go-sql-driver/mysql/@v/v1.8.1.ziphash1
-rw-r--r--dependencies/pkg/mod/cache/download/github.com/goccy/go-yaml/@v/list2
-rw-r--r--dependencies/pkg/mod/cache/download/github.com/goccy/go-yaml/@v/v1.11.0.info1
-rw-r--r--dependencies/pkg/mod/cache/download/github.com/goccy/go-yaml/@v/v1.11.0.zipbin106106 -> 0 bytes
-rw-r--r--dependencies/pkg/mod/cache/download/github.com/goccy/go-yaml/@v/v1.11.0.ziphash1
-rw-r--r--dependencies/pkg/mod/cache/download/github.com/goccy/go-yaml/@v/v1.11.3.info1
-rw-r--r--dependencies/pkg/mod/cache/download/github.com/goccy/go-yaml/@v/v1.11.3.lock (renamed from dependencies/pkg/mod/cache/download/github.com/goccy/go-yaml/@v/v1.11.0.lock)0
-rw-r--r--dependencies/pkg/mod/cache/download/github.com/goccy/go-yaml/@v/v1.11.3.mod (renamed from dependencies/pkg/mod/cache/download/github.com/goccy/go-yaml/@v/v1.11.0.mod)2
-rw-r--r--dependencies/pkg/mod/cache/download/github.com/goccy/go-yaml/@v/v1.11.3.zipbin0 -> 109283 bytes
-rw-r--r--dependencies/pkg/mod/cache/download/github.com/goccy/go-yaml/@v/v1.11.3.ziphash1
-rw-r--r--dependencies/pkg/mod/cache/download/github.com/google/uuid/@v/list2
-rw-r--r--dependencies/pkg/mod/cache/download/github.com/google/uuid/@v/v1.3.0.info1
-rw-r--r--dependencies/pkg/mod/cache/download/github.com/google/uuid/@v/v1.3.0.zipbin25027 -> 0 bytes
-rw-r--r--dependencies/pkg/mod/cache/download/github.com/google/uuid/@v/v1.3.0.ziphash1
-rw-r--r--dependencies/pkg/mod/cache/download/github.com/google/uuid/@v/v1.6.0.info1
-rw-r--r--dependencies/pkg/mod/cache/download/github.com/google/uuid/@v/v1.6.0.lock (renamed from dependencies/pkg/mod/cache/download/github.com/google/uuid/@v/v1.3.0.lock)0
-rw-r--r--dependencies/pkg/mod/cache/download/github.com/google/uuid/@v/v1.6.0.mod (renamed from dependencies/pkg/mod/cache/download/github.com/google/uuid/@v/v1.3.0.mod)0
-rw-r--r--dependencies/pkg/mod/cache/download/github.com/google/uuid/@v/v1.6.0.zipbin0 -> 31981 bytes
-rw-r--r--dependencies/pkg/mod/cache/download/github.com/google/uuid/@v/v1.6.0.ziphash1
-rw-r--r--dependencies/pkg/mod/cache/download/github.com/mattn/go-isatty/@v/list2
-rw-r--r--dependencies/pkg/mod/cache/download/github.com/mattn/go-isatty/@v/v0.0.19.info1
-rw-r--r--dependencies/pkg/mod/cache/download/github.com/mattn/go-isatty/@v/v0.0.20.info1
-rw-r--r--dependencies/pkg/mod/cache/download/github.com/mattn/go-isatty/@v/v0.0.20.mod (renamed from dependencies/pkg/mod/cache/download/github.com/mattn/go-isatty/@v/v0.0.19.mod)0
-rw-r--r--dependencies/pkg/mod/cache/download/github.com/mattn/go-sqlite3/@v/list2
-rw-r--r--dependencies/pkg/mod/cache/download/github.com/mattn/go-sqlite3/@v/v1.14.17.info1
-rw-r--r--dependencies/pkg/mod/cache/download/github.com/mattn/go-sqlite3/@v/v1.14.17.zipbin2599934 -> 0 bytes
-rw-r--r--dependencies/pkg/mod/cache/download/github.com/mattn/go-sqlite3/@v/v1.14.17.ziphash1
-rw-r--r--dependencies/pkg/mod/cache/download/github.com/mattn/go-sqlite3/@v/v1.14.22.info1
-rw-r--r--dependencies/pkg/mod/cache/download/github.com/mattn/go-sqlite3/@v/v1.14.22.lock (renamed from dependencies/pkg/mod/cache/download/github.com/mattn/go-sqlite3/@v/v1.14.17.lock)0
-rw-r--r--dependencies/pkg/mod/cache/download/github.com/mattn/go-sqlite3/@v/v1.14.22.mod (renamed from dependencies/pkg/mod/cache/download/github.com/mattn/go-sqlite3/@v/v1.14.17.mod)2
-rw-r--r--dependencies/pkg/mod/cache/download/github.com/mattn/go-sqlite3/@v/v1.14.22.zipbin0 -> 2674679 bytes
-rw-r--r--dependencies/pkg/mod/cache/download/github.com/mattn/go-sqlite3/@v/v1.14.22.ziphash1
-rw-r--r--dependencies/pkg/mod/cache/download/github.com/redis/go-redis/v9/@v/list1
-rw-r--r--dependencies/pkg/mod/cache/download/github.com/redis/go-redis/v9/@v/v9.5.1.info1
-rw-r--r--dependencies/pkg/mod/cache/download/github.com/redis/go-redis/v9/@v/v9.5.1.lock (renamed from dependencies/pkg/mod/cache/download/go.uber.org/zap/@v/v1.25.0.lock)0
-rw-r--r--dependencies/pkg/mod/cache/download/github.com/redis/go-redis/v9/@v/v9.5.1.mod10
-rw-r--r--dependencies/pkg/mod/cache/download/github.com/redis/go-redis/v9/@v/v9.5.1.zipbin0 -> 250611 bytes
-rw-r--r--dependencies/pkg/mod/cache/download/github.com/redis/go-redis/v9/@v/v9.5.1.ziphash1
-rw-r--r--dependencies/pkg/mod/cache/download/github.com/rivo/uniseg/@v/list2
-rw-r--r--dependencies/pkg/mod/cache/download/github.com/rivo/uniseg/@v/v0.4.4.info1
-rw-r--r--dependencies/pkg/mod/cache/download/github.com/rivo/uniseg/@v/v0.4.7.info1
-rw-r--r--dependencies/pkg/mod/cache/download/github.com/rivo/uniseg/@v/v0.4.7.mod (renamed from dependencies/pkg/mod/cache/download/github.com/rivo/uniseg/@v/v0.4.4.mod)0
-rw-r--r--dependencies/pkg/mod/cache/download/github.com/stretchr/testify/@v/list2
-rw-r--r--dependencies/pkg/mod/cache/download/github.com/stretchr/testify/@v/v1.8.4.mod10
-rw-r--r--dependencies/pkg/mod/cache/download/github.com/stretchr/testify/@v/v1.9.0.mod16
-rw-r--r--dependencies/pkg/mod/cache/download/go.uber.org/zap/@v/list2
-rw-r--r--dependencies/pkg/mod/cache/download/go.uber.org/zap/@v/v1.25.0.info1
-rw-r--r--dependencies/pkg/mod/cache/download/go.uber.org/zap/@v/v1.25.0.zipbin277618 -> 0 bytes
-rw-r--r--dependencies/pkg/mod/cache/download/go.uber.org/zap/@v/v1.25.0.ziphash1
-rw-r--r--dependencies/pkg/mod/cache/download/go.uber.org/zap/@v/v1.27.0.info1
-rw-r--r--dependencies/pkg/mod/cache/download/go.uber.org/zap/@v/v1.27.0.lock (renamed from dependencies/pkg/mod/cache/download/golang.org/x/sync/@v/v0.3.0.lock)0
-rw-r--r--dependencies/pkg/mod/cache/download/go.uber.org/zap/@v/v1.27.0.mod (renamed from dependencies/pkg/mod/cache/download/go.uber.org/zap/@v/v1.25.0.mod)3
-rw-r--r--dependencies/pkg/mod/cache/download/go.uber.org/zap/@v/v1.27.0.zipbin0 -> 287887 bytes
-rw-r--r--dependencies/pkg/mod/cache/download/go.uber.org/zap/@v/v1.27.0.ziphash1
-rw-r--r--dependencies/pkg/mod/cache/download/golang.org/x/exp/@v/list2
-rw-r--r--dependencies/pkg/mod/cache/download/golang.org/x/exp/@v/v0.0.0-20230809094429-853ea248256d.info1
-rw-r--r--dependencies/pkg/mod/cache/download/golang.org/x/exp/@v/v0.0.0-20230809094429-853ea248256d.mod11
-rw-r--r--dependencies/pkg/mod/cache/download/golang.org/x/exp/@v/v0.0.0-20240409090435-93d18d7e34b8.info1
-rw-r--r--dependencies/pkg/mod/cache/download/golang.org/x/exp/@v/v0.0.0-20240409090435-93d18d7e34b8.mod11
-rw-r--r--dependencies/pkg/mod/cache/download/golang.org/x/sync/@v/list2
-rw-r--r--dependencies/pkg/mod/cache/download/golang.org/x/sync/@v/v0.3.0.info1
-rw-r--r--dependencies/pkg/mod/cache/download/golang.org/x/sync/@v/v0.3.0.zipbin29766 -> 0 bytes
-rw-r--r--dependencies/pkg/mod/cache/download/golang.org/x/sync/@v/v0.3.0.ziphash1
-rw-r--r--dependencies/pkg/mod/cache/download/golang.org/x/sync/@v/v0.7.0.info1
-rw-r--r--dependencies/pkg/mod/cache/download/golang.org/x/sync/@v/v0.7.0.lock (renamed from dependencies/pkg/mod/github.com/cespare/xxhash/v2@v2.1.2/go.sum)0
-rw-r--r--dependencies/pkg/mod/cache/download/golang.org/x/sync/@v/v0.7.0.mod (renamed from dependencies/pkg/mod/cache/download/golang.org/x/sync/@v/v0.3.0.mod)2
-rw-r--r--dependencies/pkg/mod/cache/download/golang.org/x/sync/@v/v0.7.0.zipbin0 -> 26990 bytes
-rw-r--r--dependencies/pkg/mod/cache/download/golang.org/x/sync/@v/v0.7.0.ziphash1
-rw-r--r--dependencies/pkg/mod/cache/download/golang.org/x/sys/@v/list3
-rw-r--r--dependencies/pkg/mod/cache/download/golang.org/x/sys/@v/v0.0.0-20211216021012-1d35b9e2eb4e.mod3
-rw-r--r--dependencies/pkg/mod/cache/download/golang.org/x/sys/@v/v0.11.0.info1
-rw-r--r--dependencies/pkg/mod/cache/download/golang.org/x/sys/@v/v0.19.0.info1
-rw-r--r--dependencies/pkg/mod/cache/download/golang.org/x/sys/@v/v0.19.0.mod (renamed from dependencies/pkg/mod/cache/download/golang.org/x/sys/@v/v0.11.0.mod)2
-rw-r--r--dependencies/pkg/mod/cache/download/golang.org/x/xerrors/@v/list1
-rw-r--r--dependencies/pkg/mod/cache/download/golang.org/x/xerrors/@v/v0.0.0-20220907171357-04be3eba64a2.info1
-rw-r--r--dependencies/pkg/mod/cache/download/golang.org/x/xerrors/@v/v0.0.0-20231012003039-104605ab7028.info1
-rw-r--r--dependencies/pkg/mod/cache/download/golang.org/x/xerrors/@v/v0.0.0-20231012003039-104605ab7028.mod3
-rw-r--r--dependencies/pkg/mod/cache/download/sumdb/sum.golang.org/lookup/github.com/cespare/xxhash/v2@v2.2.09
-rw-r--r--dependencies/pkg/mod/cache/download/sumdb/sum.golang.org/lookup/github.com/cespare/xxhash/v2@v2.3.09
-rw-r--r--dependencies/pkg/mod/cache/download/sumdb/sum.golang.org/lookup/github.com/davecgh/go-spew@v1.1.06
-rw-r--r--dependencies/pkg/mod/cache/download/sumdb/sum.golang.org/lookup/github.com/fatih/color@v1.10.06
-rw-r--r--dependencies/pkg/mod/cache/download/sumdb/sum.golang.org/lookup/github.com/fatih/color@v1.15.09
-rw-r--r--dependencies/pkg/mod/cache/download/sumdb/sum.golang.org/lookup/github.com/fatih/color@v1.16.09
-rw-r--r--dependencies/pkg/mod/cache/download/sumdb/sum.golang.org/lookup/github.com/mattn/go-colorable@v0.1.86
-rw-r--r--dependencies/pkg/mod/cache/download/sumdb/sum.golang.org/lookup/github.com/mattn/go-isatty@v0.0.199
-rw-r--r--dependencies/pkg/mod/cache/download/sumdb/sum.golang.org/lookup/github.com/mattn/go-isatty@v0.0.209
-rw-r--r--dependencies/pkg/mod/cache/download/sumdb/sum.golang.org/lookup/github.com/mattn/go-runewidth@v0.0.156
-rw-r--r--dependencies/pkg/mod/cache/download/sumdb/sum.golang.org/lookup/github.com/rivo/uniseg@v0.4.49
-rw-r--r--dependencies/pkg/mod/cache/download/sumdb/sum.golang.org/lookup/github.com/rivo/uniseg@v0.4.79
-rw-r--r--dependencies/pkg/mod/cache/download/sumdb/sum.golang.org/lookup/github.com/stretchr/objx@v0.1.06
-rw-r--r--dependencies/pkg/mod/cache/download/sumdb/sum.golang.org/lookup/github.com/stretchr/objx@v0.4.06
-rw-r--r--dependencies/pkg/mod/cache/download/sumdb/sum.golang.org/lookup/github.com/stretchr/objx@v0.5.06
-rw-r--r--dependencies/pkg/mod/cache/download/sumdb/sum.golang.org/lookup/github.com/stretchr/testify@v1.7.06
-rw-r--r--dependencies/pkg/mod/cache/download/sumdb/sum.golang.org/lookup/github.com/stretchr/testify@v1.7.16
-rw-r--r--dependencies/pkg/mod/cache/download/sumdb/sum.golang.org/lookup/github.com/stretchr/testify@v1.8.06
-rw-r--r--dependencies/pkg/mod/cache/download/sumdb/sum.golang.org/lookup/github.com/stretchr/testify@v1.8.16
-rw-r--r--dependencies/pkg/mod/cache/download/sumdb/sum.golang.org/lookup/go.uber.org/multierr@v1.11.06
-rw-r--r--dependencies/pkg/mod/cache/download/sumdb/sum.golang.org/lookup/golang.org/x/exp@v0.0.0-20230809094429-853ea248256d9
-rw-r--r--dependencies/pkg/mod/cache/download/sumdb/sum.golang.org/lookup/golang.org/x/exp@v0.0.0-20240409090435-93d18d7e34b89
-rw-r--r--dependencies/pkg/mod/cache/download/sumdb/sum.golang.org/lookup/golang.org/x/sys@v0.0.0-20211019181941-9d821ace86546
-rw-r--r--dependencies/pkg/mod/cache/download/sumdb/sum.golang.org/lookup/golang.org/x/sys@v0.0.0-20211216021012-1d35b9e2eb4e9
-rw-r--r--dependencies/pkg/mod/cache/download/sumdb/sum.golang.org/lookup/golang.org/x/sys@v0.11.09
-rw-r--r--dependencies/pkg/mod/cache/download/sumdb/sum.golang.org/lookup/golang.org/x/sys@v0.19.09
-rw-r--r--dependencies/pkg/mod/cache/download/sumdb/sum.golang.org/lookup/golang.org/x/xerrors@v0.0.0-20200804184101-5ec99f83aff16
-rw-r--r--dependencies/pkg/mod/cache/download/sumdb/sum.golang.org/lookup/golang.org/x/xerrors@v0.0.0-20231012003039-104605ab70289
-rw-r--r--dependencies/pkg/mod/cache/download/sumdb/sum.golang.org/lookup/gopkg.in/yaml.v3@v3.0.0-20200313102051-9f266ea9e77c6
-rw-r--r--dependencies/pkg/mod/cache/download/sumdb/sum.golang.org/tile/8/0/x032/849bin8192 -> 0 bytes
-rw-r--r--dependencies/pkg/mod/cache/download/sumdb/sum.golang.org/tile/8/0/x055/239bin8192 -> 0 bytes
-rw-r--r--dependencies/pkg/mod/cache/download/sumdb/sum.golang.org/tile/8/0/x061/424bin8192 -> 0 bytes
-rw-r--r--dependencies/pkg/mod/cache/download/sumdb/sum.golang.org/tile/8/0/x063/929bin8192 -> 0 bytes
-rw-r--r--dependencies/pkg/mod/cache/download/sumdb/sum.golang.org/tile/8/0/x068/671bin8192 -> 0 bytes
-rw-r--r--dependencies/pkg/mod/cache/download/sumdb/sum.golang.org/tile/8/0/x073/736bin8192 -> 0 bytes
-rw-r--r--dependencies/pkg/mod/cache/download/sumdb/sum.golang.org/tile/8/0/x074/030bin8192 -> 0 bytes
-rw-r--r--dependencies/pkg/mod/cache/download/sumdb/sum.golang.org/tile/8/0/x074/030.p/198bin6336 -> 0 bytes
-rw-r--r--dependencies/pkg/mod/cache/download/sumdb/sum.golang.org/tile/8/0/x074/031bin8192 -> 0 bytes
-rw-r--r--dependencies/pkg/mod/cache/download/sumdb/sum.golang.org/tile/8/0/x074/033bin8192 -> 0 bytes
-rw-r--r--dependencies/pkg/mod/cache/download/sumdb/sum.golang.org/tile/8/0/x074/034bin8192 -> 0 bytes
-rw-r--r--dependencies/pkg/mod/cache/download/sumdb/sum.golang.org/tile/8/0/x074/035bin8192 -> 0 bytes
-rw-r--r--dependencies/pkg/mod/cache/download/sumdb/sum.golang.org/tile/8/0/x074/036bin8192 -> 0 bytes
-rw-r--r--dependencies/pkg/mod/cache/download/sumdb/sum.golang.org/tile/8/0/x074/037bin8192 -> 0 bytes
-rw-r--r--dependencies/pkg/mod/cache/download/sumdb/sum.golang.org/tile/8/0/x074/037.p/124bin3968 -> 0 bytes
-rw-r--r--dependencies/pkg/mod/cache/download/sumdb/sum.golang.org/tile/8/0/x074/038bin8192 -> 0 bytes
-rw-r--r--dependencies/pkg/mod/cache/download/sumdb/sum.golang.org/tile/8/0/x074/038.p/229bin7328 -> 0 bytes
-rw-r--r--dependencies/pkg/mod/cache/download/sumdb/sum.golang.org/tile/8/0/x074/039.p/138bin4416 -> 0 bytes
-rw-r--r--dependencies/pkg/mod/cache/download/sumdb/sum.golang.org/tile/8/0/x078/105bin0 -> 8192 bytes
-rw-r--r--dependencies/pkg/mod/cache/download/sumdb/sum.golang.org/tile/8/0/x078/530bin0 -> 8192 bytes
-rw-r--r--dependencies/pkg/mod/cache/download/sumdb/sum.golang.org/tile/8/0/x080/086bin0 -> 8192 bytes
-rw-r--r--dependencies/pkg/mod/cache/download/sumdb/sum.golang.org/tile/8/0/x088/466bin0 -> 8192 bytes
-rw-r--r--dependencies/pkg/mod/cache/download/sumdb/sum.golang.org/tile/8/0/x097/483bin0 -> 8192 bytes
-rw-r--r--dependencies/pkg/mod/cache/download/sumdb/sum.golang.org/tile/8/0/x097/509bin0 -> 8192 bytes
-rw-r--r--dependencies/pkg/mod/cache/download/sumdb/sum.golang.org/tile/8/0/x097/821bin0 -> 8192 bytes
-rw-r--r--dependencies/pkg/mod/cache/download/sumdb/sum.golang.org/tile/8/0/x098/031bin0 -> 8192 bytes
-rw-r--r--dependencies/pkg/mod/cache/download/sumdb/sum.golang.org/tile/8/0/x098/031.p/226bin0 -> 7232 bytes
-rw-r--r--dependencies/pkg/mod/cache/download/sumdb/sum.golang.org/tile/8/0/x098/032bin0 -> 8192 bytes
-rw-r--r--dependencies/pkg/mod/cache/download/sumdb/sum.golang.org/tile/8/0/x098/033bin0 -> 8192 bytes
-rw-r--r--dependencies/pkg/mod/cache/download/sumdb/sum.golang.org/tile/8/0/x098/034bin0 -> 8192 bytes
-rw-r--r--dependencies/pkg/mod/cache/download/sumdb/sum.golang.org/tile/8/0/x098/035bin0 -> 8192 bytes
-rw-r--r--dependencies/pkg/mod/cache/download/sumdb/sum.golang.org/tile/8/0/x098/036bin0 -> 8192 bytes
-rw-r--r--dependencies/pkg/mod/cache/download/sumdb/sum.golang.org/tile/8/0/x098/037bin0 -> 8192 bytes
-rw-r--r--dependencies/pkg/mod/cache/download/sumdb/sum.golang.org/tile/8/0/x098/038.p/224bin0 -> 7168 bytes
-rw-r--r--dependencies/pkg/mod/cache/download/sumdb/sum.golang.org/tile/8/0/x098/038.p/227bin0 -> 7264 bytes
-rw-r--r--dependencies/pkg/mod/cache/download/sumdb/sum.golang.org/tile/8/1/128bin8192 -> 0 bytes
-rw-r--r--dependencies/pkg/mod/cache/download/sumdb/sum.golang.org/tile/8/1/215bin8192 -> 0 bytes
-rw-r--r--dependencies/pkg/mod/cache/download/sumdb/sum.golang.org/tile/8/1/239bin8192 -> 0 bytes
-rw-r--r--dependencies/pkg/mod/cache/download/sumdb/sum.golang.org/tile/8/1/249bin8192 -> 0 bytes
-rw-r--r--dependencies/pkg/mod/cache/download/sumdb/sum.golang.org/tile/8/1/268bin8192 -> 0 bytes
-rw-r--r--dependencies/pkg/mod/cache/download/sumdb/sum.golang.org/tile/8/1/288bin8192 -> 0 bytes
-rw-r--r--dependencies/pkg/mod/cache/download/sumdb/sum.golang.org/tile/8/1/289.p/46bin1472 -> 0 bytes
-rw-r--r--dependencies/pkg/mod/cache/download/sumdb/sum.golang.org/tile/8/1/289.p/53bin1696 -> 0 bytes
-rw-r--r--dependencies/pkg/mod/cache/download/sumdb/sum.golang.org/tile/8/1/289.p/54bin1728 -> 0 bytes
-rw-r--r--dependencies/pkg/mod/cache/download/sumdb/sum.golang.org/tile/8/1/289.p/55bin1760 -> 0 bytes
-rw-r--r--dependencies/pkg/mod/cache/download/sumdb/sum.golang.org/tile/8/1/305bin0 -> 8192 bytes
-rw-r--r--dependencies/pkg/mod/cache/download/sumdb/sum.golang.org/tile/8/1/306bin0 -> 8192 bytes
-rw-r--r--dependencies/pkg/mod/cache/download/sumdb/sum.golang.org/tile/8/1/312bin0 -> 8192 bytes
-rw-r--r--dependencies/pkg/mod/cache/download/sumdb/sum.golang.org/tile/8/1/345bin0 -> 8192 bytes
-rw-r--r--dependencies/pkg/mod/cache/download/sumdb/sum.golang.org/tile/8/1/380bin0 -> 8192 bytes
-rw-r--r--dependencies/pkg/mod/cache/download/sumdb/sum.golang.org/tile/8/1/382.p/239bin0 -> 7648 bytes
-rw-r--r--dependencies/pkg/mod/cache/download/sumdb/sum.golang.org/tile/8/1/382.p/246bin0 -> 7872 bytes
-rw-r--r--dependencies/pkg/mod/cache/download/sumdb/sum.golang.org/tile/8/2/001.p/126bin0 -> 4032 bytes
-rw-r--r--dependencies/pkg/mod/cache/download/sumdb/sum.golang.org/tile/8/2/001.p/33bin1056 -> 0 bytes
-rw-r--r--dependencies/pkg/mod/filippo.io/edwards25519@v1.1.0/.github/workflows/test.yml17
-rw-r--r--dependencies/pkg/mod/filippo.io/edwards25519@v1.1.0/LICENSE (renamed from dependencies/pkg/mod/golang.org/x/sync@v0.3.0/LICENSE)0
-rw-r--r--dependencies/pkg/mod/filippo.io/edwards25519@v1.1.0/README.md14
-rw-r--r--dependencies/pkg/mod/filippo.io/edwards25519@v1.1.0/doc.go20
-rw-r--r--dependencies/pkg/mod/filippo.io/edwards25519@v1.1.0/edwards25519.go427
-rw-r--r--dependencies/pkg/mod/filippo.io/edwards25519@v1.1.0/edwards25519_test.go311
-rw-r--r--dependencies/pkg/mod/filippo.io/edwards25519@v1.1.0/extra.go349
-rw-r--r--dependencies/pkg/mod/filippo.io/edwards25519@v1.1.0/extra_test.go220
-rw-r--r--dependencies/pkg/mod/filippo.io/edwards25519@v1.1.0/field/fe.go420
-rw-r--r--dependencies/pkg/mod/filippo.io/edwards25519@v1.1.0/field/fe_alias_test.go140
-rw-r--r--dependencies/pkg/mod/filippo.io/edwards25519@v1.1.0/field/fe_amd64.go16
-rw-r--r--dependencies/pkg/mod/filippo.io/edwards25519@v1.1.0/field/fe_amd64.s379
-rw-r--r--dependencies/pkg/mod/filippo.io/edwards25519@v1.1.0/field/fe_amd64_noasm.go12
-rw-r--r--dependencies/pkg/mod/filippo.io/edwards25519@v1.1.0/field/fe_arm64.go16
-rw-r--r--dependencies/pkg/mod/filippo.io/edwards25519@v1.1.0/field/fe_arm64.s42
-rw-r--r--dependencies/pkg/mod/filippo.io/edwards25519@v1.1.0/field/fe_arm64_noasm.go12
-rw-r--r--dependencies/pkg/mod/filippo.io/edwards25519@v1.1.0/field/fe_bench_test.go49
-rw-r--r--dependencies/pkg/mod/filippo.io/edwards25519@v1.1.0/field/fe_extra.go50
-rw-r--r--dependencies/pkg/mod/filippo.io/edwards25519@v1.1.0/field/fe_extra_test.go37
-rw-r--r--dependencies/pkg/mod/filippo.io/edwards25519@v1.1.0/field/fe_generic.go266
-rw-r--r--dependencies/pkg/mod/filippo.io/edwards25519@v1.1.0/field/fe_test.go566
-rw-r--r--dependencies/pkg/mod/filippo.io/edwards25519@v1.1.0/go.mod3
-rw-r--r--dependencies/pkg/mod/filippo.io/edwards25519@v1.1.0/scalar.go343
-rw-r--r--dependencies/pkg/mod/filippo.io/edwards25519@v1.1.0/scalar_alias_test.go111
-rw-r--r--dependencies/pkg/mod/filippo.io/edwards25519@v1.1.0/scalar_fiat.go1147
-rw-r--r--dependencies/pkg/mod/filippo.io/edwards25519@v1.1.0/scalar_test.go255
-rw-r--r--dependencies/pkg/mod/filippo.io/edwards25519@v1.1.0/scalarmult.go214
-rw-r--r--dependencies/pkg/mod/filippo.io/edwards25519@v1.1.0/scalarmult_test.go205
-rw-r--r--dependencies/pkg/mod/filippo.io/edwards25519@v1.1.0/tables.go129
-rw-r--r--dependencies/pkg/mod/filippo.io/edwards25519@v1.1.0/tables_test.go119
-rw-r--r--dependencies/pkg/mod/github.com/cespare/xxhash/v2@v2.1.2/.github/workflows/test.yml32
-rw-r--r--dependencies/pkg/mod/github.com/cespare/xxhash/v2@v2.1.2/xxhash_amd64.s215
-rw-r--r--dependencies/pkg/mod/github.com/cespare/xxhash/v2@v2.2.0/.github/workflows/test.yml56
-rw-r--r--dependencies/pkg/mod/github.com/cespare/xxhash/v2@v2.2.0/LICENSE.txt (renamed from dependencies/pkg/mod/github.com/cespare/xxhash/v2@v2.1.2/LICENSE.txt)0
-rw-r--r--dependencies/pkg/mod/github.com/cespare/xxhash/v2@v2.2.0/README.md (renamed from dependencies/pkg/mod/github.com/cespare/xxhash/v2@v2.1.2/README.md)31
-rw-r--r--dependencies/pkg/mod/github.com/cespare/xxhash/v2@v2.2.0/bench_test.go (renamed from dependencies/pkg/mod/github.com/cespare/xxhash/v2@v2.1.2/bench_test.go)1
-rw-r--r--dependencies/pkg/mod/github.com/cespare/xxhash/v2@v2.2.0/dynamic/.gitignore (renamed from dependencies/pkg/mod/github.com/cespare/xxhash/v2@v2.1.2/dynamic/.gitignore)0
-rw-r--r--dependencies/pkg/mod/github.com/cespare/xxhash/v2@v2.2.0/dynamic/dynamic_test.go (renamed from dependencies/pkg/mod/github.com/cespare/xxhash/v2@v2.1.2/dynamic/dynamic_test.go)4
-rw-r--r--dependencies/pkg/mod/github.com/cespare/xxhash/v2@v2.2.0/dynamic/plugin.go (renamed from dependencies/pkg/mod/github.com/cespare/xxhash/v2@v2.1.2/dynamic/plugin.go)3
-rw-r--r--dependencies/pkg/mod/github.com/cespare/xxhash/v2@v2.2.0/go.mod (renamed from dependencies/pkg/mod/github.com/cespare/xxhash/v2@v2.1.2/go.mod)0
-rw-r--r--dependencies/pkg/mod/github.com/cespare/xxhash/v2@v2.2.0/go.sum (renamed from dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.17/go.sum)0
-rw-r--r--dependencies/pkg/mod/github.com/cespare/xxhash/v2@v2.2.0/testall.sh10
-rw-r--r--dependencies/pkg/mod/github.com/cespare/xxhash/v2@v2.2.0/xxhash.go (renamed from dependencies/pkg/mod/github.com/cespare/xxhash/v2@v2.1.2/xxhash.go)47
-rw-r--r--dependencies/pkg/mod/github.com/cespare/xxhash/v2@v2.2.0/xxhash_amd64.s209
-rw-r--r--dependencies/pkg/mod/github.com/cespare/xxhash/v2@v2.2.0/xxhash_arm64.s183
-rw-r--r--dependencies/pkg/mod/github.com/cespare/xxhash/v2@v2.2.0/xxhash_asm.go (renamed from dependencies/pkg/mod/github.com/cespare/xxhash/v2@v2.1.2/xxhash_amd64.go)2
-rw-r--r--dependencies/pkg/mod/github.com/cespare/xxhash/v2@v2.2.0/xxhash_other.go (renamed from dependencies/pkg/mod/github.com/cespare/xxhash/v2@v2.1.2/xxhash_other.go)22
-rw-r--r--dependencies/pkg/mod/github.com/cespare/xxhash/v2@v2.2.0/xxhash_safe.go (renamed from dependencies/pkg/mod/github.com/cespare/xxhash/v2@v2.1.2/xxhash_safe.go)1
-rw-r--r--dependencies/pkg/mod/github.com/cespare/xxhash/v2@v2.2.0/xxhash_test.go (renamed from dependencies/pkg/mod/github.com/cespare/xxhash/v2@v2.1.2/xxhash_test.go)0
-rw-r--r--dependencies/pkg/mod/github.com/cespare/xxhash/v2@v2.2.0/xxhash_unsafe.go (renamed from dependencies/pkg/mod/github.com/cespare/xxhash/v2@v2.1.2/xxhash_unsafe.go)3
-rw-r--r--dependencies/pkg/mod/github.com/cespare/xxhash/v2@v2.2.0/xxhash_unsafe_test.go (renamed from dependencies/pkg/mod/github.com/cespare/xxhash/v2@v2.1.2/xxhash_unsafe_test.go)5
-rw-r--r--dependencies/pkg/mod/github.com/cespare/xxhash/v2@v2.2.0/xxhsum/.gitignore (renamed from dependencies/pkg/mod/github.com/cespare/xxhash/v2@v2.1.2/xxhsum/.gitignore)0
-rw-r--r--dependencies/pkg/mod/github.com/cespare/xxhash/v2@v2.2.0/xxhsum/xxhsum.go (renamed from dependencies/pkg/mod/github.com/cespare/xxhash/v2@v2.1.2/xxhsum/xxhsum.go)0
-rw-r--r--dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/.github/workflows/commitlint.yml11
-rw-r--r--dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/.github/workflows/golangci-lint.yml19
-rw-r--r--dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/.github/workflows/release.yml17
-rw-r--r--dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/.gitignore3
-rw-r--r--dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/CHANGELOG.md177
-rw-r--r--dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/Makefile35
-rw-r--r--dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/README.md175
-rw-r--r--dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/command.go3478
-rw-r--r--dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/commands.go3475
-rw-r--r--dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/go.mod20
-rw-r--r--dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/go.sum108
-rw-r--r--dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/internal/pool/main_test.go36
-rw-r--r--dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/internal/proto/reader.go332
-rw-r--r--dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/internal/proto/reader_test.go72
-rw-r--r--dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/internal/proto/writer_test.go93
-rw-r--r--dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/internal/safe.go12
-rw-r--r--dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/internal/unsafe.go21
-rw-r--r--dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/internal_test.go67
-rw-r--r--dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/package.json8
-rw-r--r--dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/redis.go773
-rw-r--r--dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/testdata/redis.conf10
-rw-r--r--dependencies/pkg/mod/github.com/go-sql-driver/mysql@v1.7.1/fuzz.go25
-rw-r--r--dependencies/pkg/mod/github.com/go-sql-driver/mysql@v1.7.1/go.mod3
-rw-r--r--dependencies/pkg/mod/github.com/go-sql-driver/mysql@v1.7.1/result.go22
-rw-r--r--dependencies/pkg/mod/github.com/go-sql-driver/mysql@v1.8.1/.github/CONTRIBUTING.md (renamed from dependencies/pkg/mod/github.com/go-sql-driver/mysql@v1.7.1/.github/CONTRIBUTING.md)0
-rw-r--r--dependencies/pkg/mod/github.com/go-sql-driver/mysql@v1.8.1/.github/ISSUE_TEMPLATE.md (renamed from dependencies/pkg/mod/github.com/go-sql-driver/mysql@v1.7.1/.github/ISSUE_TEMPLATE.md)0
-rw-r--r--dependencies/pkg/mod/github.com/go-sql-driver/mysql@v1.8.1/.github/PULL_REQUEST_TEMPLATE.md (renamed from dependencies/pkg/mod/github.com/go-sql-driver/mysql@v1.7.1/.github/PULL_REQUEST_TEMPLATE.md)0
-rw-r--r--dependencies/pkg/mod/github.com/go-sql-driver/mysql@v1.8.1/.github/workflows/codeql.yml (renamed from dependencies/pkg/mod/github.com/go-sql-driver/mysql@v1.7.1/.github/workflows/codeql.yml)8
-rw-r--r--dependencies/pkg/mod/github.com/go-sql-driver/mysql@v1.8.1/.github/workflows/test.yml (renamed from dependencies/pkg/mod/github.com/go-sql-driver/mysql@v1.7.1/.github/workflows/test.yml)28
-rw-r--r--dependencies/pkg/mod/github.com/go-sql-driver/mysql@v1.8.1/.gitignore (renamed from dependencies/pkg/mod/github.com/go-sql-driver/mysql@v1.7.1/.gitignore)0
-rw-r--r--dependencies/pkg/mod/github.com/go-sql-driver/mysql@v1.8.1/AUTHORS (renamed from dependencies/pkg/mod/github.com/go-sql-driver/mysql@v1.7.1/AUTHORS)16
-rw-r--r--dependencies/pkg/mod/github.com/go-sql-driver/mysql@v1.8.1/CHANGELOG.md (renamed from dependencies/pkg/mod/github.com/go-sql-driver/mysql@v1.7.1/CHANGELOG.md)48
-rw-r--r--dependencies/pkg/mod/github.com/go-sql-driver/mysql@v1.8.1/LICENSE (renamed from dependencies/pkg/mod/github.com/go-sql-driver/mysql@v1.7.1/LICENSE)0
-rw-r--r--dependencies/pkg/mod/github.com/go-sql-driver/mysql@v1.8.1/README.md (renamed from dependencies/pkg/mod/github.com/go-sql-driver/mysql@v1.7.1/README.md)78
-rw-r--r--dependencies/pkg/mod/github.com/go-sql-driver/mysql@v1.8.1/atomic_bool.go (renamed from dependencies/pkg/mod/github.com/go-sql-driver/mysql@v1.7.1/atomic_bool.go)0
-rw-r--r--dependencies/pkg/mod/github.com/go-sql-driver/mysql@v1.8.1/atomic_bool_go118.go (renamed from dependencies/pkg/mod/github.com/go-sql-driver/mysql@v1.7.1/atomic_bool_go118.go)0
-rw-r--r--dependencies/pkg/mod/github.com/go-sql-driver/mysql@v1.8.1/atomic_bool_test.go (renamed from dependencies/pkg/mod/github.com/go-sql-driver/mysql@v1.7.1/atomic_bool_test.go)0
-rw-r--r--dependencies/pkg/mod/github.com/go-sql-driver/mysql@v1.8.1/auth.go (renamed from dependencies/pkg/mod/github.com/go-sql-driver/mysql@v1.7.1/auth.go)63
-rw-r--r--dependencies/pkg/mod/github.com/go-sql-driver/mysql@v1.8.1/auth_test.go (renamed from dependencies/pkg/mod/github.com/go-sql-driver/mysql@v1.7.1/auth_test.go)51
-rw-r--r--dependencies/pkg/mod/github.com/go-sql-driver/mysql@v1.8.1/benchmark_test.go (renamed from dependencies/pkg/mod/github.com/go-sql-driver/mysql@v1.7.1/benchmark_test.go)64
-rw-r--r--dependencies/pkg/mod/github.com/go-sql-driver/mysql@v1.8.1/buffer.go (renamed from dependencies/pkg/mod/github.com/go-sql-driver/mysql@v1.7.1/buffer.go)0
-rw-r--r--dependencies/pkg/mod/github.com/go-sql-driver/mysql@v1.8.1/collations.go (renamed from dependencies/pkg/mod/github.com/go-sql-driver/mysql@v1.7.1/collations.go)2
-rw-r--r--dependencies/pkg/mod/github.com/go-sql-driver/mysql@v1.8.1/conncheck.go (renamed from dependencies/pkg/mod/github.com/go-sql-driver/mysql@v1.7.1/conncheck.go)0
-rw-r--r--dependencies/pkg/mod/github.com/go-sql-driver/mysql@v1.8.1/conncheck_dummy.go (renamed from dependencies/pkg/mod/github.com/go-sql-driver/mysql@v1.7.1/conncheck_dummy.go)0
-rw-r--r--dependencies/pkg/mod/github.com/go-sql-driver/mysql@v1.8.1/conncheck_test.go (renamed from dependencies/pkg/mod/github.com/go-sql-driver/mysql@v1.7.1/conncheck_test.go)2
-rw-r--r--dependencies/pkg/mod/github.com/go-sql-driver/mysql@v1.8.1/connection.go (renamed from dependencies/pkg/mod/github.com/go-sql-driver/mysql@v1.7.1/connection.go)100
-rw-r--r--dependencies/pkg/mod/github.com/go-sql-driver/mysql@v1.8.1/connection_test.go (renamed from dependencies/pkg/mod/github.com/go-sql-driver/mysql@v1.7.1/connection_test.go)1
-rw-r--r--dependencies/pkg/mod/github.com/go-sql-driver/mysql@v1.8.1/connector.go (renamed from dependencies/pkg/mod/github.com/go-sql-driver/mysql@v1.7.1/connector.go)67
-rw-r--r--dependencies/pkg/mod/github.com/go-sql-driver/mysql@v1.8.1/connector_test.go (renamed from dependencies/pkg/mod/github.com/go-sql-driver/mysql@v1.7.1/connector_test.go)4
-rw-r--r--dependencies/pkg/mod/github.com/go-sql-driver/mysql@v1.8.1/const.go (renamed from dependencies/pkg/mod/github.com/go-sql-driver/mysql@v1.7.1/const.go)13
-rw-r--r--dependencies/pkg/mod/github.com/go-sql-driver/mysql@v1.8.1/driver.go (renamed from dependencies/pkg/mod/github.com/go-sql-driver/mysql@v1.7.1/driver.go)27
-rw-r--r--dependencies/pkg/mod/github.com/go-sql-driver/mysql@v1.8.1/driver_test.go (renamed from dependencies/pkg/mod/github.com/go-sql-driver/mysql@v1.7.1/driver_test.go)917
-rw-r--r--dependencies/pkg/mod/github.com/go-sql-driver/mysql@v1.8.1/dsn.go (renamed from dependencies/pkg/mod/github.com/go-sql-driver/mysql@v1.7.1/dsn.go)128
-rw-r--r--dependencies/pkg/mod/github.com/go-sql-driver/mysql@v1.8.1/dsn_fuzz_test.go47
-rw-r--r--dependencies/pkg/mod/github.com/go-sql-driver/mysql@v1.8.1/dsn_test.go (renamed from dependencies/pkg/mod/github.com/go-sql-driver/mysql@v1.7.1/dsn_test.go)113
-rw-r--r--dependencies/pkg/mod/github.com/go-sql-driver/mysql@v1.8.1/errors.go (renamed from dependencies/pkg/mod/github.com/go-sql-driver/mysql@v1.7.1/errors.go)16
-rw-r--r--dependencies/pkg/mod/github.com/go-sql-driver/mysql@v1.8.1/errors_test.go (renamed from dependencies/pkg/mod/github.com/go-sql-driver/mysql@v1.7.1/errors_test.go)6
-rw-r--r--dependencies/pkg/mod/github.com/go-sql-driver/mysql@v1.8.1/fields.go (renamed from dependencies/pkg/mod/github.com/go-sql-driver/mysql@v1.7.1/fields.go)70
-rw-r--r--dependencies/pkg/mod/github.com/go-sql-driver/mysql@v1.8.1/go.mod5
-rw-r--r--dependencies/pkg/mod/github.com/go-sql-driver/mysql@v1.8.1/go.sum2
-rw-r--r--dependencies/pkg/mod/github.com/go-sql-driver/mysql@v1.8.1/infile.go (renamed from dependencies/pkg/mod/github.com/go-sql-driver/mysql@v1.7.1/infile.go)12
-rw-r--r--dependencies/pkg/mod/github.com/go-sql-driver/mysql@v1.8.1/nulltime.go (renamed from dependencies/pkg/mod/github.com/go-sql-driver/mysql@v1.7.1/nulltime.go)4
-rw-r--r--dependencies/pkg/mod/github.com/go-sql-driver/mysql@v1.8.1/nulltime_test.go (renamed from dependencies/pkg/mod/github.com/go-sql-driver/mysql@v1.7.1/nulltime_test.go)2
-rw-r--r--dependencies/pkg/mod/github.com/go-sql-driver/mysql@v1.8.1/packets.go (renamed from dependencies/pkg/mod/github.com/go-sql-driver/mysql@v1.7.1/packets.go)209
-rw-r--r--dependencies/pkg/mod/github.com/go-sql-driver/mysql@v1.8.1/packets_test.go (renamed from dependencies/pkg/mod/github.com/go-sql-driver/mysql@v1.7.1/packets_test.go)61
-rw-r--r--dependencies/pkg/mod/github.com/go-sql-driver/mysql@v1.8.1/result.go50
-rw-r--r--dependencies/pkg/mod/github.com/go-sql-driver/mysql@v1.8.1/rows.go (renamed from dependencies/pkg/mod/github.com/go-sql-driver/mysql@v1.7.1/rows.go)13
-rw-r--r--dependencies/pkg/mod/github.com/go-sql-driver/mysql@v1.8.1/statement.go (renamed from dependencies/pkg/mod/github.com/go-sql-driver/mysql@v1.7.1/statement.go)23
-rw-r--r--dependencies/pkg/mod/github.com/go-sql-driver/mysql@v1.8.1/statement_test.go (renamed from dependencies/pkg/mod/github.com/go-sql-driver/mysql@v1.7.1/statement_test.go)4
-rw-r--r--dependencies/pkg/mod/github.com/go-sql-driver/mysql@v1.8.1/transaction.go (renamed from dependencies/pkg/mod/github.com/go-sql-driver/mysql@v1.7.1/transaction.go)0
-rw-r--r--dependencies/pkg/mod/github.com/go-sql-driver/mysql@v1.8.1/utils.go (renamed from dependencies/pkg/mod/github.com/go-sql-driver/mysql@v1.7.1/utils.go)13
-rw-r--r--dependencies/pkg/mod/github.com/go-sql-driver/mysql@v1.8.1/utils_test.go (renamed from dependencies/pkg/mod/github.com/go-sql-driver/mysql@v1.7.1/utils_test.go)89
-rw-r--r--dependencies/pkg/mod/github.com/goccy/go-yaml@v1.11.3/.codecov.yml (renamed from dependencies/pkg/mod/github.com/goccy/go-yaml@v1.11.0/.codecov.yml)0
-rw-r--r--dependencies/pkg/mod/github.com/goccy/go-yaml@v1.11.3/.github/FUNDING.yml (renamed from dependencies/pkg/mod/github.com/goccy/go-yaml@v1.11.0/.github/FUNDING.yml)0
-rw-r--r--dependencies/pkg/mod/github.com/goccy/go-yaml@v1.11.3/.github/ISSUE_TEMPLATE/bug_report.md29
-rw-r--r--dependencies/pkg/mod/github.com/goccy/go-yaml@v1.11.3/.github/ISSUE_TEMPLATE/feature_request.md20
-rw-r--r--dependencies/pkg/mod/github.com/goccy/go-yaml@v1.11.3/.github/pull_request_template.md4
-rw-r--r--dependencies/pkg/mod/github.com/goccy/go-yaml@v1.11.3/.github/workflows/go.yml (renamed from dependencies/pkg/mod/github.com/goccy/go-yaml@v1.11.0/.github/workflows/go.yml)32
-rw-r--r--dependencies/pkg/mod/github.com/goccy/go-yaml@v1.11.3/CHANGELOG.md (renamed from dependencies/pkg/mod/github.com/goccy/go-yaml@v1.11.0/CHANGELOG.md)23
-rw-r--r--dependencies/pkg/mod/github.com/goccy/go-yaml@v1.11.3/LICENSE (renamed from dependencies/pkg/mod/github.com/goccy/go-yaml@v1.11.0/LICENSE)0
-rw-r--r--dependencies/pkg/mod/github.com/goccy/go-yaml@v1.11.3/Makefile (renamed from dependencies/pkg/mod/github.com/goccy/go-yaml@v1.11.0/Makefile)0
-rw-r--r--dependencies/pkg/mod/github.com/goccy/go-yaml@v1.11.3/README.md (renamed from dependencies/pkg/mod/github.com/goccy/go-yaml@v1.11.0/README.md)0
-rw-r--r--dependencies/pkg/mod/github.com/goccy/go-yaml@v1.11.3/ast/ast.go (renamed from dependencies/pkg/mod/github.com/goccy/go-yaml@v1.11.0/ast/ast.go)4
-rw-r--r--dependencies/pkg/mod/github.com/goccy/go-yaml@v1.11.3/ast/ast_test.go (renamed from dependencies/pkg/mod/github.com/goccy/go-yaml@v1.11.0/ast/ast_test.go)0
-rw-r--r--dependencies/pkg/mod/github.com/goccy/go-yaml@v1.11.3/cmd/ycat/ycat.go (renamed from dependencies/pkg/mod/github.com/goccy/go-yaml@v1.11.0/cmd/ycat/ycat.go)0
-rw-r--r--dependencies/pkg/mod/github.com/goccy/go-yaml@v1.11.3/decode.go (renamed from dependencies/pkg/mod/github.com/goccy/go-yaml@v1.11.0/decode.go)0
-rw-r--r--dependencies/pkg/mod/github.com/goccy/go-yaml@v1.11.3/decode_test.go (renamed from dependencies/pkg/mod/github.com/goccy/go-yaml@v1.11.0/decode_test.go)36
-rw-r--r--dependencies/pkg/mod/github.com/goccy/go-yaml@v1.11.3/encode.go (renamed from dependencies/pkg/mod/github.com/goccy/go-yaml@v1.11.0/encode.go)4
-rw-r--r--dependencies/pkg/mod/github.com/goccy/go-yaml@v1.11.3/encode_test.go (renamed from dependencies/pkg/mod/github.com/goccy/go-yaml@v1.11.0/encode_test.go)39
-rw-r--r--dependencies/pkg/mod/github.com/goccy/go-yaml@v1.11.3/error.go (renamed from dependencies/pkg/mod/github.com/goccy/go-yaml@v1.11.0/error.go)0
-rw-r--r--dependencies/pkg/mod/github.com/goccy/go-yaml@v1.11.3/go.mod (renamed from dependencies/pkg/mod/github.com/goccy/go-yaml@v1.11.0/go.mod)2
-rw-r--r--dependencies/pkg/mod/github.com/goccy/go-yaml@v1.11.3/go.sum (renamed from dependencies/pkg/mod/github.com/goccy/go-yaml@v1.11.0/go.sum)0
-rw-r--r--dependencies/pkg/mod/github.com/goccy/go-yaml@v1.11.3/internal/errors/error.go (renamed from dependencies/pkg/mod/github.com/goccy/go-yaml@v1.11.0/internal/errors/error.go)0
-rw-r--r--dependencies/pkg/mod/github.com/goccy/go-yaml@v1.11.3/lexer/lexer.go (renamed from dependencies/pkg/mod/github.com/goccy/go-yaml@v1.11.0/lexer/lexer.go)0
-rw-r--r--dependencies/pkg/mod/github.com/goccy/go-yaml@v1.11.3/lexer/lexer_test.go (renamed from dependencies/pkg/mod/github.com/goccy/go-yaml@v1.11.0/lexer/lexer_test.go)0
-rw-r--r--dependencies/pkg/mod/github.com/goccy/go-yaml@v1.11.3/option.go (renamed from dependencies/pkg/mod/github.com/goccy/go-yaml@v1.11.0/option.go)0
-rw-r--r--dependencies/pkg/mod/github.com/goccy/go-yaml@v1.11.3/parser/context.go (renamed from dependencies/pkg/mod/github.com/goccy/go-yaml@v1.11.0/parser/context.go)7
-rw-r--r--dependencies/pkg/mod/github.com/goccy/go-yaml@v1.11.3/parser/parser.go (renamed from dependencies/pkg/mod/github.com/goccy/go-yaml@v1.11.0/parser/parser.go)41
-rw-r--r--dependencies/pkg/mod/github.com/goccy/go-yaml@v1.11.3/parser/parser_test.go (renamed from dependencies/pkg/mod/github.com/goccy/go-yaml@v1.11.0/parser/parser_test.go)124
-rw-r--r--dependencies/pkg/mod/github.com/goccy/go-yaml@v1.11.3/parser/testdata/cr.yml (renamed from dependencies/pkg/mod/github.com/goccy/go-yaml@v1.11.0/parser/testdata/cr.yml)0
-rw-r--r--dependencies/pkg/mod/github.com/goccy/go-yaml@v1.11.3/parser/testdata/crlf.yml (renamed from dependencies/pkg/mod/github.com/goccy/go-yaml@v1.11.0/parser/testdata/crlf.yml)0
-rw-r--r--dependencies/pkg/mod/github.com/goccy/go-yaml@v1.11.3/parser/testdata/lf.yml (renamed from dependencies/pkg/mod/github.com/goccy/go-yaml@v1.11.0/parser/testdata/lf.yml)0
-rw-r--r--dependencies/pkg/mod/github.com/goccy/go-yaml@v1.11.3/path.go (renamed from dependencies/pkg/mod/github.com/goccy/go-yaml@v1.11.0/path.go)26
-rw-r--r--dependencies/pkg/mod/github.com/goccy/go-yaml@v1.11.3/path_test.go (renamed from dependencies/pkg/mod/github.com/goccy/go-yaml@v1.11.0/path_test.go)7
-rw-r--r--dependencies/pkg/mod/github.com/goccy/go-yaml@v1.11.3/printer/printer.go (renamed from dependencies/pkg/mod/github.com/goccy/go-yaml@v1.11.0/printer/printer.go)0
-rw-r--r--dependencies/pkg/mod/github.com/goccy/go-yaml@v1.11.3/printer/printer_test.go (renamed from dependencies/pkg/mod/github.com/goccy/go-yaml@v1.11.0/printer/printer_test.go)0
-rw-r--r--dependencies/pkg/mod/github.com/goccy/go-yaml@v1.11.3/scanner/context.go (renamed from dependencies/pkg/mod/github.com/goccy/go-yaml@v1.11.0/scanner/context.go)13
-rw-r--r--dependencies/pkg/mod/github.com/goccy/go-yaml@v1.11.3/scanner/scanner.go (renamed from dependencies/pkg/mod/github.com/goccy/go-yaml@v1.11.0/scanner/scanner.go)5
-rw-r--r--dependencies/pkg/mod/github.com/goccy/go-yaml@v1.11.3/stdlib_quote.go (renamed from dependencies/pkg/mod/github.com/goccy/go-yaml@v1.11.0/stdlib_quote.go)0
-rw-r--r--dependencies/pkg/mod/github.com/goccy/go-yaml@v1.11.3/struct.go (renamed from dependencies/pkg/mod/github.com/goccy/go-yaml@v1.11.0/struct.go)0
-rw-r--r--dependencies/pkg/mod/github.com/goccy/go-yaml@v1.11.3/testdata/anchor.yml (renamed from dependencies/pkg/mod/github.com/goccy/go-yaml@v1.11.0/testdata/anchor.yml)0
-rw-r--r--dependencies/pkg/mod/github.com/goccy/go-yaml@v1.11.3/token/token.go (renamed from dependencies/pkg/mod/github.com/goccy/go-yaml@v1.11.0/token/token.go)4
-rw-r--r--dependencies/pkg/mod/github.com/goccy/go-yaml@v1.11.3/token/token_test.go (renamed from dependencies/pkg/mod/github.com/goccy/go-yaml@v1.11.0/token/token_test.go)3
-rw-r--r--dependencies/pkg/mod/github.com/goccy/go-yaml@v1.11.3/validate.go (renamed from dependencies/pkg/mod/github.com/goccy/go-yaml@v1.11.0/validate.go)0
-rw-r--r--dependencies/pkg/mod/github.com/goccy/go-yaml@v1.11.3/validate_test.go (renamed from dependencies/pkg/mod/github.com/goccy/go-yaml@v1.11.0/validate_test.go)0
-rw-r--r--dependencies/pkg/mod/github.com/goccy/go-yaml@v1.11.3/yaml.go (renamed from dependencies/pkg/mod/github.com/goccy/go-yaml@v1.11.0/yaml.go)62
-rw-r--r--dependencies/pkg/mod/github.com/goccy/go-yaml@v1.11.3/yaml_test.go (renamed from dependencies/pkg/mod/github.com/goccy/go-yaml@v1.11.0/yaml_test.go)89
-rw-r--r--dependencies/pkg/mod/github.com/google/uuid@v1.3.0/.travis.yml9
-rw-r--r--dependencies/pkg/mod/github.com/google/uuid@v1.3.0/CONTRIBUTING.md10
-rw-r--r--dependencies/pkg/mod/github.com/google/uuid@v1.6.0/.github/CODEOWNERS6
-rw-r--r--dependencies/pkg/mod/github.com/google/uuid@v1.6.0/.github/release-please.yml2
-rw-r--r--dependencies/pkg/mod/github.com/google/uuid@v1.6.0/.github/workflows/apidiff.yaml26
-rw-r--r--dependencies/pkg/mod/github.com/google/uuid@v1.6.0/.github/workflows/tests.yaml20
-rw-r--r--dependencies/pkg/mod/github.com/google/uuid@v1.6.0/CHANGELOG.md41
-rw-r--r--dependencies/pkg/mod/github.com/google/uuid@v1.6.0/CONTRIBUTING.md26
-rw-r--r--dependencies/pkg/mod/github.com/google/uuid@v1.6.0/CONTRIBUTORS (renamed from dependencies/pkg/mod/github.com/google/uuid@v1.3.0/CONTRIBUTORS)0
-rw-r--r--dependencies/pkg/mod/github.com/google/uuid@v1.6.0/LICENSE (renamed from dependencies/pkg/mod/github.com/google/uuid@v1.3.0/LICENSE)0
-rw-r--r--dependencies/pkg/mod/github.com/google/uuid@v1.6.0/README.md (renamed from dependencies/pkg/mod/github.com/google/uuid@v1.3.0/README.md)10
-rw-r--r--dependencies/pkg/mod/github.com/google/uuid@v1.6.0/dce.go (renamed from dependencies/pkg/mod/github.com/google/uuid@v1.3.0/dce.go)0
-rw-r--r--dependencies/pkg/mod/github.com/google/uuid@v1.6.0/doc.go (renamed from dependencies/pkg/mod/github.com/google/uuid@v1.3.0/doc.go)0
-rw-r--r--dependencies/pkg/mod/github.com/google/uuid@v1.6.0/go.mod (renamed from dependencies/pkg/mod/github.com/google/uuid@v1.3.0/go.mod)0
-rw-r--r--dependencies/pkg/mod/github.com/google/uuid@v1.6.0/hash.go (renamed from dependencies/pkg/mod/github.com/google/uuid@v1.3.0/hash.go)6
-rw-r--r--dependencies/pkg/mod/github.com/google/uuid@v1.6.0/json_test.go (renamed from dependencies/pkg/mod/github.com/google/uuid@v1.3.0/json_test.go)51
-rw-r--r--dependencies/pkg/mod/github.com/google/uuid@v1.6.0/marshal.go (renamed from dependencies/pkg/mod/github.com/google/uuid@v1.3.0/marshal.go)0
-rw-r--r--dependencies/pkg/mod/github.com/google/uuid@v1.6.0/node.go (renamed from dependencies/pkg/mod/github.com/google/uuid@v1.3.0/node.go)0
-rw-r--r--dependencies/pkg/mod/github.com/google/uuid@v1.6.0/node_js.go (renamed from dependencies/pkg/mod/github.com/google/uuid@v1.3.0/node_js.go)2
-rw-r--r--dependencies/pkg/mod/github.com/google/uuid@v1.6.0/node_net.go (renamed from dependencies/pkg/mod/github.com/google/uuid@v1.3.0/node_net.go)0
-rw-r--r--dependencies/pkg/mod/github.com/google/uuid@v1.6.0/null.go (renamed from dependencies/pkg/mod/github.com/google/uuid@v1.3.0/null.go)0
-rw-r--r--dependencies/pkg/mod/github.com/google/uuid@v1.6.0/null_test.go (renamed from dependencies/pkg/mod/github.com/google/uuid@v1.3.0/null_test.go)0
-rw-r--r--dependencies/pkg/mod/github.com/google/uuid@v1.6.0/seq_test.go (renamed from dependencies/pkg/mod/github.com/google/uuid@v1.3.0/seq_test.go)0
-rw-r--r--dependencies/pkg/mod/github.com/google/uuid@v1.6.0/sql.go (renamed from dependencies/pkg/mod/github.com/google/uuid@v1.3.0/sql.go)0
-rw-r--r--dependencies/pkg/mod/github.com/google/uuid@v1.6.0/sql_test.go (renamed from dependencies/pkg/mod/github.com/google/uuid@v1.3.0/sql_test.go)0
-rw-r--r--dependencies/pkg/mod/github.com/google/uuid@v1.6.0/time.go (renamed from dependencies/pkg/mod/github.com/google/uuid@v1.3.0/time.go)21
-rw-r--r--dependencies/pkg/mod/github.com/google/uuid@v1.6.0/util.go (renamed from dependencies/pkg/mod/github.com/google/uuid@v1.3.0/util.go)0
-rw-r--r--dependencies/pkg/mod/github.com/google/uuid@v1.6.0/uuid.go (renamed from dependencies/pkg/mod/github.com/google/uuid@v1.3.0/uuid.go)89
-rw-r--r--dependencies/pkg/mod/github.com/google/uuid@v1.6.0/uuid_test.go (renamed from dependencies/pkg/mod/github.com/google/uuid@v1.3.0/uuid_test.go)228
-rw-r--r--dependencies/pkg/mod/github.com/google/uuid@v1.6.0/version1.go (renamed from dependencies/pkg/mod/github.com/google/uuid@v1.3.0/version1.go)0
-rw-r--r--dependencies/pkg/mod/github.com/google/uuid@v1.6.0/version4.go (renamed from dependencies/pkg/mod/github.com/google/uuid@v1.3.0/version4.go)0
-rw-r--r--dependencies/pkg/mod/github.com/google/uuid@v1.6.0/version6.go56
-rw-r--r--dependencies/pkg/mod/github.com/google/uuid@v1.6.0/version7.go104
-rw-r--r--dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.22/.codecov.yml (renamed from dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.17/.codecov.yml)0
-rw-r--r--dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.22/.github/FUNDING.yml (renamed from dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.17/.github/FUNDING.yml)0
-rw-r--r--dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.22/.github/workflows/cifuzz.yaml (renamed from dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.17/.github/workflows/cifuzz.yaml)0
-rw-r--r--dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.22/.github/workflows/docker.yaml (renamed from dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.17/.github/workflows/docker.yaml)3
-rw-r--r--dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.22/.github/workflows/go.yaml (renamed from dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.17/.github/workflows/go.yaml)4
-rw-r--r--dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.22/.gitignore (renamed from dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.17/.gitignore)0
-rw-r--r--dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.22/LICENSE (renamed from dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.17/LICENSE)0
-rw-r--r--dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.22/README.md (renamed from dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.17/README.md)0
-rw-r--r--dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.22/_example/custom_driver_name/Makefile (renamed from dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.17/_example/custom_driver_name/Makefile)0
-rw-r--r--dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.22/_example/custom_driver_name/main.go (renamed from dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.17/_example/custom_driver_name/main.go)0
-rw-r--r--dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.22/_example/custom_func/main.go (renamed from dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.17/_example/custom_func/main.go)0
-rw-r--r--dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.22/_example/fuzz/fuzz_openexec.go (renamed from dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.17/_example/fuzz/fuzz_openexec.go)0
-rw-r--r--dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.22/_example/hook/hook.go (renamed from dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.17/_example/hook/hook.go)0
-rw-r--r--dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.22/_example/json/json.go81
-rw-r--r--dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.22/_example/limit/limit.go (renamed from dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.17/_example/limit/limit.go)6
-rw-r--r--dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.22/_example/mod_regexp/Makefile (renamed from dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.17/_example/mod_regexp/Makefile)0
-rw-r--r--dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.22/_example/mod_regexp/extension.go (renamed from dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.17/_example/mod_regexp/extension.go)0
-rw-r--r--dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.22/_example/mod_regexp/sqlite3_mod_regexp.c (renamed from dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.17/_example/mod_regexp/sqlite3_mod_regexp.c)0
-rw-r--r--dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.22/_example/mod_vtable/Makefile (renamed from dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.17/_example/mod_vtable/Makefile)0
-rw-r--r--dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.22/_example/mod_vtable/extension.go (renamed from dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.17/_example/mod_vtable/extension.go)0
-rw-r--r--dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.22/_example/mod_vtable/picojson.h (renamed from dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.17/_example/mod_vtable/picojson.h)0
-rw-r--r--dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.22/_example/mod_vtable/sqlite3_mod_vtable.cc (renamed from dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.17/_example/mod_vtable/sqlite3_mod_vtable.cc)0
-rw-r--r--dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.22/_example/simple/Dockerfile (renamed from dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.17/_example/simple/Dockerfile)4
-rw-r--r--dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.22/_example/simple/simple.go (renamed from dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.17/_example/simple/simple.go)0
-rw-r--r--dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.22/_example/trace/main.go (renamed from dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.17/_example/trace/main.go)0
-rw-r--r--dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.22/_example/vtable/main.go (renamed from dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.17/_example/vtable/main.go)0
-rw-r--r--dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.22/_example/vtable/vtable.go (renamed from dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.17/_example/vtable/vtable.go)2
-rw-r--r--dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.22/_example/vtable_eponymous_only/main.go (renamed from dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.17/_example/vtable_eponymous_only/main.go)0
-rw-r--r--dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.22/_example/vtable_eponymous_only/vtable.go (renamed from dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.17/_example/vtable_eponymous_only/vtable.go)2
-rw-r--r--dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.22/backup.go (renamed from dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.17/backup.go)0
-rw-r--r--dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.22/backup_test.go (renamed from dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.17/backup_test.go)1
-rw-r--r--dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.22/callback.go (renamed from dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.17/callback.go)16
-rw-r--r--dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.22/callback_test.go (renamed from dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.17/callback_test.go)5
-rw-r--r--dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.22/convert.go (renamed from dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.17/convert.go)10
-rw-r--r--dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.22/doc.go (renamed from dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.17/doc.go)105
-rw-r--r--dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.22/error.go (renamed from dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.17/error.go)0
-rw-r--r--dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.22/error_test.go (renamed from dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.17/error_test.go)1
-rw-r--r--dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.22/go.mod (renamed from dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.17/go.mod)2
-rw-r--r--dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.22/go.sum0
-rw-r--r--dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.22/sqlite3-binding.c (renamed from dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.17/sqlite3-binding.c)19269
-rw-r--r--dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.22/sqlite3-binding.h (renamed from dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.17/sqlite3-binding.h)435
-rw-r--r--dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.22/sqlite3.go (renamed from dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.17/sqlite3.go)195
-rw-r--r--dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.22/sqlite3_context.go (renamed from dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.17/sqlite3_context.go)0
-rw-r--r--dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.22/sqlite3_func_crypt.go (renamed from dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.17/sqlite3_func_crypt.go)24
-rw-r--r--dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.22/sqlite3_func_crypt_test.go (renamed from dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.17/sqlite3_func_crypt_test.go)2
-rw-r--r--dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.22/sqlite3_go113_test.go (renamed from dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.17/sqlite3_go113_test.go)5
-rw-r--r--dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.22/sqlite3_go18.go (renamed from dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.17/sqlite3_go18.go)4
-rw-r--r--dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.22/sqlite3_go18_test.go (renamed from dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.17/sqlite3_go18_test.go)1
-rw-r--r--dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.22/sqlite3_libsqlite3.go (renamed from dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.17/sqlite3_libsqlite3.go)1
-rw-r--r--dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.22/sqlite3_load_extension.go (renamed from dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.17/sqlite3_load_extension.go)1
-rw-r--r--dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.22/sqlite3_load_extension_omit.go (renamed from dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.17/sqlite3_load_extension_omit.go)1
-rw-r--r--dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.22/sqlite3_load_extension_test.go (renamed from dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.17/sqlite3_load_extension_test.go)1
-rw-r--r--dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.22/sqlite3_opt_allow_uri_authority.go (renamed from dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.17/sqlite3_opt_allow_uri_authority.go)1
-rw-r--r--dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.22/sqlite3_opt_app_armor.go (renamed from dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.17/sqlite3_opt_app_armor.go)4
-rw-r--r--dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.22/sqlite3_opt_column_metadata.go (renamed from dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.17/sqlite3_opt_column_metadata.go)1
-rw-r--r--dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.22/sqlite3_opt_column_metadata_test.go (renamed from dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.17/sqlite3_opt_column_metadata_test.go)1
-rw-r--r--dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.22/sqlite3_opt_foreign_keys.go (renamed from dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.17/sqlite3_opt_foreign_keys.go)1
-rw-r--r--dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.22/sqlite3_opt_fts3_test.go (renamed from dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.17/sqlite3_opt_fts3_test.go)1
-rw-r--r--dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.22/sqlite3_opt_fts5.go (renamed from dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.17/sqlite3_opt_fts5.go)1
-rw-r--r--dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.22/sqlite3_opt_icu.go (renamed from dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.17/sqlite3_opt_icu.go)1
-rw-r--r--dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.22/sqlite3_opt_introspect.go (renamed from dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.17/sqlite3_opt_introspect.go)1
-rw-r--r--dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.22/sqlite3_opt_math_functions.go (renamed from dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.17/sqlite3_opt_math_functions.go)1
-rw-r--r--dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.22/sqlite3_opt_math_functions_test.go (renamed from dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.17/sqlite3_opt_math_functions_test.go)1
-rw-r--r--dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.22/sqlite3_opt_os_trace.go (renamed from dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.17/sqlite3_opt_os_trace.go)0
-rw-r--r--dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.22/sqlite3_opt_preupdate.go (renamed from dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.17/sqlite3_opt_preupdate.go)1
-rw-r--r--dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.22/sqlite3_opt_preupdate_hook.go (renamed from dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.17/sqlite3_opt_preupdate_hook.go)9
-rw-r--r--dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.22/sqlite3_opt_preupdate_hook_test.go (renamed from dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.17/sqlite3_opt_preupdate_hook_test.go)11
-rw-r--r--dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.22/sqlite3_opt_preupdate_omit.go (renamed from dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.17/sqlite3_opt_preupdate_omit.go)1
-rw-r--r--dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.22/sqlite3_opt_secure_delete.go (renamed from dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.17/sqlite3_opt_secure_delete.go)1
-rw-r--r--dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.22/sqlite3_opt_secure_delete_fast.go (renamed from dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.17/sqlite3_opt_secure_delete_fast.go)1
-rw-r--r--dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.22/sqlite3_opt_serialize.go (renamed from dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.17/sqlite3_opt_serialize.go)1
-rw-r--r--dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.22/sqlite3_opt_serialize_omit.go (renamed from dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.17/sqlite3_opt_serialize_omit.go)1
-rw-r--r--dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.22/sqlite3_opt_serialize_test.go (renamed from dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.17/sqlite3_opt_serialize_test.go)5
-rw-r--r--dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.22/sqlite3_opt_stat4.go (renamed from dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.17/sqlite3_opt_stat4.go)1
-rw-r--r--dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.22/sqlite3_opt_unlock_notify.c (renamed from dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.17/sqlite3_opt_unlock_notify.c)0
-rw-r--r--dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.22/sqlite3_opt_unlock_notify.go (renamed from dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.17/sqlite3_opt_unlock_notify.go)4
-rw-r--r--dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.22/sqlite3_opt_unlock_notify_test.go (renamed from dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.17/sqlite3_opt_unlock_notify_test.go)1
-rw-r--r--dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.22/sqlite3_opt_userauth.go (renamed from dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.17/sqlite3_opt_userauth.go)36
-rw-r--r--dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.22/sqlite3_opt_userauth_omit.go (renamed from dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.17/sqlite3_opt_userauth_omit.go)36
-rw-r--r--dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.22/sqlite3_opt_userauth_test.go (renamed from dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.17/sqlite3_opt_userauth_test.go)1
-rw-r--r--dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.22/sqlite3_opt_vacuum_full.go (renamed from dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.17/sqlite3_opt_vacuum_full.go)1
-rw-r--r--dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.22/sqlite3_opt_vacuum_incr.go (renamed from dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.17/sqlite3_opt_vacuum_incr.go)1
-rw-r--r--dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.22/sqlite3_opt_vtable.go (renamed from dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.17/sqlite3_opt_vtable.go)13
-rw-r--r--dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.22/sqlite3_opt_vtable_test.go (renamed from dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.17/sqlite3_opt_vtable_test.go)26
-rw-r--r--dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.22/sqlite3_other.go (renamed from dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.17/sqlite3_other.go)1
-rw-r--r--dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.22/sqlite3_solaris.go (renamed from dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.17/sqlite3_solaris.go)1
-rw-r--r--dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.22/sqlite3_test.go (renamed from dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.17/sqlite3_test.go)74
-rw-r--r--dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.22/sqlite3_trace.go (renamed from dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.17/sqlite3_trace.go)1
-rw-r--r--dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.22/sqlite3_type.go (renamed from dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.17/sqlite3_type.go)2
-rw-r--r--dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.22/sqlite3_usleep_windows.go (renamed from dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.17/sqlite3_usleep_windows.go)1
-rw-r--r--dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.22/sqlite3_windows.go (renamed from dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.17/sqlite3_windows.go)1
-rw-r--r--dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.22/sqlite3ext.h (renamed from dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.17/sqlite3ext.h)10
-rw-r--r--dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.22/static_mock.go (renamed from dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.17/static_mock.go)5
-rw-r--r--dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.5.1/.github/CODEOWNERS1
-rw-r--r--dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.5.1/.github/FUNDING.yml (renamed from dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/.github/FUNDING.yml)0
-rw-r--r--dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.5.1/.github/ISSUE_TEMPLATE/bug_report.md (renamed from dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/.github/ISSUE_TEMPLATE/bug_report.md)0
-rw-r--r--dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.5.1/.github/ISSUE_TEMPLATE/config.yml (renamed from dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/.github/ISSUE_TEMPLATE/config.yml)0
-rw-r--r--dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.5.1/.github/dependabot.yml (renamed from dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/.github/dependabot.yml)0
-rw-r--r--dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.5.1/.github/release-drafter-config.yml48
-rw-r--r--dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.5.1/.github/spellcheck-settings.yml29
-rw-r--r--dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.5.1/.github/wordlist.txt60
-rw-r--r--dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.5.1/.github/workflows/build.yml (renamed from dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/.github/workflows/build.yml)15
-rw-r--r--dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.5.1/.github/workflows/doctests.yaml41
-rw-r--r--dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.5.1/.github/workflows/golangci-lint.yml26
-rw-r--r--dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.5.1/.github/workflows/release-drafter.yml24
-rw-r--r--dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.5.1/.github/workflows/spellcheck.yml14
-rw-r--r--dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.5.1/.github/workflows/stale-issues.yml25
-rw-r--r--dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.5.1/.github/workflows/test-redis-enterprise.yml59
-rw-r--r--dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.5.1/.gitignore6
-rw-r--r--dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.5.1/.golangci.yml (renamed from dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/.golangci.yml)0
-rw-r--r--dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.5.1/.prettierrc.yml (renamed from dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/.prettierrc.yml)0
-rw-r--r--dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.5.1/CHANGELOG.md124
-rw-r--r--dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.5.1/CONTRIBUTING.md101
-rw-r--r--dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.5.1/LICENSE (renamed from dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/LICENSE)2
-rw-r--r--dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.5.1/Makefile44
-rw-r--r--dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.5.1/README.md274
-rw-r--r--dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.5.1/RELEASING.md (renamed from dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/RELEASING.md)0
-rw-r--r--dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.5.1/acl_commands.go35
-rw-r--r--dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.5.1/bench_decode_test.go (renamed from dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/bench_decode_test.go)35
-rw-r--r--dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.5.1/bench_test.go (renamed from dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/bench_test.go)110
-rw-r--r--dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.5.1/bitmap_commands.go163
-rw-r--r--dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.5.1/bitmap_commands_test.go98
-rw-r--r--dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.5.1/cluster_commands.go192
-rw-r--r--dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.5.1/command.go5483
-rw-r--r--dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.5.1/command_test.go (renamed from dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/command_test.go)6
-rw-r--r--dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.5.1/commands.go718
-rw-r--r--dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.5.1/commands_test.go (renamed from dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/commands_test.go)2312
-rw-r--r--dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.5.1/doc.go (renamed from dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/doc.go)0
-rw-r--r--dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.5.1/doctests/README.md22
-rw-r--r--dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.5.1/doctests/lpush_lrange_test.go48
-rw-r--r--dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.5.1/doctests/set_get_test.go48
-rw-r--r--dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.5.1/error.go (renamed from dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/error.go)18
-rw-r--r--dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.5.1/example_instrumentation_test.go (renamed from dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/example_instrumentation_test.go)44
-rw-r--r--dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.5.1/example_test.go (renamed from dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/example_test.go)91
-rw-r--r--dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.5.1/export_test.go (renamed from dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/export_test.go)31
-rw-r--r--dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.5.1/fuzz/fuzz.go (renamed from dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/fuzz/fuzz.go)2
-rw-r--r--dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.5.1/gears_commands.go149
-rw-r--r--dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.5.1/gears_commands_test.go114
-rw-r--r--dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.5.1/generic_commands.go384
-rw-r--r--dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.5.1/geo_commands.go155
-rw-r--r--dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.5.1/go.mod10
-rw-r--r--dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.5.1/go.sum8
-rw-r--r--dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.5.1/hash_commands.go174
-rw-r--r--dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.5.1/hyperloglog_commands.go42
-rw-r--r--dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.5.1/internal/arg.go (renamed from dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/internal/arg.go)4
-rw-r--r--dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.5.1/internal/hashtag/hashtag.go (renamed from dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/internal/hashtag/hashtag.go)2
-rw-r--r--dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.5.1/internal/hashtag/hashtag_test.go (renamed from dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/internal/hashtag/hashtag_test.go)6
-rw-r--r--dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.5.1/internal/hscan/hscan.go (renamed from dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/internal/hscan/hscan.go)6
-rw-r--r--dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.5.1/internal/hscan/hscan_test.go (renamed from dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/internal/hscan/hscan_test.go)50
-rw-r--r--dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.5.1/internal/hscan/structmap.go (renamed from dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/internal/hscan/structmap.go)36
-rw-r--r--dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.5.1/internal/internal.go (renamed from dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/internal/internal.go)2
-rw-r--r--dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.5.1/internal/internal_test.go (renamed from dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/internal/internal_test.go)2
-rw-r--r--dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.5.1/internal/log.go (renamed from dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/internal/log.go)0
-rw-r--r--dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.5.1/internal/once.go (renamed from dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/internal/once.go)7
-rw-r--r--dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.5.1/internal/pool/bench_test.go (renamed from dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/internal/pool/bench_test.go)20
-rw-r--r--dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.5.1/internal/pool/conn.go (renamed from dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/internal/pool/conn.go)18
-rw-r--r--dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.5.1/internal/pool/conn_check.go49
-rw-r--r--dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.5.1/internal/pool/conn_check_dummy.go9
-rw-r--r--dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.5.1/internal/pool/conn_check_test.go47
-rw-r--r--dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.5.1/internal/pool/export_test.go (renamed from dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/internal/pool/export_test.go)5
-rw-r--r--dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.5.1/internal/pool/main_test.go123
-rw-r--r--dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.5.1/internal/pool/pool.go (renamed from dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/internal/pool/pool.go)227
-rw-r--r--dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.5.1/internal/pool/pool_single.go (renamed from dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/internal/pool/pool_single.go)0
-rw-r--r--dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.5.1/internal/pool/pool_sticky.go (renamed from dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/internal/pool/pool_sticky.go)0
-rw-r--r--dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.5.1/internal/pool/pool_test.go (renamed from dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/internal/pool/pool_test.go)194
-rw-r--r--dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.5.1/internal/proto/proto_test.go (renamed from dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/internal/proto/proto_test.go)4
-rw-r--r--dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.5.1/internal/proto/reader.go552
-rw-r--r--dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.5.1/internal/proto/reader_test.go100
-rw-r--r--dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.5.1/internal/proto/scan.go (renamed from dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/internal/proto/scan.go)7
-rw-r--r--dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.5.1/internal/proto/scan_test.go (renamed from dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/internal/proto/scan_test.go)6
-rw-r--r--dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.5.1/internal/proto/writer.go (renamed from dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/internal/proto/writer.go)42
-rw-r--r--dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.5.1/internal/proto/writer_test.go154
-rw-r--r--dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.5.1/internal/rand/rand.go (renamed from dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/internal/rand/rand.go)0
-rw-r--r--dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.5.1/internal/util.go (renamed from dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/internal/util.go)22
-rw-r--r--dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.5.1/internal/util/safe.go (renamed from dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/internal/util/safe.go)1
-rw-r--r--dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.5.1/internal/util/strconv.go (renamed from dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/internal/util/strconv.go)0
-rw-r--r--dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.5.1/internal/util/type.go5
-rw-r--r--dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.5.1/internal/util/unsafe.go (renamed from dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/internal/util/unsafe.go)1
-rw-r--r--dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.5.1/internal/util_test.go53
-rw-r--r--dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.5.1/internal_test.go354
-rw-r--r--dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.5.1/iterator.go (renamed from dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/iterator.go)13
-rw-r--r--dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.5.1/iterator_test.go (renamed from dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/iterator_test.go)6
-rw-r--r--dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.5.1/json.go599
-rw-r--r--dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.5.1/json_test.go660
-rw-r--r--dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.5.1/list_commands.go289
-rw-r--r--dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.5.1/main_test.go (renamed from dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/main_test.go)196
-rw-r--r--dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.5.1/monitor_test.go48
-rw-r--r--dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.5.1/options.go (renamed from dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/options.go)271
-rw-r--r--dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.5.1/options_test.go (renamed from dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/options_test.go)41
-rw-r--r--dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.5.1/osscluster.go (renamed from dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/cluster.go)579
-rw-r--r--dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.5.1/osscluster_commands.go (renamed from dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/cluster_commands.go)12
-rw-r--r--dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.5.1/osscluster_test.go (renamed from dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/cluster_test.go)499
-rw-r--r--dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.5.1/pipeline.go (renamed from dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/pipeline.go)74
-rw-r--r--dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.5.1/pipeline_test.go (renamed from dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/pipeline_test.go)14
-rw-r--r--dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.5.1/pool_test.go (renamed from dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/pool_test.go)51
-rw-r--r--dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.5.1/probabilistic.go1429
-rw-r--r--dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.5.1/probabilistic_test.go733
-rw-r--r--dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.5.1/pubsub.go (renamed from dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/pubsub.go)99
-rw-r--r--dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.5.1/pubsub_commands.go76
-rw-r--r--dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.5.1/pubsub_test.go (renamed from dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/pubsub_test.go)143
-rw-r--r--dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.5.1/race_test.go (renamed from dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/race_test.go)85
-rw-r--r--dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.5.1/redis.go852
-rw-r--r--dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.5.1/redis_test.go (renamed from dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/redis_test.go)248
-rw-r--r--dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.5.1/result.go (renamed from dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/result.go)22
-rw-r--r--dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.5.1/ring.go (renamed from dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/ring.go)481
-rw-r--r--dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.5.1/ring_test.go (renamed from dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/ring_test.go)287
-rw-r--r--dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.5.1/script.go (renamed from dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/script.go)23
-rw-r--r--dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.5.1/scripting_commands.go215
-rw-r--r--dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.5.1/scripts/bump_deps.sh (renamed from dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/scripts/bump_deps.sh)0
-rw-r--r--dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.5.1/scripts/release.sh (renamed from dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/scripts/release.sh)7
-rw-r--r--dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.5.1/scripts/tag.sh (renamed from dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/scripts/tag.sh)0
-rw-r--r--dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.5.1/sentinel.go (renamed from dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/sentinel.go)368
-rw-r--r--dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.5.1/sentinel_test.go (renamed from dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/sentinel_test.go)110
-rw-r--r--dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.5.1/set_commands.go217
-rw-r--r--dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.5.1/sortedset_commands.go772
-rw-r--r--dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.5.1/stream_commands.go438
-rw-r--r--dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.5.1/string_commands.go303
-rw-r--r--dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.5.1/timeseries_commands.go922
-rw-r--r--dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.5.1/timeseries_commands_test.go940
-rw-r--r--dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.5.1/tx.go (renamed from dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/tx.go)56
-rw-r--r--dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.5.1/tx_test.go (renamed from dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/tx_test.go)11
-rw-r--r--dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.5.1/universal.go (renamed from dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/universal.go)124
-rw-r--r--dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.5.1/universal_test.go (renamed from dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/universal_test.go)9
-rw-r--r--dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.5.1/version.go (renamed from dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/version.go)2
-rw-r--r--dependencies/pkg/mod/go.uber.org/zap@v1.25.0/Makefile82
-rw-r--r--dependencies/pkg/mod/go.uber.org/zap@v1.25.0/array_go118.go156
-rw-r--r--dependencies/pkg/mod/go.uber.org/zap@v1.25.0/array_go118_test.go240
-rw-r--r--dependencies/pkg/mod/go.uber.org/zap@v1.25.0/example_go118_test.go66
-rw-r--r--dependencies/pkg/mod/go.uber.org/zap@v1.27.0/.codecov.yml (renamed from dependencies/pkg/mod/go.uber.org/zap@v1.25.0/.codecov.yml)0
-rw-r--r--dependencies/pkg/mod/go.uber.org/zap@v1.27.0/.github/ISSUE_TEMPLATE/bug_report.md (renamed from dependencies/pkg/mod/go.uber.org/zap@v1.25.0/.github/ISSUE_TEMPLATE/bug_report.md)0
-rw-r--r--dependencies/pkg/mod/go.uber.org/zap@v1.27.0/.github/ISSUE_TEMPLATE/config.yml (renamed from dependencies/pkg/mod/go.uber.org/zap@v1.25.0/.github/ISSUE_TEMPLATE/config.yml)0
-rw-r--r--dependencies/pkg/mod/go.uber.org/zap@v1.27.0/.github/ISSUE_TEMPLATE/feature_request.md (renamed from dependencies/pkg/mod/go.uber.org/zap@v1.25.0/.github/ISSUE_TEMPLATE/feature_request.md)0
-rw-r--r--dependencies/pkg/mod/go.uber.org/zap@v1.27.0/.github/dependabot.yml (renamed from dependencies/pkg/mod/go.uber.org/zap@v1.25.0/.github/dependabot.yml)0
-rw-r--r--dependencies/pkg/mod/go.uber.org/zap@v1.27.0/.github/workflows/fossa.yaml (renamed from dependencies/pkg/mod/go.uber.org/zap@v1.25.0/.github/workflows/fossa.yaml)2
-rw-r--r--dependencies/pkg/mod/go.uber.org/zap@v1.27.0/.github/workflows/go.yml (renamed from dependencies/pkg/mod/go.uber.org/zap@v1.25.0/.github/workflows/go.yml)39
-rw-r--r--dependencies/pkg/mod/go.uber.org/zap@v1.27.0/.gitignore (renamed from dependencies/pkg/mod/go.uber.org/zap@v1.25.0/.gitignore)0
-rw-r--r--dependencies/pkg/mod/go.uber.org/zap@v1.27.0/.golangci.yml77
-rw-r--r--dependencies/pkg/mod/go.uber.org/zap@v1.27.0/.readme.tmpl (renamed from dependencies/pkg/mod/go.uber.org/zap@v1.25.0/.readme.tmpl)10
-rw-r--r--dependencies/pkg/mod/go.uber.org/zap@v1.27.0/CHANGELOG.md (renamed from dependencies/pkg/mod/go.uber.org/zap@v1.25.0/CHANGELOG.md)273
-rw-r--r--dependencies/pkg/mod/go.uber.org/zap@v1.27.0/CODE_OF_CONDUCT.md (renamed from dependencies/pkg/mod/go.uber.org/zap@v1.25.0/CODE_OF_CONDUCT.md)0
-rw-r--r--dependencies/pkg/mod/go.uber.org/zap@v1.27.0/CONTRIBUTING.md (renamed from dependencies/pkg/mod/go.uber.org/zap@v1.25.0/CONTRIBUTING.md)0
-rw-r--r--dependencies/pkg/mod/go.uber.org/zap@v1.27.0/FAQ.md (renamed from dependencies/pkg/mod/go.uber.org/zap@v1.25.0/FAQ.md)0
-rw-r--r--dependencies/pkg/mod/go.uber.org/zap@v1.27.0/LICENSE (renamed from dependencies/pkg/mod/go.uber.org/zap@v1.25.0/LICENSE.txt)0
-rw-r--r--dependencies/pkg/mod/go.uber.org/zap@v1.27.0/Makefile76
-rw-r--r--dependencies/pkg/mod/go.uber.org/zap@v1.27.0/README.md (renamed from dependencies/pkg/mod/go.uber.org/zap@v1.25.0/README.md)66
-rw-r--r--dependencies/pkg/mod/go.uber.org/zap@v1.27.0/array.go (renamed from dependencies/pkg/mod/go.uber.org/zap@v1.25.0/array.go)127
-rw-r--r--dependencies/pkg/mod/go.uber.org/zap@v1.27.0/array_test.go (renamed from dependencies/pkg/mod/go.uber.org/zap@v1.25.0/array_test.go)209
-rw-r--r--dependencies/pkg/mod/go.uber.org/zap@v1.27.0/buffer/buffer.go (renamed from dependencies/pkg/mod/go.uber.org/zap@v1.25.0/buffer/buffer.go)5
-rw-r--r--dependencies/pkg/mod/go.uber.org/zap@v1.27.0/buffer/buffer_test.go (renamed from dependencies/pkg/mod/go.uber.org/zap@v1.25.0/buffer/buffer_test.go)0
-rw-r--r--dependencies/pkg/mod/go.uber.org/zap@v1.27.0/buffer/pool.go (renamed from dependencies/pkg/mod/go.uber.org/zap@v1.25.0/buffer/pool.go)0
-rw-r--r--dependencies/pkg/mod/go.uber.org/zap@v1.27.0/buffer/pool_test.go (renamed from dependencies/pkg/mod/go.uber.org/zap@v1.25.0/buffer/pool_test.go)0
-rw-r--r--dependencies/pkg/mod/go.uber.org/zap@v1.27.0/checklicense.sh (renamed from dependencies/pkg/mod/go.uber.org/zap@v1.25.0/checklicense.sh)0
-rw-r--r--dependencies/pkg/mod/go.uber.org/zap@v1.27.0/clock_test.go (renamed from dependencies/pkg/mod/go.uber.org/zap@v1.25.0/clock_test.go)0
-rw-r--r--dependencies/pkg/mod/go.uber.org/zap@v1.27.0/common_test.go (renamed from dependencies/pkg/mod/go.uber.org/zap@v1.25.0/common_test.go)0
-rw-r--r--dependencies/pkg/mod/go.uber.org/zap@v1.27.0/config.go (renamed from dependencies/pkg/mod/go.uber.org/zap@v1.25.0/config.go)0
-rw-r--r--dependencies/pkg/mod/go.uber.org/zap@v1.27.0/config_test.go (renamed from dependencies/pkg/mod/go.uber.org/zap@v1.25.0/config_test.go)0
-rw-r--r--dependencies/pkg/mod/go.uber.org/zap@v1.27.0/doc.go (renamed from dependencies/pkg/mod/go.uber.org/zap@v1.25.0/doc.go)0
-rw-r--r--dependencies/pkg/mod/go.uber.org/zap@v1.27.0/encoder.go (renamed from dependencies/pkg/mod/go.uber.org/zap@v1.25.0/encoder.go)0
-rw-r--r--dependencies/pkg/mod/go.uber.org/zap@v1.27.0/encoder_test.go (renamed from dependencies/pkg/mod/go.uber.org/zap@v1.25.0/encoder_test.go)4
-rw-r--r--dependencies/pkg/mod/go.uber.org/zap@v1.27.0/error.go (renamed from dependencies/pkg/mod/go.uber.org/zap@v1.25.0/error.go)5
-rw-r--r--dependencies/pkg/mod/go.uber.org/zap@v1.27.0/error_test.go (renamed from dependencies/pkg/mod/go.uber.org/zap@v1.25.0/error_test.go)36
-rw-r--r--dependencies/pkg/mod/go.uber.org/zap@v1.27.0/example_test.go (renamed from dependencies/pkg/mod/go.uber.org/zap@v1.25.0/example_test.go)53
-rw-r--r--dependencies/pkg/mod/go.uber.org/zap@v1.27.0/field.go (renamed from dependencies/pkg/mod/go.uber.org/zap@v1.25.0/field.go)27
-rw-r--r--dependencies/pkg/mod/go.uber.org/zap@v1.27.0/field_test.go (renamed from dependencies/pkg/mod/go.uber.org/zap@v1.25.0/field_test.go)32
-rw-r--r--dependencies/pkg/mod/go.uber.org/zap@v1.27.0/flag.go (renamed from dependencies/pkg/mod/go.uber.org/zap@v1.25.0/flag.go)0
-rw-r--r--dependencies/pkg/mod/go.uber.org/zap@v1.27.0/flag_test.go (renamed from dependencies/pkg/mod/go.uber.org/zap@v1.25.0/flag_test.go)0
-rw-r--r--dependencies/pkg/mod/go.uber.org/zap@v1.27.0/glide.yaml (renamed from dependencies/pkg/mod/go.uber.org/zap@v1.25.0/glide.yaml)0
-rw-r--r--dependencies/pkg/mod/go.uber.org/zap@v1.27.0/global.go (renamed from dependencies/pkg/mod/go.uber.org/zap@v1.25.0/global.go)0
-rw-r--r--dependencies/pkg/mod/go.uber.org/zap@v1.27.0/global_test.go (renamed from dependencies/pkg/mod/go.uber.org/zap@v1.25.0/global_test.go)0
-rw-r--r--dependencies/pkg/mod/go.uber.org/zap@v1.27.0/go.mod (renamed from dependencies/pkg/mod/go.uber.org/zap@v1.25.0/go.mod)3
-rw-r--r--dependencies/pkg/mod/go.uber.org/zap@v1.27.0/go.sum (renamed from dependencies/pkg/mod/go.uber.org/zap@v1.25.0/go.sum)8
-rw-r--r--dependencies/pkg/mod/go.uber.org/zap@v1.27.0/http_handler.go (renamed from dependencies/pkg/mod/go.uber.org/zap@v1.25.0/http_handler.go)19
-rw-r--r--dependencies/pkg/mod/go.uber.org/zap@v1.27.0/http_handler_test.go (renamed from dependencies/pkg/mod/go.uber.org/zap@v1.25.0/http_handler_test.go)29
-rw-r--r--dependencies/pkg/mod/go.uber.org/zap@v1.27.0/increase_level_test.go (renamed from dependencies/pkg/mod/go.uber.org/zap@v1.25.0/increase_level_test.go)0
-rw-r--r--dependencies/pkg/mod/go.uber.org/zap@v1.27.0/internal/bufferpool/bufferpool.go (renamed from dependencies/pkg/mod/go.uber.org/zap@v1.25.0/internal/bufferpool/bufferpool.go)0
-rw-r--r--dependencies/pkg/mod/go.uber.org/zap@v1.27.0/internal/color/color.go (renamed from dependencies/pkg/mod/go.uber.org/zap@v1.25.0/internal/color/color.go)0
-rw-r--r--dependencies/pkg/mod/go.uber.org/zap@v1.27.0/internal/color/color_test.go (renamed from dependencies/pkg/mod/go.uber.org/zap@v1.25.0/internal/color/color_test.go)0
-rw-r--r--dependencies/pkg/mod/go.uber.org/zap@v1.27.0/internal/exit/exit.go (renamed from dependencies/pkg/mod/go.uber.org/zap@v1.25.0/internal/exit/exit.go)0
-rw-r--r--dependencies/pkg/mod/go.uber.org/zap@v1.27.0/internal/exit/exit_test.go (renamed from dependencies/pkg/mod/go.uber.org/zap@v1.25.0/internal/exit/exit_test.go)0
-rw-r--r--dependencies/pkg/mod/go.uber.org/zap@v1.27.0/internal/level_enabler.go (renamed from dependencies/pkg/mod/go.uber.org/zap@v1.25.0/internal/level_enabler.go)0
-rw-r--r--dependencies/pkg/mod/go.uber.org/zap@v1.27.0/internal/pool/pool.go (renamed from dependencies/pkg/mod/go.uber.org/zap@v1.25.0/internal/pool/pool.go)0
-rw-r--r--dependencies/pkg/mod/go.uber.org/zap@v1.27.0/internal/pool/pool_test.go (renamed from dependencies/pkg/mod/go.uber.org/zap@v1.25.0/internal/pool/pool_test.go)0
-rw-r--r--dependencies/pkg/mod/go.uber.org/zap@v1.27.0/internal/readme/readme.go (renamed from dependencies/pkg/mod/go.uber.org/zap@v1.25.0/internal/readme/readme.go)25
-rw-r--r--dependencies/pkg/mod/go.uber.org/zap@v1.27.0/internal/stacktrace/stack.go (renamed from dependencies/pkg/mod/go.uber.org/zap@v1.25.0/stacktrace.go)71
-rw-r--r--dependencies/pkg/mod/go.uber.org/zap@v1.27.0/internal/stacktrace/stack_test.go (renamed from dependencies/pkg/mod/go.uber.org/zap@v1.25.0/stacktrace_test.go)28
-rw-r--r--dependencies/pkg/mod/go.uber.org/zap@v1.27.0/internal/ztest/clock.go153
-rw-r--r--dependencies/pkg/mod/go.uber.org/zap@v1.27.0/internal/ztest/clock_test.go (renamed from dependencies/pkg/mod/go.uber.org/zap@v1.25.0/internal/ztest/clock_test.go)25
-rw-r--r--dependencies/pkg/mod/go.uber.org/zap@v1.27.0/internal/ztest/doc.go (renamed from dependencies/pkg/mod/go.uber.org/zap@v1.25.0/internal/ztest/doc.go)0
-rw-r--r--dependencies/pkg/mod/go.uber.org/zap@v1.27.0/internal/ztest/timeout.go (renamed from dependencies/pkg/mod/go.uber.org/zap@v1.25.0/internal/ztest/timeout.go)0
-rw-r--r--dependencies/pkg/mod/go.uber.org/zap@v1.27.0/internal/ztest/writer.go (renamed from dependencies/pkg/mod/go.uber.org/zap@v1.25.0/internal/ztest/writer.go)0
-rw-r--r--dependencies/pkg/mod/go.uber.org/zap@v1.27.0/leak_test.go (renamed from dependencies/pkg/mod/go.uber.org/zap@v1.25.0/leak_test.go)0
-rw-r--r--dependencies/pkg/mod/go.uber.org/zap@v1.27.0/level.go (renamed from dependencies/pkg/mod/go.uber.org/zap@v1.25.0/level.go)0
-rw-r--r--dependencies/pkg/mod/go.uber.org/zap@v1.27.0/level_test.go (renamed from dependencies/pkg/mod/go.uber.org/zap@v1.25.0/level_test.go)0
-rw-r--r--dependencies/pkg/mod/go.uber.org/zap@v1.27.0/logger.go (renamed from dependencies/pkg/mod/go.uber.org/zap@v1.25.0/logger.go)81
-rw-r--r--dependencies/pkg/mod/go.uber.org/zap@v1.27.0/logger_bench_test.go (renamed from dependencies/pkg/mod/go.uber.org/zap@v1.25.0/logger_bench_test.go)43
-rw-r--r--dependencies/pkg/mod/go.uber.org/zap@v1.27.0/logger_test.go (renamed from dependencies/pkg/mod/go.uber.org/zap@v1.25.0/logger_test.go)382
-rw-r--r--dependencies/pkg/mod/go.uber.org/zap@v1.27.0/options.go (renamed from dependencies/pkg/mod/go.uber.org/zap@v1.25.0/options.go)15
-rw-r--r--dependencies/pkg/mod/go.uber.org/zap@v1.27.0/sink.go (renamed from dependencies/pkg/mod/go.uber.org/zap@v1.25.0/sink.go)5
-rw-r--r--dependencies/pkg/mod/go.uber.org/zap@v1.27.0/sink_test.go (renamed from dependencies/pkg/mod/go.uber.org/zap@v1.25.0/sink_test.go)5
-rw-r--r--dependencies/pkg/mod/go.uber.org/zap@v1.27.0/sink_windows_test.go (renamed from dependencies/pkg/mod/go.uber.org/zap@v1.25.0/sink_windows_test.go)0
-rw-r--r--dependencies/pkg/mod/go.uber.org/zap@v1.27.0/stacktrace_ext_test.go (renamed from dependencies/pkg/mod/go.uber.org/zap@v1.25.0/stacktrace_ext_test.go)4
-rw-r--r--dependencies/pkg/mod/go.uber.org/zap@v1.27.0/sugar.go (renamed from dependencies/pkg/mod/go.uber.org/zap@v1.25.0/sugar.go)39
-rw-r--r--dependencies/pkg/mod/go.uber.org/zap@v1.27.0/sugar_test.go (renamed from dependencies/pkg/mod/go.uber.org/zap@v1.25.0/sugar_test.go)119
-rw-r--r--dependencies/pkg/mod/go.uber.org/zap@v1.27.0/time.go (renamed from dependencies/pkg/mod/go.uber.org/zap@v1.25.0/time.go)0
-rw-r--r--dependencies/pkg/mod/go.uber.org/zap@v1.27.0/time_test.go (renamed from dependencies/pkg/mod/go.uber.org/zap@v1.25.0/time_test.go)0
-rw-r--r--dependencies/pkg/mod/go.uber.org/zap@v1.27.0/writer.go (renamed from dependencies/pkg/mod/go.uber.org/zap@v1.25.0/writer.go)12
-rw-r--r--dependencies/pkg/mod/go.uber.org/zap@v1.27.0/writer_test.go (renamed from dependencies/pkg/mod/go.uber.org/zap@v1.25.0/writer_test.go)4
-rw-r--r--dependencies/pkg/mod/go.uber.org/zap@v1.27.0/zapcore/buffered_write_syncer.go (renamed from dependencies/pkg/mod/go.uber.org/zap@v1.25.0/zapcore/buffered_write_syncer.go)0
-rw-r--r--dependencies/pkg/mod/go.uber.org/zap@v1.27.0/zapcore/buffered_write_syncer_bench_test.go (renamed from dependencies/pkg/mod/go.uber.org/zap@v1.25.0/zapcore/buffered_write_syncer_bench_test.go)8
-rw-r--r--dependencies/pkg/mod/go.uber.org/zap@v1.27.0/zapcore/buffered_write_syncer_test.go (renamed from dependencies/pkg/mod/go.uber.org/zap@v1.25.0/zapcore/buffered_write_syncer_test.go)3
-rw-r--r--dependencies/pkg/mod/go.uber.org/zap@v1.27.0/zapcore/clock.go (renamed from dependencies/pkg/mod/go.uber.org/zap@v1.25.0/zapcore/clock.go)0
-rw-r--r--dependencies/pkg/mod/go.uber.org/zap@v1.27.0/zapcore/clock_test.go (renamed from dependencies/pkg/mod/go.uber.org/zap@v1.25.0/zapcore/clock_test.go)0
-rw-r--r--dependencies/pkg/mod/go.uber.org/zap@v1.27.0/zapcore/console_encoder.go (renamed from dependencies/pkg/mod/go.uber.org/zap@v1.25.0/zapcore/console_encoder.go)2
-rw-r--r--dependencies/pkg/mod/go.uber.org/zap@v1.27.0/zapcore/console_encoder_bench_test.go (renamed from dependencies/pkg/mod/go.uber.org/zap@v1.25.0/zapcore/console_encoder_bench_test.go)1
-rw-r--r--dependencies/pkg/mod/go.uber.org/zap@v1.27.0/zapcore/console_encoder_test.go (renamed from dependencies/pkg/mod/go.uber.org/zap@v1.25.0/zapcore/console_encoder_test.go)62
-rw-r--r--dependencies/pkg/mod/go.uber.org/zap@v1.27.0/zapcore/core.go (renamed from dependencies/pkg/mod/go.uber.org/zap@v1.25.0/zapcore/core.go)6
-rw-r--r--dependencies/pkg/mod/go.uber.org/zap@v1.27.0/zapcore/core_test.go (renamed from dependencies/pkg/mod/go.uber.org/zap@v1.25.0/zapcore/core_test.go)3
-rw-r--r--dependencies/pkg/mod/go.uber.org/zap@v1.27.0/zapcore/doc.go (renamed from dependencies/pkg/mod/go.uber.org/zap@v1.25.0/zapcore/doc.go)0
-rw-r--r--dependencies/pkg/mod/go.uber.org/zap@v1.27.0/zapcore/encoder.go (renamed from dependencies/pkg/mod/go.uber.org/zap@v1.25.0/zapcore/encoder.go)15
-rw-r--r--dependencies/pkg/mod/go.uber.org/zap@v1.27.0/zapcore/encoder_test.go (renamed from dependencies/pkg/mod/go.uber.org/zap@v1.25.0/zapcore/encoder_test.go)10
-rw-r--r--dependencies/pkg/mod/go.uber.org/zap@v1.27.0/zapcore/entry.go (renamed from dependencies/pkg/mod/go.uber.org/zap@v1.25.0/zapcore/entry.go)4
-rw-r--r--dependencies/pkg/mod/go.uber.org/zap@v1.27.0/zapcore/entry_ext_test.go55
-rw-r--r--dependencies/pkg/mod/go.uber.org/zap@v1.27.0/zapcore/entry_test.go (renamed from dependencies/pkg/mod/go.uber.org/zap@v1.25.0/zapcore/entry_test.go)0
-rw-r--r--dependencies/pkg/mod/go.uber.org/zap@v1.27.0/zapcore/error.go (renamed from dependencies/pkg/mod/go.uber.org/zap@v1.25.0/zapcore/error.go)5
-rw-r--r--dependencies/pkg/mod/go.uber.org/zap@v1.27.0/zapcore/error_test.go (renamed from dependencies/pkg/mod/go.uber.org/zap@v1.25.0/zapcore/error_test.go)47
-rw-r--r--dependencies/pkg/mod/go.uber.org/zap@v1.27.0/zapcore/field.go (renamed from dependencies/pkg/mod/go.uber.org/zap@v1.25.0/zapcore/field.go)2
-rw-r--r--dependencies/pkg/mod/go.uber.org/zap@v1.27.0/zapcore/field_test.go (renamed from dependencies/pkg/mod/go.uber.org/zap@v1.25.0/zapcore/field_test.go)15
-rw-r--r--dependencies/pkg/mod/go.uber.org/zap@v1.27.0/zapcore/hook.go (renamed from dependencies/pkg/mod/go.uber.org/zap@v1.25.0/zapcore/hook.go)0
-rw-r--r--dependencies/pkg/mod/go.uber.org/zap@v1.27.0/zapcore/hook_test.go (renamed from dependencies/pkg/mod/go.uber.org/zap@v1.25.0/zapcore/hook_test.go)1
-rw-r--r--dependencies/pkg/mod/go.uber.org/zap@v1.27.0/zapcore/increase_level.go (renamed from dependencies/pkg/mod/go.uber.org/zap@v1.25.0/zapcore/increase_level.go)0
-rw-r--r--dependencies/pkg/mod/go.uber.org/zap@v1.27.0/zapcore/increase_level_test.go (renamed from dependencies/pkg/mod/go.uber.org/zap@v1.25.0/zapcore/increase_level_test.go)2
-rw-r--r--dependencies/pkg/mod/go.uber.org/zap@v1.27.0/zapcore/json_encoder.go (renamed from dependencies/pkg/mod/go.uber.org/zap@v1.25.0/zapcore/json_encoder.go)145
-rw-r--r--dependencies/pkg/mod/go.uber.org/zap@v1.27.0/zapcore/json_encoder_bench_test.go (renamed from dependencies/pkg/mod/go.uber.org/zap@v1.25.0/zapcore/json_encoder_bench_test.go)43
-rw-r--r--dependencies/pkg/mod/go.uber.org/zap@v1.27.0/zapcore/json_encoder_impl_test.go (renamed from dependencies/pkg/mod/go.uber.org/zap@v1.25.0/zapcore/json_encoder_impl_test.go)91
-rw-r--r--dependencies/pkg/mod/go.uber.org/zap@v1.27.0/zapcore/json_encoder_test.go (renamed from dependencies/pkg/mod/go.uber.org/zap@v1.25.0/zapcore/json_encoder_test.go)14
-rw-r--r--dependencies/pkg/mod/go.uber.org/zap@v1.27.0/zapcore/lazy_with.go (renamed from dependencies/pkg/mod/go.uber.org/zap@v1.25.0/internal/ztest/clock.go)46
-rw-r--r--dependencies/pkg/mod/go.uber.org/zap@v1.27.0/zapcore/lazy_with_test.go154
-rw-r--r--dependencies/pkg/mod/go.uber.org/zap@v1.27.0/zapcore/leak_test.go (renamed from dependencies/pkg/mod/go.uber.org/zap@v1.25.0/zapcore/leak_test.go)0
-rw-r--r--dependencies/pkg/mod/go.uber.org/zap@v1.27.0/zapcore/level.go (renamed from dependencies/pkg/mod/go.uber.org/zap@v1.25.0/zapcore/level.go)0
-rw-r--r--dependencies/pkg/mod/go.uber.org/zap@v1.27.0/zapcore/level_strings.go (renamed from dependencies/pkg/mod/go.uber.org/zap@v1.25.0/zapcore/level_strings.go)0
-rw-r--r--dependencies/pkg/mod/go.uber.org/zap@v1.27.0/zapcore/level_strings_test.go (renamed from dependencies/pkg/mod/go.uber.org/zap@v1.25.0/zapcore/level_strings_test.go)0
-rw-r--r--dependencies/pkg/mod/go.uber.org/zap@v1.27.0/zapcore/level_test.go (renamed from dependencies/pkg/mod/go.uber.org/zap@v1.25.0/zapcore/level_test.go)2
-rw-r--r--dependencies/pkg/mod/go.uber.org/zap@v1.27.0/zapcore/marshaler.go (renamed from dependencies/pkg/mod/go.uber.org/zap@v1.25.0/zapcore/marshaler.go)0
-rw-r--r--dependencies/pkg/mod/go.uber.org/zap@v1.27.0/zapcore/memory_encoder.go (renamed from dependencies/pkg/mod/go.uber.org/zap@v1.25.0/zapcore/memory_encoder.go)0
-rw-r--r--dependencies/pkg/mod/go.uber.org/zap@v1.27.0/zapcore/memory_encoder_test.go (renamed from dependencies/pkg/mod/go.uber.org/zap@v1.25.0/zapcore/memory_encoder_test.go)28
-rw-r--r--dependencies/pkg/mod/go.uber.org/zap@v1.27.0/zapcore/reflected_encoder.go (renamed from dependencies/pkg/mod/go.uber.org/zap@v1.25.0/zapcore/reflected_encoder.go)0
-rw-r--r--dependencies/pkg/mod/go.uber.org/zap@v1.27.0/zapcore/sampler.go (renamed from dependencies/pkg/mod/go.uber.org/zap@v1.25.0/zapcore/sampler.go)0
-rw-r--r--dependencies/pkg/mod/go.uber.org/zap@v1.27.0/zapcore/sampler_bench_test.go (renamed from dependencies/pkg/mod/go.uber.org/zap@v1.25.0/zapcore/sampler_bench_test.go)2
-rw-r--r--dependencies/pkg/mod/go.uber.org/zap@v1.27.0/zapcore/sampler_test.go (renamed from dependencies/pkg/mod/go.uber.org/zap@v1.25.0/zapcore/sampler_test.go)1
-rw-r--r--dependencies/pkg/mod/go.uber.org/zap@v1.27.0/zapcore/tee.go (renamed from dependencies/pkg/mod/go.uber.org/zap@v1.25.0/zapcore/tee.go)0
-rw-r--r--dependencies/pkg/mod/go.uber.org/zap@v1.27.0/zapcore/tee_logger_bench_test.go (renamed from dependencies/pkg/mod/go.uber.org/zap@v1.25.0/zapcore/tee_logger_bench_test.go)1
-rw-r--r--dependencies/pkg/mod/go.uber.org/zap@v1.27.0/zapcore/tee_test.go (renamed from dependencies/pkg/mod/go.uber.org/zap@v1.25.0/zapcore/tee_test.go)3
-rw-r--r--dependencies/pkg/mod/go.uber.org/zap@v1.27.0/zapcore/write_syncer.go (renamed from dependencies/pkg/mod/go.uber.org/zap@v1.25.0/zapcore/write_syncer.go)0
-rw-r--r--dependencies/pkg/mod/go.uber.org/zap@v1.27.0/zapcore/write_syncer_bench_test.go (renamed from dependencies/pkg/mod/go.uber.org/zap@v1.25.0/zapcore/write_syncer_bench_test.go)21
-rw-r--r--dependencies/pkg/mod/go.uber.org/zap@v1.27.0/zapcore/write_syncer_test.go (renamed from dependencies/pkg/mod/go.uber.org/zap@v1.25.0/zapcore/write_syncer_test.go)2
-rw-r--r--dependencies/pkg/mod/go.uber.org/zap@v1.27.0/zapgrpc/zapgrpc.go (renamed from dependencies/pkg/mod/go.uber.org/zap@v1.25.0/zapgrpc/zapgrpc.go)18
-rw-r--r--dependencies/pkg/mod/go.uber.org/zap@v1.27.0/zapgrpc/zapgrpc_test.go (renamed from dependencies/pkg/mod/go.uber.org/zap@v1.25.0/zapgrpc/zapgrpc_test.go)0
-rw-r--r--dependencies/pkg/mod/go.uber.org/zap@v1.27.0/zapio/example_test.go (renamed from dependencies/pkg/mod/go.uber.org/zap@v1.25.0/zapio/example_test.go)0
-rw-r--r--dependencies/pkg/mod/go.uber.org/zap@v1.27.0/zapio/writer.go (renamed from dependencies/pkg/mod/go.uber.org/zap@v1.25.0/zapio/writer.go)0
-rw-r--r--dependencies/pkg/mod/go.uber.org/zap@v1.27.0/zapio/writer_test.go (renamed from dependencies/pkg/mod/go.uber.org/zap@v1.25.0/zapio/writer_test.go)0
-rw-r--r--dependencies/pkg/mod/go.uber.org/zap@v1.27.0/zaptest/doc.go (renamed from dependencies/pkg/mod/go.uber.org/zap@v1.25.0/zaptest/doc.go)0
-rw-r--r--dependencies/pkg/mod/go.uber.org/zap@v1.27.0/zaptest/logger.go (renamed from dependencies/pkg/mod/go.uber.org/zap@v1.25.0/zaptest/logger.go)37
-rw-r--r--dependencies/pkg/mod/go.uber.org/zap@v1.27.0/zaptest/logger_test.go (renamed from dependencies/pkg/mod/go.uber.org/zap@v1.25.0/zaptest/logger_test.go)2
-rw-r--r--dependencies/pkg/mod/go.uber.org/zap@v1.27.0/zaptest/observer/logged_entry.go (renamed from dependencies/pkg/mod/go.uber.org/zap@v1.25.0/zaptest/observer/logged_entry.go)0
-rw-r--r--dependencies/pkg/mod/go.uber.org/zap@v1.27.0/zaptest/observer/logged_entry_test.go (renamed from dependencies/pkg/mod/go.uber.org/zap@v1.25.0/zaptest/observer/logged_entry_test.go)0
-rw-r--r--dependencies/pkg/mod/go.uber.org/zap@v1.27.0/zaptest/observer/observer.go (renamed from dependencies/pkg/mod/go.uber.org/zap@v1.25.0/zaptest/observer/observer.go)0
-rw-r--r--dependencies/pkg/mod/go.uber.org/zap@v1.27.0/zaptest/observer/observer_test.go (renamed from dependencies/pkg/mod/go.uber.org/zap@v1.25.0/zaptest/observer/observer_test.go)4
-rw-r--r--dependencies/pkg/mod/go.uber.org/zap@v1.27.0/zaptest/testingt.go (renamed from dependencies/pkg/mod/go.uber.org/zap@v1.25.0/zaptest/testingt.go)0
-rw-r--r--dependencies/pkg/mod/go.uber.org/zap@v1.27.0/zaptest/testingt_test.go (renamed from dependencies/pkg/mod/go.uber.org/zap@v1.25.0/zaptest/testingt_test.go)0
-rw-r--r--dependencies/pkg/mod/go.uber.org/zap@v1.27.0/zaptest/timeout.go (renamed from dependencies/pkg/mod/go.uber.org/zap@v1.25.0/zaptest/timeout.go)0
-rw-r--r--dependencies/pkg/mod/go.uber.org/zap@v1.27.0/zaptest/timeout_test.go (renamed from dependencies/pkg/mod/go.uber.org/zap@v1.25.0/zaptest/timeout_test.go)0
-rw-r--r--dependencies/pkg/mod/go.uber.org/zap@v1.27.0/zaptest/writer.go (renamed from dependencies/pkg/mod/go.uber.org/zap@v1.25.0/zaptest/writer.go)0
-rw-r--r--dependencies/pkg/mod/go.uber.org/zap@v1.27.0/zaptest/writer_test.go (renamed from dependencies/pkg/mod/go.uber.org/zap@v1.25.0/zaptest/writer_test.go)0
-rw-r--r--dependencies/pkg/mod/golang.org/x/sync@v0.3.0/syncmap/map.go8
-rw-r--r--dependencies/pkg/mod/golang.org/x/sync@v0.3.0/syncmap/pre_go19.go371
-rw-r--r--dependencies/pkg/mod/golang.org/x/sync@v0.7.0/CONTRIBUTING.md (renamed from dependencies/pkg/mod/golang.org/x/sync@v0.3.0/CONTRIBUTING.md)0
-rw-r--r--dependencies/pkg/mod/golang.org/x/sync@v0.7.0/LICENSE27
-rw-r--r--dependencies/pkg/mod/golang.org/x/sync@v0.7.0/PATENTS (renamed from dependencies/pkg/mod/golang.org/x/sync@v0.3.0/PATENTS)0
-rw-r--r--dependencies/pkg/mod/golang.org/x/sync@v0.7.0/README.md (renamed from dependencies/pkg/mod/golang.org/x/sync@v0.3.0/README.md)0
-rw-r--r--dependencies/pkg/mod/golang.org/x/sync@v0.7.0/codereview.cfg (renamed from dependencies/pkg/mod/golang.org/x/sync@v0.3.0/codereview.cfg)0
-rw-r--r--dependencies/pkg/mod/golang.org/x/sync@v0.7.0/errgroup/errgroup.go (renamed from dependencies/pkg/mod/golang.org/x/sync@v0.3.0/errgroup/errgroup.go)3
-rw-r--r--dependencies/pkg/mod/golang.org/x/sync@v0.7.0/errgroup/errgroup_example_md5all_test.go (renamed from dependencies/pkg/mod/golang.org/x/sync@v0.3.0/errgroup/errgroup_example_md5all_test.go)0
-rw-r--r--dependencies/pkg/mod/golang.org/x/sync@v0.7.0/errgroup/errgroup_test.go (renamed from dependencies/pkg/mod/golang.org/x/sync@v0.3.0/errgroup/errgroup_test.go)0
-rw-r--r--dependencies/pkg/mod/golang.org/x/sync@v0.7.0/errgroup/go120.go (renamed from dependencies/pkg/mod/golang.org/x/sync@v0.3.0/errgroup/go120.go)1
-rw-r--r--dependencies/pkg/mod/golang.org/x/sync@v0.7.0/errgroup/go120_test.go (renamed from dependencies/pkg/mod/golang.org/x/sync@v0.3.0/errgroup/go120_test.go)1
-rw-r--r--dependencies/pkg/mod/golang.org/x/sync@v0.7.0/errgroup/pre_go120.go (renamed from dependencies/pkg/mod/golang.org/x/sync@v0.3.0/errgroup/pre_go120.go)1
-rw-r--r--dependencies/pkg/mod/golang.org/x/sync@v0.7.0/go.mod (renamed from dependencies/pkg/mod/golang.org/x/sync@v0.3.0/go.mod)2
-rw-r--r--dependencies/pkg/mod/golang.org/x/sync@v0.7.0/semaphore/semaphore.go (renamed from dependencies/pkg/mod/golang.org/x/sync@v0.3.0/semaphore/semaphore.go)42
-rw-r--r--dependencies/pkg/mod/golang.org/x/sync@v0.7.0/semaphore/semaphore_bench_test.go (renamed from dependencies/pkg/mod/golang.org/x/sync@v0.3.0/semaphore/semaphore_bench_test.go)3
-rw-r--r--dependencies/pkg/mod/golang.org/x/sync@v0.7.0/semaphore/semaphore_example_test.go (renamed from dependencies/pkg/mod/golang.org/x/sync@v0.3.0/semaphore/semaphore_example_test.go)0
-rw-r--r--dependencies/pkg/mod/golang.org/x/sync@v0.7.0/semaphore/semaphore_test.go (renamed from dependencies/pkg/mod/golang.org/x/sync@v0.3.0/semaphore/semaphore_test.go)35
-rw-r--r--dependencies/pkg/mod/golang.org/x/sync@v0.7.0/singleflight/singleflight.go (renamed from dependencies/pkg/mod/golang.org/x/sync@v0.3.0/singleflight/singleflight.go)9
-rw-r--r--dependencies/pkg/mod/golang.org/x/sync@v0.7.0/singleflight/singleflight_test.go (renamed from dependencies/pkg/mod/golang.org/x/sync@v0.3.0/singleflight/singleflight_test.go)63
-rw-r--r--dependencies/pkg/mod/golang.org/x/sync@v0.7.0/syncmap/map.go (renamed from dependencies/pkg/mod/golang.org/x/sync@v0.3.0/syncmap/go19.go)6
-rw-r--r--dependencies/pkg/mod/golang.org/x/sync@v0.7.0/syncmap/map_bench_test.go (renamed from dependencies/pkg/mod/golang.org/x/sync@v0.3.0/syncmap/map_bench_test.go)0
-rw-r--r--dependencies/pkg/mod/golang.org/x/sync@v0.7.0/syncmap/map_reference_test.go (renamed from dependencies/pkg/mod/golang.org/x/sync@v0.3.0/syncmap/map_reference_test.go)0
-rw-r--r--dependencies/pkg/mod/golang.org/x/sync@v0.7.0/syncmap/map_test.go (renamed from dependencies/pkg/mod/golang.org/x/sync@v0.3.0/syncmap/map_test.go)0
-rw-r--r--dependencies/pkg/sumdb/sum.golang.org/latest6
-rw-r--r--doc/01-About.md7
-rw-r--r--doc/02-Installation.md12
-rw-r--r--doc/03-Configuration.md120
-rw-r--r--doc/04-Upgrading.md106
-rw-r--r--doc/05-Distributed-Setups.md16
-rw-r--r--doc/TRADEMARKS.md13
-rw-r--r--doc/images/icingadb-architecture.pngbin563761 -> 454289 bytes
-rw-r--r--doc/images/icingadb-daemon.pngbin527021 -> 419862 bytes
-rw-r--r--doc/images/icingadb-database.pngbin528639 -> 418924 bytes
-rw-r--r--doc/images/icingadb-envs.pngbin660235 -> 470088 bytes
-rw-r--r--doc/images/icingadb-ha.pngbin642860 -> 764923 bytes
-rw-r--r--doc/images/icingadb-web.pngbin532529 -> 424776 bytes
-rw-r--r--go.mod22
-rw-r--r--go.sum61
-rw-r--r--internal/version.go2
-rw-r--r--pkg/backoff/backoff.go4
-rw-r--r--pkg/config/config.go24
-rw-r--r--pkg/config/config_test.go50
-rw-r--r--pkg/config/database.go75
-rw-r--r--pkg/config/redis.go10
-rw-r--r--pkg/driver/driver.go114
-rw-r--r--pkg/driver/pgsql.go22
-rw-r--r--pkg/flatten/flatten.go11
-rw-r--r--pkg/flatten/flatten_test.go45
-rw-r--r--pkg/icingadb/cleanup.go47
-rw-r--r--pkg/icingadb/db.go66
-rw-r--r--pkg/icingadb/driver.go90
-rw-r--r--pkg/icingadb/dump_signals.go2
-rw-r--r--pkg/icingadb/ha.go208
-rw-r--r--pkg/icingadb/history/sla.go2
-rw-r--r--pkg/icingadb/history/sync.go8
-rw-r--r--pkg/icingadb/objectpacker/objectpacker.go13
-rw-r--r--pkg/icingadb/overdue/sync.go2
-rw-r--r--pkg/icingadb/runtime_updates.go2
-rw-r--r--pkg/icingadb/v1/checkable.go2
-rw-r--r--pkg/icingadb/v1/history/state.go2
-rw-r--r--pkg/icingadb/v1/state.go2
-rw-r--r--pkg/icingaredis/client.go2
-rw-r--r--pkg/icingaredis/heartbeat.go14
-rw-r--r--pkg/icingaredis/telemetry/heartbeat.go2
-rw-r--r--pkg/icingaredis/telemetry/stats.go2
-rw-r--r--pkg/icingaredis/utils.go2
-rw-r--r--pkg/retry/retry.go141
-rw-r--r--pkg/types/string.go8
-rw-r--r--schema/mysql/schema.sql10
-rw-r--r--schema/mysql/upgrades/1.0.0-rc2.sql2
-rw-r--r--schema/mysql/upgrades/1.0.0.sql2
-rw-r--r--schema/mysql/upgrades/1.1.1.sql2
-rw-r--r--schema/mysql/upgrades/1.2.0.sql13
-rw-r--r--schema/mysql/upgrades/optional/1.2.0-history.sql1
-rw-r--r--schema/pgsql/schema.sql14
-rw-r--r--schema/pgsql/upgrades/1.2.0.sql153
-rw-r--r--schema/pgsql/upgrades/optional/1.2-0-history.sql3
-rw-r--r--tests/go.mod48
-rw-r--r--tests/go.sum1090
-rw-r--r--tests/history_bench_test.go13
-rw-r--r--tests/history_test.go8
-rw-r--r--tests/internal/utils/redis.go2
-rw-r--r--tests/object_sync_test.go2
-rw-r--r--tests/sla_test.go13
-rw-r--r--tests/sql/sla_test.go36
866 files changed, 51693 insertions, 21320 deletions
diff --git a/.github/dependabot.yml b/.github/dependabot.yml
index 3395774..9512ea0 100644
--- a/.github/dependabot.yml
+++ b/.github/dependabot.yml
@@ -1,12 +1,14 @@
version: 2
updates:
+
- package-ecosystem: gomod
directory: "/"
schedule:
- interval: weekly
- time: '10:00'
+ interval: daily
+ open-pull-requests-limit: 10
+
+- package-ecosystem: gomod
+ directory: "/tests"
+ schedule:
+ interval: daily
open-pull-requests-limit: 10
- reviewers:
- - Al2Klimov
- - julianbrost
- - lippserd
diff --git a/.mailmap b/.mailmap
index edbf393..bd13a63 100644
--- a/.mailmap
+++ b/.mailmap
@@ -3,5 +3,6 @@ Diana Flach <crunsher@bamberg.ccc.de> <Crunsher@users.noreply.github.com>
Diana Flach <crunsher@bamberg.ccc.de> <jean-marcel.flach@icinga.com>
Diana Flach <crunsher@bamberg.ccc.de> <jean.flach@icinga.com>
Henrik Triem <henrik.triem@icinga.com> <43344334+htriem@users.noreply.github.com>
+Julian Brost <julian.brost@icinga.com> <julian.brost@gmail.com>
<henrik.triem@icinga.com> <henrik.triem@netways.de>
<yonas.habteab@icinga.com> <yonas.habteab@netways.de>
diff --git a/AUTHORS b/AUTHORS
index 37c365d..7971eb2 100644
--- a/AUTHORS
+++ b/AUTHORS
@@ -1,4 +1,5 @@
Alexander A. Klimov <alexander.klimov@icinga.com>
+Alvar Penning <alvar.penning@icinga.com>
Eric Lippmann <eric.lippmann@icinga.com>
Feu Mourek <feu.mourek@icinga.com>
Henrik Triem <henrik.triem@icinga.com>
diff --git a/CHANGELOG.md b/CHANGELOG.md
index c0c8aea..73b88fb 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -1,5 +1,39 @@
# Icinga DB Changelog
+## 1.2.0 (2024-04-11)
+
+This release addresses multiple issues related to fault recoveries,
+with a particular focus on retryable database errors that may occur when using Icinga DB with database clusters.
+
+Since there may be a large number of errors that are resolved by retrying after a certain amount of time,
+#698 changed the retry behavior to retry every database-related error for five minutes.
+This helps Icinga DB survive network hiccups or more complicated database situations,
+such as working with a database cluster.
+
+The latter was specifically addressed in #711 for Galera Clusters on MySQL or MariaDB by configuring `wsrep_sync_wait` on used database sessions.
+Galera users should refer to the [Configuration documentation](doc/03-Configuration.md#database-options) for more details.
+
+In summary, the most notable changes are as follows:
+
+* Custom Variables: Render large numbers as-is, not using scientific notation. #657
+* Enhance retries for database errors and other failures for up to five minutes. #693, #698, #739, #740
+* MySQL/MariaDB: Use strict SQL mode. #699
+* MySQL/MariaDB Galera Cluster: Set `wsrep_sync_wait` for cluster-wide causality checks. #711
+* Don't crash history sync in the absence of Redis®[\*](doc/TRADEMARKS.md#redis). #725
+* Update dependencies. [27 times](https://github.com/Icinga/icingadb/pulls?q=is%3Apr+is%3Amerged+label%3Adependencies+milestone%3A1.2.0)
+
+### Schema
+
+In addition to mandatory schema upgrades, this release includes an optional upgrade that can be applied subsequently.
+Details are available in the [Upgrading documentation](doc/04-Upgrading.md#upgrading-to-icinga-db-v120) and #656.
+
+All schema changes are listed below:
+
+* Allow host and service check attempts >= 256. #656
+* Composite `INDEX` for the history table to speed up history view in Icinga DB Web. #686
+* MySQL/MariaDB: Fix `icingadb_schema.timestamp` not being Unix time. #700
+* PostgreSQL: Change `get_sla_ok_percent` to return decimal numbers in SLA overview. #710
+
## 1.1.1 (2023-08-09)
This release fixes a few crashes in the Icinga DB daemon, addresses some shortcomings in the database schema,
diff --git a/README.md b/README.md
index 8cc7c16..54fe707 100644
--- a/README.md
+++ b/README.md
@@ -3,13 +3,14 @@
Icinga DB is a set of components for publishing, synchronizing and
visualizing monitoring data in the Icinga ecosystem, consisting of:
-* The Icinga DB daemon, which synchronizes monitoring data between a Redis server and a database
+* The Icinga DB daemon,
+ which synchronizes monitoring data between a Redis®[\*](doc/TRADEMARKS.md#redis) server and a database
* Icinga 2 with its [Icinga DB feature](https://icinga.com/docs/icinga-2/latest/doc/14-features/#icinga-db) enabled,
- responsible for publishing the data to the Redis server, i.e. configuration and its runtime updates, check results,
+ responsible for publishing the data to the Redis® server, i.e. configuration and its runtime updates, check results,
state changes, downtimes, acknowledgements, notifications, and other events such as flapping
* And Icinga Web with the
[Icinga DB Web](https://icinga.com/docs/icinga-db-web) module enabled,
- which connects to both Redis and the database to display and work with the most up-to-date data
+ which connects to both Redis® and the database to display and work with the most up-to-date data
![Icinga DB Architecture](doc/images/icingadb-architecture.png)
diff --git a/cmd/icingadb-migrate/convert.go b/cmd/icingadb-migrate/convert.go
index e14746e..5cfa7bd 100644
--- a/cmd/icingadb-migrate/convert.go
+++ b/cmd/icingadb-migrate/convert.go
@@ -150,19 +150,9 @@ func convertCommentRows(
},
AckHistoryUpserter: history.AckHistoryUpserter{ClearTime: clearTime},
SetTime: setTime,
- Author: icingadbTypes.String{
- NullString: sql.NullString{
- String: row.AuthorName,
- Valid: true,
- },
- },
- Comment: icingadbTypes.String{
- NullString: sql.NullString{
- String: row.CommentData,
- Valid: true,
- },
- },
- ExpireTime: convertTime(row.ExpirationTime, 0),
+ Author: icingadbTypes.MakeString(row.AuthorName),
+ Comment: icingadbTypes.MakeString(row.CommentData),
+ ExpireTime: convertTime(row.ExpirationTime, 0),
IsPersistent: icingadbTypes.Bool{
Bool: row.IsPersistent != 0,
Valid: true,
@@ -656,13 +646,8 @@ func convertNotificationRows(
SendTime: ts,
State: row.State,
PreviousHardState: previousHardState,
- Text: icingadbTypes.String{
- NullString: sql.NullString{
- String: text,
- Valid: true,
- },
- },
- UsersNotified: row.ContactsNotified,
+ Text: icingadbTypes.MakeString(text),
+ UsersNotified: row.ContactsNotified,
})
allHistory = append(allHistory, &history.HistoryNotification{
@@ -739,8 +724,8 @@ type stateRow = struct {
StateTimeUsec uint32
State uint8
StateType uint8
- CurrentCheckAttempt uint16
- MaxCheckAttempts uint16
+ CurrentCheckAttempt uint32
+ MaxCheckAttempts uint32
LastState uint8
LastHardState uint8
Output sql.NullString
@@ -813,10 +798,10 @@ func convertStateRows(
HardState: row.LastHardState,
PreviousSoftState: row.LastState,
PreviousHardState: previousHardState,
- CheckAttempt: uint8(row.CurrentCheckAttempt),
+ CheckAttempt: row.CurrentCheckAttempt,
Output: icingadbTypes.String{NullString: row.Output},
LongOutput: icingadbTypes.String{NullString: row.LongOutput},
- MaxCheckAttempts: uint32(row.MaxCheckAttempts),
+ MaxCheckAttempts: row.MaxCheckAttempts,
CheckSource: icingadbTypes.String{NullString: row.CheckSource},
})
diff --git a/cmd/icingadb-migrate/misc.go b/cmd/icingadb-migrate/misc.go
index f1db20c..b8d358f 100644
--- a/cmd/icingadb-migrate/misc.go
+++ b/cmd/icingadb-migrate/misc.go
@@ -4,7 +4,6 @@ import (
"context"
"crypto/sha1"
"github.com/icinga/icingadb/pkg/contracts"
- "github.com/icinga/icingadb/pkg/driver"
"github.com/icinga/icingadb/pkg/icingadb"
"github.com/icinga/icingadb/pkg/icingadb/objectpacker"
icingadbTypes "github.com/icinga/icingadb/pkg/types"
@@ -110,7 +109,7 @@ func sliceIdoHistory[Row any](
args["checkpoint"] = checkpoint
args["bulk"] = 20000
- if ht.snapshot.DriverName() != driver.MySQL {
+ if ht.snapshot.DriverName() != icingadb.MySQL {
query = strings.ReplaceAll(query, " USE INDEX (PRIMARY)", "")
}
diff --git a/cmd/icingadb/main.go b/cmd/icingadb/main.go
index 77ce577..4e165eb 100644
--- a/cmd/icingadb/main.go
+++ b/cmd/icingadb/main.go
@@ -3,7 +3,7 @@ package main
import (
"context"
"fmt"
- "github.com/go-redis/redis/v8"
+ "github.com/icinga/icingadb/internal"
"github.com/icinga/icingadb/internal/command"
"github.com/icinga/icingadb/pkg/common"
"github.com/icinga/icingadb/pkg/icingadb"
@@ -16,6 +16,7 @@ import (
"github.com/icinga/icingadb/pkg/utils"
"github.com/okzk/sdnotify"
"github.com/pkg/errors"
+ "github.com/redis/go-redis/v9"
"go.uber.org/zap"
"golang.org/x/sync/errgroup"
"os"
@@ -55,11 +56,7 @@ func run() int {
logger := logs.GetLogger()
defer logger.Sync()
- if warn := cmd.Config.DecodeWarning; warn != nil {
- logger.Warnf("ignoring unknown config option, this will become a fatal error in Icinga DB v1.2:\n\n%v", warn)
- }
-
- logger.Info("Starting Icinga DB")
+ logger.Infof("Starting Icinga DB daemon (%s)", internal.Version.Version)
db, err := cmd.Database(logs.GetChildLogger("database"))
if err != nil {
@@ -165,8 +162,8 @@ func run() int {
hactx, cancelHactx := context.WithCancel(ctx)
for hactx.Err() == nil {
select {
- case <-ha.Takeover():
- logger.Info("Taking over")
+ case takeoverReason := <-ha.Takeover():
+ logger.Infow("Taking over", zap.String("reason", takeoverReason))
go func() {
for hactx.Err() == nil {
@@ -327,8 +324,8 @@ func run() int {
}
}
}()
- case <-ha.Handover():
- logger.Warn("Handing over")
+ case handoverReason := <-ha.Handover():
+ logger.Warnw("Handing over", zap.String("reason", handoverReason))
cancelHactx()
case <-hactx.Done():
diff --git a/config.example.yml b/config.example.yml
index 61d3933..90fea96 100644
--- a/config.example.yml
+++ b/config.example.yml
@@ -23,25 +23,59 @@ database:
# Database password.
password: CHANGEME
-# Connection configuration for the Redis server where Icinga 2 writes its configuration, state and history items.
+ # List of low-level database options that can be set to influence some Icinga DB internal default behaviours.
+ # Do not change the defaults if you don't have to!
+# options:
+ # Maximum number of connections Icinga DB is allowed to open in parallel to the database.
+ # By default, Icinga DB is allowed to open up to "16" connections whenever necessary.
+ # Setting this to a number less than or equal to "-1" allows Icinga DB to open an unlimited number of connections.
+ # However, it is not possible to set this option to "0".
+# max_connections: 16
+
+ # Maximum number of queries allowed to connect to a single database table simultaneously.
+ # By default, Icinga DB is allowed to execute up to "8" queries of any kind, e.g. INSERT,UPDATE,DELETE
+ # concurrently on a given table.
+ # It is not possible to set this option to a smaller number than "1".
+# max_connections_per_table: 8
+
+ # Maximum number of placeholders Icinga DB is allowed to use for a single SQL statement.
+ # By default, Icinga DB uses up to "8192" placeholders when necessary, which showed the
+ # best performance in terms of execution time and parallelism in our tests.
+ # It is not possible to set this option to a smaller number than "1".
+# max_placeholders_per_statement: 8192
+
+ # Maximum number of rows Icinga DB is allowed to select, delete, update or insert in a single transaction.
+ # By default, Icinga DB selects,deletes,updates,inserts up to "8192" rows in a single transaction, which showed
+ # the best performance in terms of execution time and parallelism in our tests.
+ # It is not possible to set this option to a smaller number than "1".
+# max_rows_per_transaction: 8192
+
+ # Enforce Galera cluster nodes to perform strict cluster-wide causality checks before executing
+ # specific SQL queries determined by the number you provided.
+ # Note: You can only set this option to a number "0 - 15".
+ # Defaults to 7.
+ # See https://icinga.com/docs/icinga-db/latest/doc/03-Configuration/#galera-cluster
+# wsrep_sync_wait: 7
+
+# Connection configuration for the Redis® server where Icinga 2 writes its configuration, state and history items.
# This is the same connection as configured in the 'icingadb' feature of the corresponding Icinga 2 node.
-# High availability setups require a dedicated Redis server per Icinga 2 node and
+# High availability setups require a dedicated Redis® server per Icinga 2 node and
# therefore a dedicated Icinga DB instance that connects to it.
redis:
- # Redis host or absolute Unix socket path.
+ # Host name or address, or absolute Unix socket path.
host: localhost
- # Redis port.
- # Defaults to '6380' since the Redis server provided by the 'icingadb-redis' package listens on that port.
+ # TCP port.
+ # Defaults to '6380' as the Redis® open source server provided by the 'icingadb-redis' package listens on that port.
# port: 6380
- # Redis password.
+ # Authentication password.
# password:
# Icinga DB logs its activities at various severity levels and any errors that occur either
# on the console or in systemd's journal. The latter is used automatically when running under systemd.
# In any case, the default log level is 'info'.
-logging:
+#logging:
# Default logging level. Can be set to 'fatal', 'error', 'warn', 'info' or 'debug'.
# If not set, defaults to 'info'.
# level: info
@@ -57,7 +91,7 @@ logging:
# interval: 20s
# Map of component-logging level pairs to define a different log level than the default value for each component.
- options:
+# options:
# config-sync:
# database:
# dump-signals:
@@ -72,17 +106,27 @@ logging:
# Retention is an optional feature to limit the number of days that historical data is available,
# as no historical data is deleted by default.
-retention:
+#retention:
# Number of days to retain full historical data. By default, historical data is retained forever.
# history-days:
# Number of days to retain historical data for SLA reporting. By default, it is retained forever.
# sla-days:
+ # Interval for periodically cleaning up the historical data, defined as a duration string.
+ # A duration string is a sequence of decimal numbers and a unit suffix, such as "20s".
+ # Valid units are "ms", "s", "m", "h".
+ # Defaults to "1h".
+# interval: 1h
+
+ # Number of old historical entries a single query can delete in a "DELETE FROM ... LIMIT count" manner.
+ # By default, this is limited to 5000 entries.
+# count: 5000
+
# Map of history category to number of days to retain its data in order to
# enable retention only for specific categories or to
# override the number that has been configured in history-days.
- options:
+# options:
# acknowledgement:
# comment:
# downtime:
diff --git a/dependencies/pkg/mod/cache/download/filippo.io/edwards25519/@v/list b/dependencies/pkg/mod/cache/download/filippo.io/edwards25519/@v/list
new file mode 100644
index 0000000..795460f
--- /dev/null
+++ b/dependencies/pkg/mod/cache/download/filippo.io/edwards25519/@v/list
@@ -0,0 +1 @@
+v1.1.0
diff --git a/dependencies/pkg/mod/cache/download/filippo.io/edwards25519/@v/v1.1.0.info b/dependencies/pkg/mod/cache/download/filippo.io/edwards25519/@v/v1.1.0.info
new file mode 100644
index 0000000..8d28d4f
--- /dev/null
+++ b/dependencies/pkg/mod/cache/download/filippo.io/edwards25519/@v/v1.1.0.info
@@ -0,0 +1 @@
+{"Version":"v1.1.0","Time":"2023-12-10T19:13:24Z","Origin":{"VCS":"git","URL":"https://github.com/FiloSottile/edwards25519","Ref":"refs/tags/v1.1.0","Hash":"325f520de716c1d2d2b4e8dc2f82c7ccc5fac764"}} \ No newline at end of file
diff --git a/dependencies/pkg/mod/cache/download/github.com/cespare/xxhash/v2/@v/v2.1.2.lock b/dependencies/pkg/mod/cache/download/filippo.io/edwards25519/@v/v1.1.0.lock
index e69de29..e69de29 100644
--- a/dependencies/pkg/mod/cache/download/github.com/cespare/xxhash/v2/@v/v2.1.2.lock
+++ b/dependencies/pkg/mod/cache/download/filippo.io/edwards25519/@v/v1.1.0.lock
diff --git a/dependencies/pkg/mod/cache/download/filippo.io/edwards25519/@v/v1.1.0.mod b/dependencies/pkg/mod/cache/download/filippo.io/edwards25519/@v/v1.1.0.mod
new file mode 100644
index 0000000..78e04e9
--- /dev/null
+++ b/dependencies/pkg/mod/cache/download/filippo.io/edwards25519/@v/v1.1.0.mod
@@ -0,0 +1,3 @@
+module filippo.io/edwards25519
+
+go 1.20
diff --git a/dependencies/pkg/mod/cache/download/filippo.io/edwards25519/@v/v1.1.0.zip b/dependencies/pkg/mod/cache/download/filippo.io/edwards25519/@v/v1.1.0.zip
new file mode 100644
index 0000000..b475167
--- /dev/null
+++ b/dependencies/pkg/mod/cache/download/filippo.io/edwards25519/@v/v1.1.0.zip
Binary files differ
diff --git a/dependencies/pkg/mod/cache/download/filippo.io/edwards25519/@v/v1.1.0.ziphash b/dependencies/pkg/mod/cache/download/filippo.io/edwards25519/@v/v1.1.0.ziphash
new file mode 100644
index 0000000..e183bc3
--- /dev/null
+++ b/dependencies/pkg/mod/cache/download/filippo.io/edwards25519/@v/v1.1.0.ziphash
@@ -0,0 +1 @@
+h1:FNf4tywRC1HmFuKW5xopWpigGjJKiJSV0Cqo0cJWDaA= \ No newline at end of file
diff --git a/dependencies/pkg/mod/cache/download/github.com/benbjohnson/clock/@v/list b/dependencies/pkg/mod/cache/download/github.com/benbjohnson/clock/@v/list
deleted file mode 100644
index 18fa8e7..0000000
--- a/dependencies/pkg/mod/cache/download/github.com/benbjohnson/clock/@v/list
+++ /dev/null
@@ -1 +0,0 @@
-v1.3.0
diff --git a/dependencies/pkg/mod/cache/download/github.com/benbjohnson/clock/@v/v1.3.0.mod b/dependencies/pkg/mod/cache/download/github.com/benbjohnson/clock/@v/v1.3.0.mod
deleted file mode 100644
index 758903a..0000000
--- a/dependencies/pkg/mod/cache/download/github.com/benbjohnson/clock/@v/v1.3.0.mod
+++ /dev/null
@@ -1,3 +0,0 @@
-module github.com/benbjohnson/clock
-
-go 1.15
diff --git a/dependencies/pkg/mod/cache/download/github.com/cespare/xxhash/v2/@v/list b/dependencies/pkg/mod/cache/download/github.com/cespare/xxhash/v2/@v/list
index 9811b76..6526ce1 100644
--- a/dependencies/pkg/mod/cache/download/github.com/cespare/xxhash/v2/@v/list
+++ b/dependencies/pkg/mod/cache/download/github.com/cespare/xxhash/v2/@v/list
@@ -1,2 +1,2 @@
-v2.1.2
v2.2.0
+v2.3.0
diff --git a/dependencies/pkg/mod/cache/download/github.com/cespare/xxhash/v2/@v/v2.1.2.zip b/dependencies/pkg/mod/cache/download/github.com/cespare/xxhash/v2/@v/v2.1.2.zip
deleted file mode 100644
index abc5f21..0000000
--- a/dependencies/pkg/mod/cache/download/github.com/cespare/xxhash/v2/@v/v2.1.2.zip
+++ /dev/null
Binary files differ
diff --git a/dependencies/pkg/mod/cache/download/github.com/cespare/xxhash/v2/@v/v2.1.2.ziphash b/dependencies/pkg/mod/cache/download/github.com/cespare/xxhash/v2/@v/v2.1.2.ziphash
deleted file mode 100644
index 21a12fd..0000000
--- a/dependencies/pkg/mod/cache/download/github.com/cespare/xxhash/v2/@v/v2.1.2.ziphash
+++ /dev/null
@@ -1 +0,0 @@
-h1:YRXhKfTDauu4ajMg1TPgFO5jnlC2HCbmLXMcTG5cbYE= \ No newline at end of file
diff --git a/dependencies/pkg/mod/cache/download/github.com/cespare/xxhash/v2/@v/v2.2.0.info b/dependencies/pkg/mod/cache/download/github.com/cespare/xxhash/v2/@v/v2.2.0.info
deleted file mode 100644
index b1d832f..0000000
--- a/dependencies/pkg/mod/cache/download/github.com/cespare/xxhash/v2/@v/v2.2.0.info
+++ /dev/null
@@ -1 +0,0 @@
-{"Version":"v2.2.0","Time":"2022-12-04T02:06:23Z","Origin":{"VCS":"git","URL":"https://github.com/cespare/xxhash","Ref":"refs/tags/v2.2.0","Hash":"a76eb16a93c1e30527c073ca831d9048b4b935f6"}} \ No newline at end of file
diff --git a/dependencies/pkg/mod/cache/download/github.com/go-redis/redis/v8/@v/v8.11.5.lock b/dependencies/pkg/mod/cache/download/github.com/cespare/xxhash/v2/@v/v2.2.0.lock
index e69de29..e69de29 100644
--- a/dependencies/pkg/mod/cache/download/github.com/go-redis/redis/v8/@v/v8.11.5.lock
+++ b/dependencies/pkg/mod/cache/download/github.com/cespare/xxhash/v2/@v/v2.2.0.lock
diff --git a/dependencies/pkg/mod/cache/download/github.com/cespare/xxhash/v2/@v/v2.2.0.zip b/dependencies/pkg/mod/cache/download/github.com/cespare/xxhash/v2/@v/v2.2.0.zip
new file mode 100644
index 0000000..380ad72
--- /dev/null
+++ b/dependencies/pkg/mod/cache/download/github.com/cespare/xxhash/v2/@v/v2.2.0.zip
Binary files differ
diff --git a/dependencies/pkg/mod/cache/download/github.com/cespare/xxhash/v2/@v/v2.2.0.ziphash b/dependencies/pkg/mod/cache/download/github.com/cespare/xxhash/v2/@v/v2.2.0.ziphash
new file mode 100644
index 0000000..cf8dd5b
--- /dev/null
+++ b/dependencies/pkg/mod/cache/download/github.com/cespare/xxhash/v2/@v/v2.2.0.ziphash
@@ -0,0 +1 @@
+h1:DC2CZ1Ep5Y4k3ZQ899DldepgrayRUGE6BBZ/cd9Cj44= \ No newline at end of file
diff --git a/dependencies/pkg/mod/cache/download/github.com/cespare/xxhash/v2/@v/v2.3.0.info b/dependencies/pkg/mod/cache/download/github.com/cespare/xxhash/v2/@v/v2.3.0.info
new file mode 100644
index 0000000..820853f
--- /dev/null
+++ b/dependencies/pkg/mod/cache/download/github.com/cespare/xxhash/v2/@v/v2.3.0.info
@@ -0,0 +1 @@
+{"Version":"v2.3.0","Time":"2024-04-04T20:00:10Z","Origin":{"VCS":"git","URL":"https://github.com/cespare/xxhash","Ref":"refs/tags/v2.3.0","Hash":"998dce232f17418a7a5721ecf87ca714025a3243"}} \ No newline at end of file
diff --git a/dependencies/pkg/mod/cache/download/github.com/cespare/xxhash/v2/@v/v2.1.2.mod b/dependencies/pkg/mod/cache/download/github.com/cespare/xxhash/v2/@v/v2.3.0.mod
index 49f6760..49f6760 100644
--- a/dependencies/pkg/mod/cache/download/github.com/cespare/xxhash/v2/@v/v2.1.2.mod
+++ b/dependencies/pkg/mod/cache/download/github.com/cespare/xxhash/v2/@v/v2.3.0.mod
diff --git a/dependencies/pkg/mod/cache/download/github.com/fatih/color/@v/list b/dependencies/pkg/mod/cache/download/github.com/fatih/color/@v/list
index 73fba4f..de92ea1 100644
--- a/dependencies/pkg/mod/cache/download/github.com/fatih/color/@v/list
+++ b/dependencies/pkg/mod/cache/download/github.com/fatih/color/@v/list
@@ -1,3 +1,3 @@
v1.10.0
v1.13.0
-v1.15.0
+v1.16.0
diff --git a/dependencies/pkg/mod/cache/download/github.com/fatih/color/@v/v1.15.0.info b/dependencies/pkg/mod/cache/download/github.com/fatih/color/@v/v1.15.0.info
deleted file mode 100644
index 643baea..0000000
--- a/dependencies/pkg/mod/cache/download/github.com/fatih/color/@v/v1.15.0.info
+++ /dev/null
@@ -1 +0,0 @@
-{"Version":"v1.15.0","Time":"2023-03-12T11:25:03Z","Origin":{"VCS":"git","URL":"https://github.com/fatih/color","Ref":"refs/tags/v1.15.0","Hash":"12126ed593697635c525b302836b292b657ea573"}} \ No newline at end of file
diff --git a/dependencies/pkg/mod/cache/download/github.com/fatih/color/@v/v1.16.0.info b/dependencies/pkg/mod/cache/download/github.com/fatih/color/@v/v1.16.0.info
new file mode 100644
index 0000000..9f69498
--- /dev/null
+++ b/dependencies/pkg/mod/cache/download/github.com/fatih/color/@v/v1.16.0.info
@@ -0,0 +1 @@
+{"Version":"v1.16.0","Time":"2023-11-06T08:25:55Z","Origin":{"VCS":"git","URL":"https://github.com/fatih/color","Ref":"refs/tags/v1.16.0","Hash":"0f9779ed479afd460f0c2cc5a3d3eb69b9ba188b"}} \ No newline at end of file
diff --git a/dependencies/pkg/mod/cache/download/github.com/fatih/color/@v/v1.15.0.mod b/dependencies/pkg/mod/cache/download/github.com/fatih/color/@v/v1.16.0.mod
index 4fa49be..9959705 100644
--- a/dependencies/pkg/mod/cache/download/github.com/fatih/color/@v/v1.15.0.mod
+++ b/dependencies/pkg/mod/cache/download/github.com/fatih/color/@v/v1.16.0.mod
@@ -4,7 +4,7 @@ go 1.17
require (
github.com/mattn/go-colorable v0.1.13
- github.com/mattn/go-isatty v0.0.17
+ github.com/mattn/go-isatty v0.0.20
)
-require golang.org/x/sys v0.6.0
+require golang.org/x/sys v0.14.0
diff --git a/dependencies/pkg/mod/cache/download/github.com/go-redis/redis/v8/@v/list b/dependencies/pkg/mod/cache/download/github.com/go-redis/redis/v8/@v/list
deleted file mode 100644
index dbb87b9..0000000
--- a/dependencies/pkg/mod/cache/download/github.com/go-redis/redis/v8/@v/list
+++ /dev/null
@@ -1 +0,0 @@
-v8.11.5
diff --git a/dependencies/pkg/mod/cache/download/github.com/go-redis/redis/v8/@v/v8.11.5.info b/dependencies/pkg/mod/cache/download/github.com/go-redis/redis/v8/@v/v8.11.5.info
deleted file mode 100644
index 38f5c02..0000000
--- a/dependencies/pkg/mod/cache/download/github.com/go-redis/redis/v8/@v/v8.11.5.info
+++ /dev/null
@@ -1 +0,0 @@
-{"Version":"v8.11.5","Time":"2022-03-17T13:27:49Z"} \ No newline at end of file
diff --git a/dependencies/pkg/mod/cache/download/github.com/go-redis/redis/v8/@v/v8.11.5.mod b/dependencies/pkg/mod/cache/download/github.com/go-redis/redis/v8/@v/v8.11.5.mod
deleted file mode 100644
index d2610c2..0000000
--- a/dependencies/pkg/mod/cache/download/github.com/go-redis/redis/v8/@v/v8.11.5.mod
+++ /dev/null
@@ -1,20 +0,0 @@
-module github.com/go-redis/redis/v8
-
-go 1.17
-
-require (
- github.com/cespare/xxhash/v2 v2.1.2
- github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f
- github.com/onsi/ginkgo v1.16.5
- github.com/onsi/gomega v1.18.1
-)
-
-require (
- github.com/fsnotify/fsnotify v1.4.9 // indirect
- github.com/nxadm/tail v1.4.8 // indirect
- golang.org/x/net v0.0.0-20210428140749-89ef3d95e781 // indirect
- golang.org/x/sys v0.0.0-20211216021012-1d35b9e2eb4e // indirect
- golang.org/x/text v0.3.6 // indirect
- gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 // indirect
- gopkg.in/yaml.v2 v2.4.0 // indirect
-)
diff --git a/dependencies/pkg/mod/cache/download/github.com/go-redis/redis/v8/@v/v8.11.5.zip b/dependencies/pkg/mod/cache/download/github.com/go-redis/redis/v8/@v/v8.11.5.zip
deleted file mode 100644
index ac79681..0000000
--- a/dependencies/pkg/mod/cache/download/github.com/go-redis/redis/v8/@v/v8.11.5.zip
+++ /dev/null
Binary files differ
diff --git a/dependencies/pkg/mod/cache/download/github.com/go-redis/redis/v8/@v/v8.11.5.ziphash b/dependencies/pkg/mod/cache/download/github.com/go-redis/redis/v8/@v/v8.11.5.ziphash
deleted file mode 100644
index 17c3a46..0000000
--- a/dependencies/pkg/mod/cache/download/github.com/go-redis/redis/v8/@v/v8.11.5.ziphash
+++ /dev/null
@@ -1 +0,0 @@
-h1:AcZZR7igkdvfVmQTPnu9WE37LRrO/YrBH5zWyjDC0oI= \ No newline at end of file
diff --git a/dependencies/pkg/mod/cache/download/github.com/go-sql-driver/mysql/@v/list b/dependencies/pkg/mod/cache/download/github.com/go-sql-driver/mysql/@v/list
index 35d23a8..fdc860b 100644
--- a/dependencies/pkg/mod/cache/download/github.com/go-sql-driver/mysql/@v/list
+++ b/dependencies/pkg/mod/cache/download/github.com/go-sql-driver/mysql/@v/list
@@ -1,2 +1,2 @@
v1.6.0
-v1.7.1
+v1.8.1
diff --git a/dependencies/pkg/mod/cache/download/github.com/go-sql-driver/mysql/@v/v1.7.1.info b/dependencies/pkg/mod/cache/download/github.com/go-sql-driver/mysql/@v/v1.7.1.info
deleted file mode 100644
index 1209183..0000000
--- a/dependencies/pkg/mod/cache/download/github.com/go-sql-driver/mysql/@v/v1.7.1.info
+++ /dev/null
@@ -1 +0,0 @@
-{"Version":"v1.7.1","Time":"2023-04-25T10:02:15Z","Origin":{"VCS":"git","URL":"https://github.com/go-sql-driver/mysql","Ref":"refs/tags/v1.7.1","Hash":"f20b2863636093e5fbf1481b59bdaff3b0fbb779"}} \ No newline at end of file
diff --git a/dependencies/pkg/mod/cache/download/github.com/go-sql-driver/mysql/@v/v1.7.1.mod b/dependencies/pkg/mod/cache/download/github.com/go-sql-driver/mysql/@v/v1.7.1.mod
deleted file mode 100644
index 2511104..0000000
--- a/dependencies/pkg/mod/cache/download/github.com/go-sql-driver/mysql/@v/v1.7.1.mod
+++ /dev/null
@@ -1,3 +0,0 @@
-module github.com/go-sql-driver/mysql
-
-go 1.13
diff --git a/dependencies/pkg/mod/cache/download/github.com/go-sql-driver/mysql/@v/v1.7.1.zip b/dependencies/pkg/mod/cache/download/github.com/go-sql-driver/mysql/@v/v1.7.1.zip
deleted file mode 100644
index 301fad7..0000000
--- a/dependencies/pkg/mod/cache/download/github.com/go-sql-driver/mysql/@v/v1.7.1.zip
+++ /dev/null
Binary files differ
diff --git a/dependencies/pkg/mod/cache/download/github.com/go-sql-driver/mysql/@v/v1.7.1.ziphash b/dependencies/pkg/mod/cache/download/github.com/go-sql-driver/mysql/@v/v1.7.1.ziphash
deleted file mode 100644
index ff656c1..0000000
--- a/dependencies/pkg/mod/cache/download/github.com/go-sql-driver/mysql/@v/v1.7.1.ziphash
+++ /dev/null
@@ -1 +0,0 @@
-h1:lUIinVbN1DY0xBg0eMOzmmtGoHwWBbvnWubQUrtU8EI= \ No newline at end of file
diff --git a/dependencies/pkg/mod/cache/download/github.com/go-sql-driver/mysql/@v/v1.8.1.info b/dependencies/pkg/mod/cache/download/github.com/go-sql-driver/mysql/@v/v1.8.1.info
new file mode 100644
index 0000000..5fe9f39
--- /dev/null
+++ b/dependencies/pkg/mod/cache/download/github.com/go-sql-driver/mysql/@v/v1.8.1.info
@@ -0,0 +1 @@
+{"Version":"v1.8.1","Time":"2024-03-26T14:34:07Z","Origin":{"VCS":"git","URL":"https://github.com/go-sql-driver/mysql","Ref":"refs/tags/v1.8.1","Hash":"4395c45fd098a81c5251667cda111f94c693ab14"}} \ No newline at end of file
diff --git a/dependencies/pkg/mod/cache/download/github.com/go-sql-driver/mysql/@v/v1.7.1.lock b/dependencies/pkg/mod/cache/download/github.com/go-sql-driver/mysql/@v/v1.8.1.lock
index e69de29..e69de29 100644
--- a/dependencies/pkg/mod/cache/download/github.com/go-sql-driver/mysql/@v/v1.7.1.lock
+++ b/dependencies/pkg/mod/cache/download/github.com/go-sql-driver/mysql/@v/v1.8.1.lock
diff --git a/dependencies/pkg/mod/cache/download/github.com/go-sql-driver/mysql/@v/v1.8.1.mod b/dependencies/pkg/mod/cache/download/github.com/go-sql-driver/mysql/@v/v1.8.1.mod
new file mode 100644
index 0000000..4629714
--- /dev/null
+++ b/dependencies/pkg/mod/cache/download/github.com/go-sql-driver/mysql/@v/v1.8.1.mod
@@ -0,0 +1,5 @@
+module github.com/go-sql-driver/mysql
+
+go 1.18
+
+require filippo.io/edwards25519 v1.1.0
diff --git a/dependencies/pkg/mod/cache/download/github.com/go-sql-driver/mysql/@v/v1.8.1.zip b/dependencies/pkg/mod/cache/download/github.com/go-sql-driver/mysql/@v/v1.8.1.zip
new file mode 100644
index 0000000..991ec2f
--- /dev/null
+++ b/dependencies/pkg/mod/cache/download/github.com/go-sql-driver/mysql/@v/v1.8.1.zip
Binary files differ
diff --git a/dependencies/pkg/mod/cache/download/github.com/go-sql-driver/mysql/@v/v1.8.1.ziphash b/dependencies/pkg/mod/cache/download/github.com/go-sql-driver/mysql/@v/v1.8.1.ziphash
new file mode 100644
index 0000000..b688937
--- /dev/null
+++ b/dependencies/pkg/mod/cache/download/github.com/go-sql-driver/mysql/@v/v1.8.1.ziphash
@@ -0,0 +1 @@
+h1:LedoTUt/eveggdHS9qUFC1EFSa8bU2+1pZjSRpvNJ1Y= \ No newline at end of file
diff --git a/dependencies/pkg/mod/cache/download/github.com/goccy/go-yaml/@v/list b/dependencies/pkg/mod/cache/download/github.com/goccy/go-yaml/@v/list
index cd74ac3..3d461ea 100644
--- a/dependencies/pkg/mod/cache/download/github.com/goccy/go-yaml/@v/list
+++ b/dependencies/pkg/mod/cache/download/github.com/goccy/go-yaml/@v/list
@@ -1 +1 @@
-v1.11.0
+v1.11.3
diff --git a/dependencies/pkg/mod/cache/download/github.com/goccy/go-yaml/@v/v1.11.0.info b/dependencies/pkg/mod/cache/download/github.com/goccy/go-yaml/@v/v1.11.0.info
deleted file mode 100644
index 499fe61..0000000
--- a/dependencies/pkg/mod/cache/download/github.com/goccy/go-yaml/@v/v1.11.0.info
+++ /dev/null
@@ -1 +0,0 @@
-{"Version":"v1.11.0","Time":"2023-04-02T01:50:03Z","Origin":{"VCS":"git","URL":"https://github.com/goccy/go-yaml","Ref":"refs/tags/v1.11.0","Hash":"4052b059bc2ad6cd230a4d8be77e39fa1b1382df"}} \ No newline at end of file
diff --git a/dependencies/pkg/mod/cache/download/github.com/goccy/go-yaml/@v/v1.11.0.zip b/dependencies/pkg/mod/cache/download/github.com/goccy/go-yaml/@v/v1.11.0.zip
deleted file mode 100644
index 5ed3d40..0000000
--- a/dependencies/pkg/mod/cache/download/github.com/goccy/go-yaml/@v/v1.11.0.zip
+++ /dev/null
Binary files differ
diff --git a/dependencies/pkg/mod/cache/download/github.com/goccy/go-yaml/@v/v1.11.0.ziphash b/dependencies/pkg/mod/cache/download/github.com/goccy/go-yaml/@v/v1.11.0.ziphash
deleted file mode 100644
index 016cc4f..0000000
--- a/dependencies/pkg/mod/cache/download/github.com/goccy/go-yaml/@v/v1.11.0.ziphash
+++ /dev/null
@@ -1 +0,0 @@
-h1:n7Z+zx8S9f9KgzG6KtQKf+kwqXZlLNR2F6018Dgau54= \ No newline at end of file
diff --git a/dependencies/pkg/mod/cache/download/github.com/goccy/go-yaml/@v/v1.11.3.info b/dependencies/pkg/mod/cache/download/github.com/goccy/go-yaml/@v/v1.11.3.info
new file mode 100644
index 0000000..93dd5a8
--- /dev/null
+++ b/dependencies/pkg/mod/cache/download/github.com/goccy/go-yaml/@v/v1.11.3.info
@@ -0,0 +1 @@
+{"Version":"v1.11.3","Time":"2024-01-26T07:45:04Z","Origin":{"VCS":"git","URL":"https://github.com/goccy/go-yaml","Ref":"refs/tags/v1.11.3","Hash":"31fe1baacec127337140701face2e64a356075fd"}} \ No newline at end of file
diff --git a/dependencies/pkg/mod/cache/download/github.com/goccy/go-yaml/@v/v1.11.0.lock b/dependencies/pkg/mod/cache/download/github.com/goccy/go-yaml/@v/v1.11.3.lock
index e69de29..e69de29 100644
--- a/dependencies/pkg/mod/cache/download/github.com/goccy/go-yaml/@v/v1.11.0.lock
+++ b/dependencies/pkg/mod/cache/download/github.com/goccy/go-yaml/@v/v1.11.3.lock
diff --git a/dependencies/pkg/mod/cache/download/github.com/goccy/go-yaml/@v/v1.11.0.mod b/dependencies/pkg/mod/cache/download/github.com/goccy/go-yaml/@v/v1.11.3.mod
index f6e74c3..4550ff3 100644
--- a/dependencies/pkg/mod/cache/download/github.com/goccy/go-yaml/@v/v1.11.0.mod
+++ b/dependencies/pkg/mod/cache/download/github.com/goccy/go-yaml/@v/v1.11.3.mod
@@ -1,6 +1,6 @@
module github.com/goccy/go-yaml
-go 1.18
+go 1.19
require (
github.com/fatih/color v1.10.0
diff --git a/dependencies/pkg/mod/cache/download/github.com/goccy/go-yaml/@v/v1.11.3.zip b/dependencies/pkg/mod/cache/download/github.com/goccy/go-yaml/@v/v1.11.3.zip
new file mode 100644
index 0000000..d823d0e
--- /dev/null
+++ b/dependencies/pkg/mod/cache/download/github.com/goccy/go-yaml/@v/v1.11.3.zip
Binary files differ
diff --git a/dependencies/pkg/mod/cache/download/github.com/goccy/go-yaml/@v/v1.11.3.ziphash b/dependencies/pkg/mod/cache/download/github.com/goccy/go-yaml/@v/v1.11.3.ziphash
new file mode 100644
index 0000000..bf3f752
--- /dev/null
+++ b/dependencies/pkg/mod/cache/download/github.com/goccy/go-yaml/@v/v1.11.3.ziphash
@@ -0,0 +1 @@
+h1:B3W9IdWbvrUu2OYQGwvU1nZtvMQJPBKgBUuweJjLj6I= \ No newline at end of file
diff --git a/dependencies/pkg/mod/cache/download/github.com/google/uuid/@v/list b/dependencies/pkg/mod/cache/download/github.com/google/uuid/@v/list
index 18fa8e7..b7c0a9b 100644
--- a/dependencies/pkg/mod/cache/download/github.com/google/uuid/@v/list
+++ b/dependencies/pkg/mod/cache/download/github.com/google/uuid/@v/list
@@ -1 +1 @@
-v1.3.0
+v1.6.0
diff --git a/dependencies/pkg/mod/cache/download/github.com/google/uuid/@v/v1.3.0.info b/dependencies/pkg/mod/cache/download/github.com/google/uuid/@v/v1.3.0.info
deleted file mode 100644
index abbd344..0000000
--- a/dependencies/pkg/mod/cache/download/github.com/google/uuid/@v/v1.3.0.info
+++ /dev/null
@@ -1 +0,0 @@
-{"Version":"v1.3.0","Time":"2021-07-12T22:33:52Z"} \ No newline at end of file
diff --git a/dependencies/pkg/mod/cache/download/github.com/google/uuid/@v/v1.3.0.zip b/dependencies/pkg/mod/cache/download/github.com/google/uuid/@v/v1.3.0.zip
deleted file mode 100644
index 63e6022..0000000
--- a/dependencies/pkg/mod/cache/download/github.com/google/uuid/@v/v1.3.0.zip
+++ /dev/null
Binary files differ
diff --git a/dependencies/pkg/mod/cache/download/github.com/google/uuid/@v/v1.3.0.ziphash b/dependencies/pkg/mod/cache/download/github.com/google/uuid/@v/v1.3.0.ziphash
deleted file mode 100644
index dc67306..0000000
--- a/dependencies/pkg/mod/cache/download/github.com/google/uuid/@v/v1.3.0.ziphash
+++ /dev/null
@@ -1 +0,0 @@
-h1:t6JiXgmwXMjEs8VusXIJk2BXHsn+wx8BZdTaoZ5fu7I= \ No newline at end of file
diff --git a/dependencies/pkg/mod/cache/download/github.com/google/uuid/@v/v1.6.0.info b/dependencies/pkg/mod/cache/download/github.com/google/uuid/@v/v1.6.0.info
new file mode 100644
index 0000000..ef68c97
--- /dev/null
+++ b/dependencies/pkg/mod/cache/download/github.com/google/uuid/@v/v1.6.0.info
@@ -0,0 +1 @@
+{"Version":"v1.6.0","Time":"2024-01-23T18:54:04Z","Origin":{"VCS":"git","URL":"https://github.com/google/uuid","Ref":"refs/tags/v1.6.0","Hash":"0f11ee6918f41a04c201eceeadf612a377bc7fbc"}} \ No newline at end of file
diff --git a/dependencies/pkg/mod/cache/download/github.com/google/uuid/@v/v1.3.0.lock b/dependencies/pkg/mod/cache/download/github.com/google/uuid/@v/v1.6.0.lock
index e69de29..e69de29 100644
--- a/dependencies/pkg/mod/cache/download/github.com/google/uuid/@v/v1.3.0.lock
+++ b/dependencies/pkg/mod/cache/download/github.com/google/uuid/@v/v1.6.0.lock
diff --git a/dependencies/pkg/mod/cache/download/github.com/google/uuid/@v/v1.3.0.mod b/dependencies/pkg/mod/cache/download/github.com/google/uuid/@v/v1.6.0.mod
index fc84cd7..fc84cd7 100644
--- a/dependencies/pkg/mod/cache/download/github.com/google/uuid/@v/v1.3.0.mod
+++ b/dependencies/pkg/mod/cache/download/github.com/google/uuid/@v/v1.6.0.mod
diff --git a/dependencies/pkg/mod/cache/download/github.com/google/uuid/@v/v1.6.0.zip b/dependencies/pkg/mod/cache/download/github.com/google/uuid/@v/v1.6.0.zip
new file mode 100644
index 0000000..e910ff2
--- /dev/null
+++ b/dependencies/pkg/mod/cache/download/github.com/google/uuid/@v/v1.6.0.zip
Binary files differ
diff --git a/dependencies/pkg/mod/cache/download/github.com/google/uuid/@v/v1.6.0.ziphash b/dependencies/pkg/mod/cache/download/github.com/google/uuid/@v/v1.6.0.ziphash
new file mode 100644
index 0000000..3b6900f
--- /dev/null
+++ b/dependencies/pkg/mod/cache/download/github.com/google/uuid/@v/v1.6.0.ziphash
@@ -0,0 +1 @@
+h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0= \ No newline at end of file
diff --git a/dependencies/pkg/mod/cache/download/github.com/mattn/go-isatty/@v/list b/dependencies/pkg/mod/cache/download/github.com/mattn/go-isatty/@v/list
index eefdb82..7ee43f2 100644
--- a/dependencies/pkg/mod/cache/download/github.com/mattn/go-isatty/@v/list
+++ b/dependencies/pkg/mod/cache/download/github.com/mattn/go-isatty/@v/list
@@ -1,4 +1,4 @@
v0.0.12
v0.0.14
v0.0.16
-v0.0.19
+v0.0.20
diff --git a/dependencies/pkg/mod/cache/download/github.com/mattn/go-isatty/@v/v0.0.19.info b/dependencies/pkg/mod/cache/download/github.com/mattn/go-isatty/@v/v0.0.19.info
deleted file mode 100644
index 818489d..0000000
--- a/dependencies/pkg/mod/cache/download/github.com/mattn/go-isatty/@v/v0.0.19.info
+++ /dev/null
@@ -1 +0,0 @@
-{"Version":"v0.0.19","Time":"2023-03-22T15:51:17Z","Origin":{"VCS":"git","URL":"https://github.com/mattn/go-isatty","Ref":"refs/tags/v0.0.19","Hash":"c067b4f3df49dfc0f376d884e16cfd784ea1874b"}} \ No newline at end of file
diff --git a/dependencies/pkg/mod/cache/download/github.com/mattn/go-isatty/@v/v0.0.20.info b/dependencies/pkg/mod/cache/download/github.com/mattn/go-isatty/@v/v0.0.20.info
new file mode 100644
index 0000000..091c31c
--- /dev/null
+++ b/dependencies/pkg/mod/cache/download/github.com/mattn/go-isatty/@v/v0.0.20.info
@@ -0,0 +1 @@
+{"Version":"v0.0.20","Time":"2023-10-17T07:28:21Z","Origin":{"VCS":"git","URL":"https://github.com/mattn/go-isatty","Ref":"refs/tags/v0.0.20","Hash":"a7c02353c47bc4ec6b30dc9628154ae4fe760c11"}} \ No newline at end of file
diff --git a/dependencies/pkg/mod/cache/download/github.com/mattn/go-isatty/@v/v0.0.19.mod b/dependencies/pkg/mod/cache/download/github.com/mattn/go-isatty/@v/v0.0.20.mod
index 057d69d..057d69d 100644
--- a/dependencies/pkg/mod/cache/download/github.com/mattn/go-isatty/@v/v0.0.19.mod
+++ b/dependencies/pkg/mod/cache/download/github.com/mattn/go-isatty/@v/v0.0.20.mod
diff --git a/dependencies/pkg/mod/cache/download/github.com/mattn/go-sqlite3/@v/list b/dependencies/pkg/mod/cache/download/github.com/mattn/go-sqlite3/@v/list
index d31391a..2af3357 100644
--- a/dependencies/pkg/mod/cache/download/github.com/mattn/go-sqlite3/@v/list
+++ b/dependencies/pkg/mod/cache/download/github.com/mattn/go-sqlite3/@v/list
@@ -1,2 +1,2 @@
v1.14.6
-v1.14.17
+v1.14.22
diff --git a/dependencies/pkg/mod/cache/download/github.com/mattn/go-sqlite3/@v/v1.14.17.info b/dependencies/pkg/mod/cache/download/github.com/mattn/go-sqlite3/@v/v1.14.17.info
deleted file mode 100644
index 9366971..0000000
--- a/dependencies/pkg/mod/cache/download/github.com/mattn/go-sqlite3/@v/v1.14.17.info
+++ /dev/null
@@ -1 +0,0 @@
-{"Version":"v1.14.17","Time":"2023-05-31T23:32:35Z","Origin":{"VCS":"git","URL":"https://github.com/mattn/go-sqlite3","Ref":"refs/tags/v1.14.17","Hash":"f08f1b6b9ce62b2496d8d64df26c1e278887bc1c"}} \ No newline at end of file
diff --git a/dependencies/pkg/mod/cache/download/github.com/mattn/go-sqlite3/@v/v1.14.17.zip b/dependencies/pkg/mod/cache/download/github.com/mattn/go-sqlite3/@v/v1.14.17.zip
deleted file mode 100644
index 8e8aa42..0000000
--- a/dependencies/pkg/mod/cache/download/github.com/mattn/go-sqlite3/@v/v1.14.17.zip
+++ /dev/null
Binary files differ
diff --git a/dependencies/pkg/mod/cache/download/github.com/mattn/go-sqlite3/@v/v1.14.17.ziphash b/dependencies/pkg/mod/cache/download/github.com/mattn/go-sqlite3/@v/v1.14.17.ziphash
deleted file mode 100644
index 880bbdd..0000000
--- a/dependencies/pkg/mod/cache/download/github.com/mattn/go-sqlite3/@v/v1.14.17.ziphash
+++ /dev/null
@@ -1 +0,0 @@
-h1:mCRHCLDUBXgpKAqIKsaAaAsrAlbkeomtRFKXh2L6YIM= \ No newline at end of file
diff --git a/dependencies/pkg/mod/cache/download/github.com/mattn/go-sqlite3/@v/v1.14.22.info b/dependencies/pkg/mod/cache/download/github.com/mattn/go-sqlite3/@v/v1.14.22.info
new file mode 100644
index 0000000..f359477
--- /dev/null
+++ b/dependencies/pkg/mod/cache/download/github.com/mattn/go-sqlite3/@v/v1.14.22.info
@@ -0,0 +1 @@
+{"Version":"v1.14.22","Time":"2024-02-02T17:00:28Z","Origin":{"VCS":"git","URL":"https://github.com/mattn/go-sqlite3","Ref":"refs/tags/v1.14.22","Hash":"6ee3e6746e6b5a0f4099d0553120ceead9f9fc38"}} \ No newline at end of file
diff --git a/dependencies/pkg/mod/cache/download/github.com/mattn/go-sqlite3/@v/v1.14.17.lock b/dependencies/pkg/mod/cache/download/github.com/mattn/go-sqlite3/@v/v1.14.22.lock
index e69de29..e69de29 100644
--- a/dependencies/pkg/mod/cache/download/github.com/mattn/go-sqlite3/@v/v1.14.17.lock
+++ b/dependencies/pkg/mod/cache/download/github.com/mattn/go-sqlite3/@v/v1.14.22.lock
diff --git a/dependencies/pkg/mod/cache/download/github.com/mattn/go-sqlite3/@v/v1.14.17.mod b/dependencies/pkg/mod/cache/download/github.com/mattn/go-sqlite3/@v/v1.14.22.mod
index 89788ab..e342dcc 100644
--- a/dependencies/pkg/mod/cache/download/github.com/mattn/go-sqlite3/@v/v1.14.17.mod
+++ b/dependencies/pkg/mod/cache/download/github.com/mattn/go-sqlite3/@v/v1.14.22.mod
@@ -1,6 +1,6 @@
module github.com/mattn/go-sqlite3
-go 1.16
+go 1.19
retract (
[v2.0.0+incompatible, v2.0.6+incompatible] // Accidental; no major changes or features.
diff --git a/dependencies/pkg/mod/cache/download/github.com/mattn/go-sqlite3/@v/v1.14.22.zip b/dependencies/pkg/mod/cache/download/github.com/mattn/go-sqlite3/@v/v1.14.22.zip
new file mode 100644
index 0000000..6a58c47
--- /dev/null
+++ b/dependencies/pkg/mod/cache/download/github.com/mattn/go-sqlite3/@v/v1.14.22.zip
Binary files differ
diff --git a/dependencies/pkg/mod/cache/download/github.com/mattn/go-sqlite3/@v/v1.14.22.ziphash b/dependencies/pkg/mod/cache/download/github.com/mattn/go-sqlite3/@v/v1.14.22.ziphash
new file mode 100644
index 0000000..58cf4fa
--- /dev/null
+++ b/dependencies/pkg/mod/cache/download/github.com/mattn/go-sqlite3/@v/v1.14.22.ziphash
@@ -0,0 +1 @@
+h1:2gZY6PC6kBnID23Tichd1K+Z0oS6nE/XwU+Vz/5o4kU= \ No newline at end of file
diff --git a/dependencies/pkg/mod/cache/download/github.com/redis/go-redis/v9/@v/list b/dependencies/pkg/mod/cache/download/github.com/redis/go-redis/v9/@v/list
new file mode 100644
index 0000000..1890ff6
--- /dev/null
+++ b/dependencies/pkg/mod/cache/download/github.com/redis/go-redis/v9/@v/list
@@ -0,0 +1 @@
+v9.5.1
diff --git a/dependencies/pkg/mod/cache/download/github.com/redis/go-redis/v9/@v/v9.5.1.info b/dependencies/pkg/mod/cache/download/github.com/redis/go-redis/v9/@v/v9.5.1.info
new file mode 100644
index 0000000..3d4ef4a
--- /dev/null
+++ b/dependencies/pkg/mod/cache/download/github.com/redis/go-redis/v9/@v/v9.5.1.info
@@ -0,0 +1 @@
+{"Version":"v9.5.1","Time":"2024-02-20T15:44:29Z","Origin":{"VCS":"git","URL":"https://github.com/redis/go-redis","Ref":"refs/tags/v9.5.1","Hash":"d43a9fa887d9284ba42fcd46d46e97c56b34e132"}} \ No newline at end of file
diff --git a/dependencies/pkg/mod/cache/download/go.uber.org/zap/@v/v1.25.0.lock b/dependencies/pkg/mod/cache/download/github.com/redis/go-redis/v9/@v/v9.5.1.lock
index e69de29..e69de29 100644
--- a/dependencies/pkg/mod/cache/download/go.uber.org/zap/@v/v1.25.0.lock
+++ b/dependencies/pkg/mod/cache/download/github.com/redis/go-redis/v9/@v/v9.5.1.lock
diff --git a/dependencies/pkg/mod/cache/download/github.com/redis/go-redis/v9/@v/v9.5.1.mod b/dependencies/pkg/mod/cache/download/github.com/redis/go-redis/v9/@v/v9.5.1.mod
new file mode 100644
index 0000000..6c65f09
--- /dev/null
+++ b/dependencies/pkg/mod/cache/download/github.com/redis/go-redis/v9/@v/v9.5.1.mod
@@ -0,0 +1,10 @@
+module github.com/redis/go-redis/v9
+
+go 1.18
+
+require (
+ github.com/bsm/ginkgo/v2 v2.12.0
+ github.com/bsm/gomega v1.27.10
+ github.com/cespare/xxhash/v2 v2.2.0
+ github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f
+)
diff --git a/dependencies/pkg/mod/cache/download/github.com/redis/go-redis/v9/@v/v9.5.1.zip b/dependencies/pkg/mod/cache/download/github.com/redis/go-redis/v9/@v/v9.5.1.zip
new file mode 100644
index 0000000..4ece052
--- /dev/null
+++ b/dependencies/pkg/mod/cache/download/github.com/redis/go-redis/v9/@v/v9.5.1.zip
Binary files differ
diff --git a/dependencies/pkg/mod/cache/download/github.com/redis/go-redis/v9/@v/v9.5.1.ziphash b/dependencies/pkg/mod/cache/download/github.com/redis/go-redis/v9/@v/v9.5.1.ziphash
new file mode 100644
index 0000000..27c5665
--- /dev/null
+++ b/dependencies/pkg/mod/cache/download/github.com/redis/go-redis/v9/@v/v9.5.1.ziphash
@@ -0,0 +1 @@
+h1:H1X4D3yHPaYrkL5X06Wh6xNVM/pX0Ft4RV0vMGvLBh8= \ No newline at end of file
diff --git a/dependencies/pkg/mod/cache/download/github.com/rivo/uniseg/@v/list b/dependencies/pkg/mod/cache/download/github.com/rivo/uniseg/@v/list
index b04498b..ef0ecf6 100644
--- a/dependencies/pkg/mod/cache/download/github.com/rivo/uniseg/@v/list
+++ b/dependencies/pkg/mod/cache/download/github.com/rivo/uniseg/@v/list
@@ -1,3 +1,3 @@
v0.1.0
v0.2.0
-v0.4.4
+v0.4.7
diff --git a/dependencies/pkg/mod/cache/download/github.com/rivo/uniseg/@v/v0.4.4.info b/dependencies/pkg/mod/cache/download/github.com/rivo/uniseg/@v/v0.4.4.info
deleted file mode 100644
index 4ad888a..0000000
--- a/dependencies/pkg/mod/cache/download/github.com/rivo/uniseg/@v/v0.4.4.info
+++ /dev/null
@@ -1 +0,0 @@
-{"Version":"v0.4.4","Time":"2023-02-21T06:53:24Z","Origin":{"VCS":"git","URL":"https://github.com/rivo/uniseg","Ref":"refs/tags/v0.4.4","Hash":"2d50be15fe50b00a9169a11d741f8ee86bada48f"}} \ No newline at end of file
diff --git a/dependencies/pkg/mod/cache/download/github.com/rivo/uniseg/@v/v0.4.7.info b/dependencies/pkg/mod/cache/download/github.com/rivo/uniseg/@v/v0.4.7.info
new file mode 100644
index 0000000..aae6264
--- /dev/null
+++ b/dependencies/pkg/mod/cache/download/github.com/rivo/uniseg/@v/v0.4.7.info
@@ -0,0 +1 @@
+{"Version":"v0.4.7","Time":"2024-02-08T13:16:15Z","Origin":{"VCS":"git","URL":"https://github.com/rivo/uniseg","Ref":"refs/tags/v0.4.7","Hash":"03509a98a092b522b2ff0de13e53513d18b3b837"}} \ No newline at end of file
diff --git a/dependencies/pkg/mod/cache/download/github.com/rivo/uniseg/@v/v0.4.4.mod b/dependencies/pkg/mod/cache/download/github.com/rivo/uniseg/@v/v0.4.7.mod
index b2dc25c..b2dc25c 100644
--- a/dependencies/pkg/mod/cache/download/github.com/rivo/uniseg/@v/v0.4.4.mod
+++ b/dependencies/pkg/mod/cache/download/github.com/rivo/uniseg/@v/v0.4.7.mod
diff --git a/dependencies/pkg/mod/cache/download/github.com/stretchr/testify/@v/list b/dependencies/pkg/mod/cache/download/github.com/stretchr/testify/@v/list
index ae8621b..6b889fd 100644
--- a/dependencies/pkg/mod/cache/download/github.com/stretchr/testify/@v/list
+++ b/dependencies/pkg/mod/cache/download/github.com/stretchr/testify/@v/list
@@ -2,4 +2,4 @@ v1.7.0
v1.7.1
v1.8.0
v1.8.1
-v1.8.4
+v1.9.0
diff --git a/dependencies/pkg/mod/cache/download/github.com/stretchr/testify/@v/v1.8.4.mod b/dependencies/pkg/mod/cache/download/github.com/stretchr/testify/@v/v1.8.4.mod
deleted file mode 100644
index bddefac..0000000
--- a/dependencies/pkg/mod/cache/download/github.com/stretchr/testify/@v/v1.8.4.mod
+++ /dev/null
@@ -1,10 +0,0 @@
-module github.com/stretchr/testify
-
-go 1.20
-
-require (
- github.com/davecgh/go-spew v1.1.1
- github.com/pmezard/go-difflib v1.0.0
- github.com/stretchr/objx v0.5.0
- gopkg.in/yaml.v3 v3.0.1
-)
diff --git a/dependencies/pkg/mod/cache/download/github.com/stretchr/testify/@v/v1.9.0.mod b/dependencies/pkg/mod/cache/download/github.com/stretchr/testify/@v/v1.9.0.mod
new file mode 100644
index 0000000..943798e
--- /dev/null
+++ b/dependencies/pkg/mod/cache/download/github.com/stretchr/testify/@v/v1.9.0.mod
@@ -0,0 +1,16 @@
+module github.com/stretchr/testify
+
+// This should match the minimum supported version that is tested in
+// .github/workflows/main.yml
+go 1.17
+
+require (
+ github.com/davecgh/go-spew v1.1.1
+ github.com/pmezard/go-difflib v1.0.0
+ github.com/stretchr/objx v0.5.2
+ gopkg.in/yaml.v3 v3.0.1
+)
+
+// Break dependency cycle with objx.
+// See https://github.com/stretchr/objx/pull/140
+exclude github.com/stretchr/testify v1.8.2
diff --git a/dependencies/pkg/mod/cache/download/go.uber.org/zap/@v/list b/dependencies/pkg/mod/cache/download/go.uber.org/zap/@v/list
index bb04940..df40772 100644
--- a/dependencies/pkg/mod/cache/download/go.uber.org/zap/@v/list
+++ b/dependencies/pkg/mod/cache/download/go.uber.org/zap/@v/list
@@ -1 +1 @@
-v1.25.0
+v1.27.0
diff --git a/dependencies/pkg/mod/cache/download/go.uber.org/zap/@v/v1.25.0.info b/dependencies/pkg/mod/cache/download/go.uber.org/zap/@v/v1.25.0.info
deleted file mode 100644
index ea317d7..0000000
--- a/dependencies/pkg/mod/cache/download/go.uber.org/zap/@v/v1.25.0.info
+++ /dev/null
@@ -1 +0,0 @@
-{"Version":"v1.25.0","Time":"2023-08-02T05:47:56Z","Origin":{"VCS":"git","URL":"https://github.com/uber-go/zap","Ref":"refs/tags/v1.25.0","Hash":"56468e797f1dbf3905ad7c34b5513928206244bf"}} \ No newline at end of file
diff --git a/dependencies/pkg/mod/cache/download/go.uber.org/zap/@v/v1.25.0.zip b/dependencies/pkg/mod/cache/download/go.uber.org/zap/@v/v1.25.0.zip
deleted file mode 100644
index d97277c..0000000
--- a/dependencies/pkg/mod/cache/download/go.uber.org/zap/@v/v1.25.0.zip
+++ /dev/null
Binary files differ
diff --git a/dependencies/pkg/mod/cache/download/go.uber.org/zap/@v/v1.25.0.ziphash b/dependencies/pkg/mod/cache/download/go.uber.org/zap/@v/v1.25.0.ziphash
deleted file mode 100644
index 31526ee..0000000
--- a/dependencies/pkg/mod/cache/download/go.uber.org/zap/@v/v1.25.0.ziphash
+++ /dev/null
@@ -1 +0,0 @@
-h1:4Hvk6GtkucQ790dqmj7l1eEnRdKm3k3ZUrUMS2d5+5c= \ No newline at end of file
diff --git a/dependencies/pkg/mod/cache/download/go.uber.org/zap/@v/v1.27.0.info b/dependencies/pkg/mod/cache/download/go.uber.org/zap/@v/v1.27.0.info
new file mode 100644
index 0000000..dedb30a
--- /dev/null
+++ b/dependencies/pkg/mod/cache/download/go.uber.org/zap/@v/v1.27.0.info
@@ -0,0 +1 @@
+{"Version":"v1.27.0","Time":"2024-02-20T20:55:06Z","Origin":{"VCS":"git","URL":"https://github.com/uber-go/zap","Ref":"refs/tags/v1.27.0","Hash":"fcf8ee58669e358bbd6460bef5c2ee7a53c0803a"}} \ No newline at end of file
diff --git a/dependencies/pkg/mod/cache/download/golang.org/x/sync/@v/v0.3.0.lock b/dependencies/pkg/mod/cache/download/go.uber.org/zap/@v/v1.27.0.lock
index e69de29..e69de29 100644
--- a/dependencies/pkg/mod/cache/download/golang.org/x/sync/@v/v0.3.0.lock
+++ b/dependencies/pkg/mod/cache/download/go.uber.org/zap/@v/v1.27.0.lock
diff --git a/dependencies/pkg/mod/cache/download/go.uber.org/zap/@v/v1.25.0.mod b/dependencies/pkg/mod/cache/download/go.uber.org/zap/@v/v1.27.0.mod
index 455dae4..88575f4 100644
--- a/dependencies/pkg/mod/cache/download/go.uber.org/zap/@v/v1.25.0.mod
+++ b/dependencies/pkg/mod/cache/download/go.uber.org/zap/@v/v1.27.0.mod
@@ -3,9 +3,8 @@ module go.uber.org/zap
go 1.19
require (
- github.com/benbjohnson/clock v1.3.0
github.com/stretchr/testify v1.8.1
- go.uber.org/goleak v1.2.0
+ go.uber.org/goleak v1.3.0
go.uber.org/multierr v1.10.0
gopkg.in/yaml.v3 v3.0.1
)
diff --git a/dependencies/pkg/mod/cache/download/go.uber.org/zap/@v/v1.27.0.zip b/dependencies/pkg/mod/cache/download/go.uber.org/zap/@v/v1.27.0.zip
new file mode 100644
index 0000000..02433b4
--- /dev/null
+++ b/dependencies/pkg/mod/cache/download/go.uber.org/zap/@v/v1.27.0.zip
Binary files differ
diff --git a/dependencies/pkg/mod/cache/download/go.uber.org/zap/@v/v1.27.0.ziphash b/dependencies/pkg/mod/cache/download/go.uber.org/zap/@v/v1.27.0.ziphash
new file mode 100644
index 0000000..d264908
--- /dev/null
+++ b/dependencies/pkg/mod/cache/download/go.uber.org/zap/@v/v1.27.0.ziphash
@@ -0,0 +1 @@
+h1:aJMhYGrd5QSmlpLMr2MftRKl7t8J8PTZPA732ud/XR8= \ No newline at end of file
diff --git a/dependencies/pkg/mod/cache/download/golang.org/x/exp/@v/list b/dependencies/pkg/mod/cache/download/golang.org/x/exp/@v/list
index 3e7e41a..b555f3a 100644
--- a/dependencies/pkg/mod/cache/download/golang.org/x/exp/@v/list
+++ b/dependencies/pkg/mod/cache/download/golang.org/x/exp/@v/list
@@ -1,2 +1,2 @@
v0.0.0-20220613132600-b0d781184e0d
-v0.0.0-20230809094429-853ea248256d
+v0.0.0-20240409090435-93d18d7e34b8
diff --git a/dependencies/pkg/mod/cache/download/golang.org/x/exp/@v/v0.0.0-20230809094429-853ea248256d.info b/dependencies/pkg/mod/cache/download/golang.org/x/exp/@v/v0.0.0-20230809094429-853ea248256d.info
deleted file mode 100644
index 7eb1914..0000000
--- a/dependencies/pkg/mod/cache/download/golang.org/x/exp/@v/v0.0.0-20230809094429-853ea248256d.info
+++ /dev/null
@@ -1 +0,0 @@
-{"Version":"v0.0.0-20230809094429-853ea248256d","Time":"2023-08-09T09:44:29Z","Origin":{"VCS":"git","URL":"https://go.googlesource.com/exp","Hash":"853ea248256d89ff7815cf4ae1ce2efa8bbb6277"}} \ No newline at end of file
diff --git a/dependencies/pkg/mod/cache/download/golang.org/x/exp/@v/v0.0.0-20230809094429-853ea248256d.mod b/dependencies/pkg/mod/cache/download/golang.org/x/exp/@v/v0.0.0-20230809094429-853ea248256d.mod
deleted file mode 100644
index 798bce9..0000000
--- a/dependencies/pkg/mod/cache/download/golang.org/x/exp/@v/v0.0.0-20230809094429-853ea248256d.mod
+++ /dev/null
@@ -1,11 +0,0 @@
-module golang.org/x/exp
-
-go 1.20
-
-require (
- github.com/google/go-cmp v0.5.8
- golang.org/x/mod v0.11.0
- golang.org/x/tools v0.2.0
-)
-
-require golang.org/x/sys v0.1.0 // indirect
diff --git a/dependencies/pkg/mod/cache/download/golang.org/x/exp/@v/v0.0.0-20240409090435-93d18d7e34b8.info b/dependencies/pkg/mod/cache/download/golang.org/x/exp/@v/v0.0.0-20240409090435-93d18d7e34b8.info
new file mode 100644
index 0000000..d1a84ab
--- /dev/null
+++ b/dependencies/pkg/mod/cache/download/golang.org/x/exp/@v/v0.0.0-20240409090435-93d18d7e34b8.info
@@ -0,0 +1 @@
+{"Version":"v0.0.0-20240409090435-93d18d7e34b8","Time":"2024-04-09T09:04:35Z","Origin":{"VCS":"git","URL":"https://go.googlesource.com/exp","Hash":"93d18d7e34b8aac3374a4710606eb1702f2d44ff"}} \ No newline at end of file
diff --git a/dependencies/pkg/mod/cache/download/golang.org/x/exp/@v/v0.0.0-20240409090435-93d18d7e34b8.mod b/dependencies/pkg/mod/cache/download/golang.org/x/exp/@v/v0.0.0-20240409090435-93d18d7e34b8.mod
new file mode 100644
index 0000000..fdf0cae
--- /dev/null
+++ b/dependencies/pkg/mod/cache/download/golang.org/x/exp/@v/v0.0.0-20240409090435-93d18d7e34b8.mod
@@ -0,0 +1,11 @@
+module golang.org/x/exp
+
+go 1.20
+
+require (
+ github.com/google/go-cmp v0.5.8
+ golang.org/x/mod v0.17.0
+ golang.org/x/tools v0.20.0
+)
+
+require golang.org/x/sync v0.7.0 // indirect
diff --git a/dependencies/pkg/mod/cache/download/golang.org/x/sync/@v/list b/dependencies/pkg/mod/cache/download/golang.org/x/sync/@v/list
index 268b033..8b20e48 100644
--- a/dependencies/pkg/mod/cache/download/golang.org/x/sync/@v/list
+++ b/dependencies/pkg/mod/cache/download/golang.org/x/sync/@v/list
@@ -1 +1 @@
-v0.3.0
+v0.7.0
diff --git a/dependencies/pkg/mod/cache/download/golang.org/x/sync/@v/v0.3.0.info b/dependencies/pkg/mod/cache/download/golang.org/x/sync/@v/v0.3.0.info
deleted file mode 100644
index c77bee0..0000000
--- a/dependencies/pkg/mod/cache/download/golang.org/x/sync/@v/v0.3.0.info
+++ /dev/null
@@ -1 +0,0 @@
-{"Version":"v0.3.0","Time":"2023-06-01T20:35:10Z","Origin":{"VCS":"git","URL":"https://go.googlesource.com/sync","Ref":"refs/tags/v0.3.0","Hash":"93782cc822b6b554cb7df40332fd010f0473cbc8"}} \ No newline at end of file
diff --git a/dependencies/pkg/mod/cache/download/golang.org/x/sync/@v/v0.3.0.zip b/dependencies/pkg/mod/cache/download/golang.org/x/sync/@v/v0.3.0.zip
deleted file mode 100644
index 4e24585..0000000
--- a/dependencies/pkg/mod/cache/download/golang.org/x/sync/@v/v0.3.0.zip
+++ /dev/null
Binary files differ
diff --git a/dependencies/pkg/mod/cache/download/golang.org/x/sync/@v/v0.3.0.ziphash b/dependencies/pkg/mod/cache/download/golang.org/x/sync/@v/v0.3.0.ziphash
deleted file mode 100644
index 6f70f06..0000000
--- a/dependencies/pkg/mod/cache/download/golang.org/x/sync/@v/v0.3.0.ziphash
+++ /dev/null
@@ -1 +0,0 @@
-h1:ftCYgMx6zT/asHUrPw8BLLscYtGznsLAnjq5RH9P66E= \ No newline at end of file
diff --git a/dependencies/pkg/mod/cache/download/golang.org/x/sync/@v/v0.7.0.info b/dependencies/pkg/mod/cache/download/golang.org/x/sync/@v/v0.7.0.info
new file mode 100644
index 0000000..f3e986e
--- /dev/null
+++ b/dependencies/pkg/mod/cache/download/golang.org/x/sync/@v/v0.7.0.info
@@ -0,0 +1 @@
+{"Version":"v0.7.0","Time":"2024-03-04T17:26:02Z","Origin":{"VCS":"git","URL":"https://go.googlesource.com/sync","Ref":"refs/tags/v0.7.0","Hash":"14be23e5b48bec28285f8a694875175ecacfddb3"}} \ No newline at end of file
diff --git a/dependencies/pkg/mod/github.com/cespare/xxhash/v2@v2.1.2/go.sum b/dependencies/pkg/mod/cache/download/golang.org/x/sync/@v/v0.7.0.lock
index e69de29..e69de29 100644
--- a/dependencies/pkg/mod/github.com/cespare/xxhash/v2@v2.1.2/go.sum
+++ b/dependencies/pkg/mod/cache/download/golang.org/x/sync/@v/v0.7.0.lock
diff --git a/dependencies/pkg/mod/cache/download/golang.org/x/sync/@v/v0.3.0.mod b/dependencies/pkg/mod/cache/download/golang.org/x/sync/@v/v0.7.0.mod
index 782b734..74bd0ac 100644
--- a/dependencies/pkg/mod/cache/download/golang.org/x/sync/@v/v0.3.0.mod
+++ b/dependencies/pkg/mod/cache/download/golang.org/x/sync/@v/v0.7.0.mod
@@ -1,3 +1,3 @@
module golang.org/x/sync
-go 1.17
+go 1.18
diff --git a/dependencies/pkg/mod/cache/download/golang.org/x/sync/@v/v0.7.0.zip b/dependencies/pkg/mod/cache/download/golang.org/x/sync/@v/v0.7.0.zip
new file mode 100644
index 0000000..3ee7169
--- /dev/null
+++ b/dependencies/pkg/mod/cache/download/golang.org/x/sync/@v/v0.7.0.zip
Binary files differ
diff --git a/dependencies/pkg/mod/cache/download/golang.org/x/sync/@v/v0.7.0.ziphash b/dependencies/pkg/mod/cache/download/golang.org/x/sync/@v/v0.7.0.ziphash
new file mode 100644
index 0000000..e3a2e22
--- /dev/null
+++ b/dependencies/pkg/mod/cache/download/golang.org/x/sync/@v/v0.7.0.ziphash
@@ -0,0 +1 @@
+h1:YsImfSBoP9QPYL0xyKJPq0gcaJdG3rInoqxTWbfQu9M= \ No newline at end of file
diff --git a/dependencies/pkg/mod/cache/download/golang.org/x/sys/@v/list b/dependencies/pkg/mod/cache/download/golang.org/x/sys/@v/list
index d52b9d4..e854994 100644
--- a/dependencies/pkg/mod/cache/download/golang.org/x/sys/@v/list
+++ b/dependencies/pkg/mod/cache/download/golang.org/x/sys/@v/list
@@ -4,7 +4,6 @@ v0.0.0-20210320140829-1e4c9ba3b0c4
v0.0.0-20210514084401-e8d321eab015
v0.0.0-20210630005230-0f9fa26af87c
v0.0.0-20211019181941-9d821ace8654
-v0.0.0-20211216021012-1d35b9e2eb4e
v0.0.0-20220811171246-fbc7d0a398ab
v0.6.0
-v0.11.0
+v0.19.0
diff --git a/dependencies/pkg/mod/cache/download/golang.org/x/sys/@v/v0.0.0-20211216021012-1d35b9e2eb4e.mod b/dependencies/pkg/mod/cache/download/golang.org/x/sys/@v/v0.0.0-20211216021012-1d35b9e2eb4e.mod
deleted file mode 100644
index 29eb4d2..0000000
--- a/dependencies/pkg/mod/cache/download/golang.org/x/sys/@v/v0.0.0-20211216021012-1d35b9e2eb4e.mod
+++ /dev/null
@@ -1,3 +0,0 @@
-module golang.org/x/sys
-
-go 1.17
diff --git a/dependencies/pkg/mod/cache/download/golang.org/x/sys/@v/v0.11.0.info b/dependencies/pkg/mod/cache/download/golang.org/x/sys/@v/v0.11.0.info
deleted file mode 100644
index 12e4509..0000000
--- a/dependencies/pkg/mod/cache/download/golang.org/x/sys/@v/v0.11.0.info
+++ /dev/null
@@ -1 +0,0 @@
-{"Version":"v0.11.0","Time":"2023-07-26T11:14:25Z","Origin":{"VCS":"git","URL":"https://go.googlesource.com/sys","Ref":"refs/tags/v0.11.0","Hash":"104d4017fa052d31a480218d213787543bc352d4"}} \ No newline at end of file
diff --git a/dependencies/pkg/mod/cache/download/golang.org/x/sys/@v/v0.19.0.info b/dependencies/pkg/mod/cache/download/golang.org/x/sys/@v/v0.19.0.info
new file mode 100644
index 0000000..9fc4699
--- /dev/null
+++ b/dependencies/pkg/mod/cache/download/golang.org/x/sys/@v/v0.19.0.info
@@ -0,0 +1 @@
+{"Version":"v0.19.0","Time":"2024-04-04T14:40:38Z","Origin":{"VCS":"git","URL":"https://go.googlesource.com/sys","Ref":"refs/tags/v0.19.0","Hash":"cabba82f75d7f55a0657810d02d534745dee5d59"}} \ No newline at end of file
diff --git a/dependencies/pkg/mod/cache/download/golang.org/x/sys/@v/v0.11.0.mod b/dependencies/pkg/mod/cache/download/golang.org/x/sys/@v/v0.19.0.mod
index 29eb4d2..9e1e4d5 100644
--- a/dependencies/pkg/mod/cache/download/golang.org/x/sys/@v/v0.11.0.mod
+++ b/dependencies/pkg/mod/cache/download/golang.org/x/sys/@v/v0.19.0.mod
@@ -1,3 +1,3 @@
module golang.org/x/sys
-go 1.17
+go 1.18
diff --git a/dependencies/pkg/mod/cache/download/golang.org/x/xerrors/@v/list b/dependencies/pkg/mod/cache/download/golang.org/x/xerrors/@v/list
index 44bf687..5f449ba 100644
--- a/dependencies/pkg/mod/cache/download/golang.org/x/xerrors/@v/list
+++ b/dependencies/pkg/mod/cache/download/golang.org/x/xerrors/@v/list
@@ -1,2 +1,3 @@
v0.0.0-20200804184101-5ec99f83aff1
v0.0.0-20220907171357-04be3eba64a2
+v0.0.0-20231012003039-104605ab7028
diff --git a/dependencies/pkg/mod/cache/download/golang.org/x/xerrors/@v/v0.0.0-20220907171357-04be3eba64a2.info b/dependencies/pkg/mod/cache/download/golang.org/x/xerrors/@v/v0.0.0-20220907171357-04be3eba64a2.info
deleted file mode 100644
index a46d865..0000000
--- a/dependencies/pkg/mod/cache/download/golang.org/x/xerrors/@v/v0.0.0-20220907171357-04be3eba64a2.info
+++ /dev/null
@@ -1 +0,0 @@
-{"Version":"v0.0.0-20220907171357-04be3eba64a2","Time":"2022-09-07T17:13:57Z","Origin":{"VCS":"git","URL":"https://go.googlesource.com/xerrors","Hash":"04be3eba64a22a838cdb17b8dca15a52871c08b4"}} \ No newline at end of file
diff --git a/dependencies/pkg/mod/cache/download/golang.org/x/xerrors/@v/v0.0.0-20231012003039-104605ab7028.info b/dependencies/pkg/mod/cache/download/golang.org/x/xerrors/@v/v0.0.0-20231012003039-104605ab7028.info
new file mode 100644
index 0000000..91f16de
--- /dev/null
+++ b/dependencies/pkg/mod/cache/download/golang.org/x/xerrors/@v/v0.0.0-20231012003039-104605ab7028.info
@@ -0,0 +1 @@
+{"Version":"v0.0.0-20231012003039-104605ab7028","Time":"2023-10-12T00:30:39Z","Origin":{"VCS":"git","URL":"https://go.googlesource.com/xerrors","Hash":"104605ab7028f4af38a8aff92ac848a51bd53c5d"}} \ No newline at end of file
diff --git a/dependencies/pkg/mod/cache/download/golang.org/x/xerrors/@v/v0.0.0-20231012003039-104605ab7028.mod b/dependencies/pkg/mod/cache/download/golang.org/x/xerrors/@v/v0.0.0-20231012003039-104605ab7028.mod
new file mode 100644
index 0000000..f1a2526
--- /dev/null
+++ b/dependencies/pkg/mod/cache/download/golang.org/x/xerrors/@v/v0.0.0-20231012003039-104605ab7028.mod
@@ -0,0 +1,3 @@
+module golang.org/x/xerrors
+
+go 1.18
diff --git a/dependencies/pkg/mod/cache/download/sumdb/sum.golang.org/lookup/github.com/cespare/xxhash/v2@v2.2.0 b/dependencies/pkg/mod/cache/download/sumdb/sum.golang.org/lookup/github.com/cespare/xxhash/v2@v2.2.0
deleted file mode 100644
index 445809b..0000000
--- a/dependencies/pkg/mod/cache/download/sumdb/sum.golang.org/lookup/github.com/cespare/xxhash/v2@v2.2.0
+++ /dev/null
@@ -1,9 +0,0 @@
-14141377
-github.com/cespare/xxhash/v2 v2.2.0 h1:DC2CZ1Ep5Y4k3ZQ899DldepgrayRUGE6BBZ/cd9Cj44=
-github.com/cespare/xxhash/v2 v2.2.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
-
-go.sum database tree
-18952661
-i1oopLcqTfH+RdRDBk5dFQOjO1oY56bBYX+dZONANl0=
-
-— sum.golang.org Az3grnkF7D4uqOVcpIHhVXugC+wkG8G47Z7oK13FC59I68OHfs/hIOyKJ93X09006pPgTvCO82fU0cmXEq42hOAqfQk=
diff --git a/dependencies/pkg/mod/cache/download/sumdb/sum.golang.org/lookup/github.com/cespare/xxhash/v2@v2.3.0 b/dependencies/pkg/mod/cache/download/sumdb/sum.golang.org/lookup/github.com/cespare/xxhash/v2@v2.3.0
new file mode 100644
index 0000000..2b2820b
--- /dev/null
+++ b/dependencies/pkg/mod/cache/download/sumdb/sum.golang.org/lookup/github.com/cespare/xxhash/v2@v2.3.0
@@ -0,0 +1,9 @@
+24962514
+github.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UFvs=
+github.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
+
+go.sum database tree
+25096923
+LiqZtBdWKd+Anbu0kjmGaVEF2SsvrZtPQxC1bujqO/M=
+
+— sum.golang.org Az3grl7E+FU03gK92d4JtmJMyfmCh5d2Hy5U0QjkX/mLaMC+NbEBYW5cKMFreDtFYtLTMBTL2MolRvdDafHn+5XZzQ0=
diff --git a/dependencies/pkg/mod/cache/download/sumdb/sum.golang.org/lookup/github.com/davecgh/go-spew@v1.1.0 b/dependencies/pkg/mod/cache/download/sumdb/sum.golang.org/lookup/github.com/davecgh/go-spew@v1.1.0
index cdf14c4..af5d323 100644
--- a/dependencies/pkg/mod/cache/download/sumdb/sum.golang.org/lookup/github.com/davecgh/go-spew@v1.1.0
+++ b/dependencies/pkg/mod/cache/download/sumdb/sum.golang.org/lookup/github.com/davecgh/go-spew@v1.1.0
@@ -3,7 +3,7 @@ github.com/davecgh/go-spew v1.1.0 h1:ZDRjVQ15GmhC3fiQ8ni8+OwkZQO4DARzQgrnXU1Liz8
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
go.sum database tree
-18951877
-osZm+4uGCX5SbYJ9yCvDFB2TyttEPMnkPrvTCAVE/+0=
+25096469
+gsd683C9t/9nYNjzznzpdVrgZ5UmMt7gS31MWKC9zlQ=
-— sum.golang.org Az3gruku2W6npuP+VGmOcLTEzr90e9hAdTK3L46MsSnoK90tXPjTpxpjzFgn0imk23A1ZEwHC/kcjI/P8dmxGURHpQE=
+— sum.golang.org Az3grpl7gQbLTidhSfCew38xhvNWp3lnbKVhHMzCuuzPAAGwd8/2XVzG+AX6Hxln/GhiM+PcXHoEHc2Wwr+CaMcaFQ4=
diff --git a/dependencies/pkg/mod/cache/download/sumdb/sum.golang.org/lookup/github.com/fatih/color@v1.10.0 b/dependencies/pkg/mod/cache/download/sumdb/sum.golang.org/lookup/github.com/fatih/color@v1.10.0
index 548ec8b..c93626b 100644
--- a/dependencies/pkg/mod/cache/download/sumdb/sum.golang.org/lookup/github.com/fatih/color@v1.10.0
+++ b/dependencies/pkg/mod/cache/download/sumdb/sum.golang.org/lookup/github.com/fatih/color@v1.10.0
@@ -3,7 +3,7 @@ github.com/fatih/color v1.10.0 h1:s36xzo75JdqLaaWoiEHk767eHiwo0598uUxyfiPkDsg=
github.com/fatih/color v1.10.0/go.mod h1:ELkj/draVOlAH/xkhN6mQ50Qd0MPOk5AAr3maGEBuJM=
go.sum database tree
-18953596
-QSBe88T4pRk6Nd+8Jr2UUqzMBpkTZyUJWUSVP+JRYAA=
+25096162
+4AxFS54e3N6of8STnSeoZ1/O8YrlLEZRHKQhdsVyAK0=
-— sum.golang.org Az3grtZ6u69dhgtKtOveKztG6bmREd0JQU3svVEBwuzoLk2wHldqnuGd72U/ceVPML9GjNgPxnwJ0Q+nD6zPfVXNqwA=
+— sum.golang.org Az3grmRjhvzLCvNhn+CWQMP6P8WB6f3Ycmr5avNvjUsAAJ4IF5CnAsnEIWWkHdtJ6NGb/PDUytxD/EF5YECS0Lk/pg0=
diff --git a/dependencies/pkg/mod/cache/download/sumdb/sum.golang.org/lookup/github.com/fatih/color@v1.15.0 b/dependencies/pkg/mod/cache/download/sumdb/sum.golang.org/lookup/github.com/fatih/color@v1.15.0
deleted file mode 100644
index d75280b..0000000
--- a/dependencies/pkg/mod/cache/download/sumdb/sum.golang.org/lookup/github.com/fatih/color@v1.15.0
+++ /dev/null
@@ -1,9 +0,0 @@
-16365996
-github.com/fatih/color v1.15.0 h1:kOqh6YHBtK8aywxGerMG2Eq3H6Qgoqeo13Bk2Mv/nBs=
-github.com/fatih/color v1.15.0/go.mod h1:0h5ZqXfHYED7Bhv2ZJamyIOUej9KtShiJESRwBDUSsw=
-
-go.sum database tree
-18953125
-8ZuliMaVO53F1blR6CsRNpNMbdWVORlRDtlYlHCkPG8=
-
-— sum.golang.org Az3grqn7jughv7yUNWhOB9nmpDegZy62mdk5jVS+cBqpRbe2tj0EZOP5dC/0fOi+NM7GJPoGMI5MrU/7fBAdNOCtOQU=
diff --git a/dependencies/pkg/mod/cache/download/sumdb/sum.golang.org/lookup/github.com/fatih/color@v1.16.0 b/dependencies/pkg/mod/cache/download/sumdb/sum.golang.org/lookup/github.com/fatih/color@v1.16.0
new file mode 100644
index 0000000..369a561
--- /dev/null
+++ b/dependencies/pkg/mod/cache/download/sumdb/sum.golang.org/lookup/github.com/fatih/color@v1.16.0
@@ -0,0 +1,9 @@
+20502185
+github.com/fatih/color v1.16.0 h1:zmkK9Ngbjj+K0yRhTVONQh1p/HknKYSlNT+vZCzyokM=
+github.com/fatih/color v1.16.0/go.mod h1:fL2Sau1YI5c0pdGEVCbKQbLXB6edEj1ZgiY4NijnWvE=
+
+go.sum database tree
+25096792
+qg+TuA1IOBOVUg/Ux34NgGzLG9gn1+nX380PrfDq8Po=
+
+— sum.golang.org Az3groCmffSTw0WOqF3rRZlIu4o/Vvb9T1JTSkKROUPtGIt4W6mpv0Hm9n1l5pgGAUSlwlTJTEQmq0t+h+ESoXwtMg8=
diff --git a/dependencies/pkg/mod/cache/download/sumdb/sum.golang.org/lookup/github.com/mattn/go-colorable@v0.1.8 b/dependencies/pkg/mod/cache/download/sumdb/sum.golang.org/lookup/github.com/mattn/go-colorable@v0.1.8
index b9e92a5..e62b340 100644
--- a/dependencies/pkg/mod/cache/download/sumdb/sum.golang.org/lookup/github.com/mattn/go-colorable@v0.1.8
+++ b/dependencies/pkg/mod/cache/download/sumdb/sum.golang.org/lookup/github.com/mattn/go-colorable@v0.1.8
@@ -3,7 +3,7 @@ github.com/mattn/go-colorable v0.1.8 h1:c1ghPdyEDarC70ftn0y+A/Ee++9zz8ljHG1b13eJ
github.com/mattn/go-colorable v0.1.8/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc=
go.sum database tree
-18952638
-mdX0MUJqYqwGf5MgVR/FR/mc/J4txT/Wh4WYvBWox3o=
+25097952
+VBBtlsjLu0ofKP8a6IKydRTMHAF85ZvvI1MxESBrcTw=
-— sum.golang.org Az3grujzziW9B3C3dPms3TpPoUW0Bh6uXbToewJAV3NpxJMp9xaFCjfYzGexVL/9b3nhs7c04wou3pCVcfawUUeTjQQ=
+— sum.golang.org Az3grhKZhAVI4Gn9Qgj0oDOVPtzWJG+7r9KFy1YRx556tilmEdaTNFr41erNpj7r6maOQ47CKQj6RivpHt3KH023KwM=
diff --git a/dependencies/pkg/mod/cache/download/sumdb/sum.golang.org/lookup/github.com/mattn/go-isatty@v0.0.19 b/dependencies/pkg/mod/cache/download/sumdb/sum.golang.org/lookup/github.com/mattn/go-isatty@v0.0.19
deleted file mode 100644
index 561cde6..0000000
--- a/dependencies/pkg/mod/cache/download/sumdb/sum.golang.org/lookup/github.com/mattn/go-isatty@v0.0.19
+++ /dev/null
@@ -1,9 +0,0 @@
-17579932
-github.com/mattn/go-isatty v0.0.19 h1:JITubQf0MOLdlGRuRq+jtsDlekdYPia9ZFsB8h/APPA=
-github.com/mattn/go-isatty v0.0.19/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y=
-
-go.sum database tree
-18952034
-i+6Lip+RnoQYhdWNDqamm7qzi1wJ5Ggpm1LZfd3hkX0=
-
-— sum.golang.org Az3grvC15CdzFvb/sSqZl01F79mAiU0QeJfLv0LK71sZiy52ChQO7IDa7+fG89Klp834sYMSl4B/LNz/NH56q/HJQws=
diff --git a/dependencies/pkg/mod/cache/download/sumdb/sum.golang.org/lookup/github.com/mattn/go-isatty@v0.0.20 b/dependencies/pkg/mod/cache/download/sumdb/sum.golang.org/lookup/github.com/mattn/go-isatty@v0.0.20
new file mode 100644
index 0000000..c0cdcf7
--- /dev/null
+++ b/dependencies/pkg/mod/cache/download/sumdb/sum.golang.org/lookup/github.com/mattn/go-isatty@v0.0.20
@@ -0,0 +1,9 @@
+20103885
+github.com/mattn/go-isatty v0.0.20 h1:xfD0iDuEKnDkl03q4limB+vH+GxLEtL/jb4xVJSWWEY=
+github.com/mattn/go-isatty v0.0.20/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y=
+
+go.sum database tree
+25097823
+nAhyziV3Kpeo/+WLiO+o57Kt1eJhQ55AJgW6NnZlnAU=
+
+— sum.golang.org Az3grngd5bRt3P5fzWmjtwwoxok3++E01ukFT6Vt8u8AdHQXnIRvURD/RjqkEhWJx2l5Pf59PWzZ1WyTjIW2xDlC5Q8=
diff --git a/dependencies/pkg/mod/cache/download/sumdb/sum.golang.org/lookup/github.com/mattn/go-runewidth@v0.0.15 b/dependencies/pkg/mod/cache/download/sumdb/sum.golang.org/lookup/github.com/mattn/go-runewidth@v0.0.15
index bc8a2ed..40226a6 100644
--- a/dependencies/pkg/mod/cache/download/sumdb/sum.golang.org/lookup/github.com/mattn/go-runewidth@v0.0.15
+++ b/dependencies/pkg/mod/cache/download/sumdb/sum.golang.org/lookup/github.com/mattn/go-runewidth@v0.0.15
@@ -3,7 +3,7 @@ github.com/mattn/go-runewidth v0.0.15 h1:UNAjwbU9l54TA3KzvqLGxwWjHmMgBUVhBiTjelZ
github.com/mattn/go-runewidth v0.0.15/go.mod h1:Jdepj2loyihRzMpdS35Xk/zdY8IAYHsh153qUoGf23w=
go.sum database tree
-18954122
-ueKaAz+x9Ed0b2h+3sFs/CCKpKmxNi9HlAqT2akr9b4=
+25097955
+gkiyFx75xsUmlN2G1BLSj49TRPdf1yMn95CIxt4/C2Y=
-— sum.golang.org Az3grhMncUODC4LNDsijagZ6Mipiz47qDssMbbKDPvxYKFQMyIAlekTVZFI8wrR7Z61qNN8JOXY7mmBVLFfvK2PCPwg=
+— sum.golang.org Az3grhXF53na7sglMFhGsisjkRmtI1nyPAkN4e89oSAyAOPLLBJpqKzg/Yxcx6vtDOu5fJJfXb1FAuAkqO+wZ0KNCgU=
diff --git a/dependencies/pkg/mod/cache/download/sumdb/sum.golang.org/lookup/github.com/rivo/uniseg@v0.4.4 b/dependencies/pkg/mod/cache/download/sumdb/sum.golang.org/lookup/github.com/rivo/uniseg@v0.4.4
deleted file mode 100644
index e49f3b8..0000000
--- a/dependencies/pkg/mod/cache/download/sumdb/sum.golang.org/lookup/github.com/rivo/uniseg@v0.4.4
+++ /dev/null
@@ -1,9 +0,0 @@
-15724554
-github.com/rivo/uniseg v0.4.4 h1:8TfxU8dW6PdqD27gjM8MVNuicgxIjxpm4K7x4jp8sis=
-github.com/rivo/uniseg v0.4.4/go.mod h1:FN3SvrM+Zdj16jyLfmOkMNblXMcoc8DfTHruCPUcx88=
-
-go.sum database tree
-18953900
-tO4FC17UQWSXrRj4U0l7o+kqIi8B4PK5VfyN1s0njAc=
-
-— sum.golang.org Az3grhhaEHMLzJm4fb4iko2VTpO6Gc+07cJUSx9phxpGospjgQsCF/Iuwm8R+NhEnnMQfChVlC4yx+egTDehgnq8PwU=
diff --git a/dependencies/pkg/mod/cache/download/sumdb/sum.golang.org/lookup/github.com/rivo/uniseg@v0.4.7 b/dependencies/pkg/mod/cache/download/sumdb/sum.golang.org/lookup/github.com/rivo/uniseg@v0.4.7
new file mode 100644
index 0000000..66da729
--- /dev/null
+++ b/dependencies/pkg/mod/cache/download/sumdb/sum.golang.org/lookup/github.com/rivo/uniseg@v0.4.7
@@ -0,0 +1,9 @@
+22647345
+github.com/rivo/uniseg v0.4.7 h1:WUdvkW8uEhrYfLC4ZzdpI2ztxP1I582+49Oc5Mq64VQ=
+github.com/rivo/uniseg v0.4.7/go.mod h1:FN3SvrM+Zdj16jyLfmOkMNblXMcoc8DfTHruCPUcx88=
+
+go.sum database tree
+25097955
+gkiyFx75xsUmlN2G1BLSj49TRPdf1yMn95CIxt4/C2Y=
+
+— sum.golang.org Az3grhXF53na7sglMFhGsisjkRmtI1nyPAkN4e89oSAyAOPLLBJpqKzg/Yxcx6vtDOu5fJJfXb1FAuAkqO+wZ0KNCgU=
diff --git a/dependencies/pkg/mod/cache/download/sumdb/sum.golang.org/lookup/github.com/stretchr/objx@v0.1.0 b/dependencies/pkg/mod/cache/download/sumdb/sum.golang.org/lookup/github.com/stretchr/objx@v0.1.0
index 496222a..2245027 100644
--- a/dependencies/pkg/mod/cache/download/sumdb/sum.golang.org/lookup/github.com/stretchr/objx@v0.1.0
+++ b/dependencies/pkg/mod/cache/download/sumdb/sum.golang.org/lookup/github.com/stretchr/objx@v0.1.0
@@ -3,7 +3,7 @@ github.com/stretchr/objx v0.1.0 h1:4G4v2dO3VZwixGIRoQ5Lfboy6nUhCyYzaqnIAPPhYs4=
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
go.sum database tree
-18951892
-TMeeFJOVLBn2tDf5yXoFJ3Eam5gNKUW1nx+J/M4MgsI=
+25097855
+JiyVCHPHBTVNoh1EvkOngeZyAB1JkYKswzlp77odRR0=
-— sum.golang.org Az3grmjouT6NJ3Yy5kRW7MEIlAg6Xh8p5teDZP7+P7cQUiOmfnlUxDt9Hjy8v3+u6I6frSgwZrmQeluU/26AkeswTQ0=
+— sum.golang.org Az3grku2YlSxuGz1pTtwFfdcHDshq2ttt0imaNgIFK5mEJR8p2CMtrwlRoBiHeBR2hiv85pYtaQV56y4tqwlzVkJNAo=
diff --git a/dependencies/pkg/mod/cache/download/sumdb/sum.golang.org/lookup/github.com/stretchr/objx@v0.4.0 b/dependencies/pkg/mod/cache/download/sumdb/sum.golang.org/lookup/github.com/stretchr/objx@v0.4.0
index 38a0532..1421194 100644
--- a/dependencies/pkg/mod/cache/download/sumdb/sum.golang.org/lookup/github.com/stretchr/objx@v0.4.0
+++ b/dependencies/pkg/mod/cache/download/sumdb/sum.golang.org/lookup/github.com/stretchr/objx@v0.4.0
@@ -3,7 +3,7 @@ github.com/stretchr/objx v0.4.0 h1:M2gUjqZET1qApGOWNSnZ49BAIMX4F/1plDv3+l31EJ4=
github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw=
go.sum database tree
-18952661
-i1oopLcqTfH+RdRDBk5dFQOjO1oY56bBYX+dZONANl0=
+25097547
+ym1dfy/lKbFR7ghPptVgGUHEWwHfd165Dj7bTW2quKQ=
-— sum.golang.org Az3grnkF7D4uqOVcpIHhVXugC+wkG8G47Z7oK13FC59I68OHfs/hIOyKJ93X09006pPgTvCO82fU0cmXEq42hOAqfQk=
+— sum.golang.org Az3gruKgyXcy38u+tZ4bZJ5D/pH5d3Beic4hOWwZUeJn6sRMZA6Dm14qcmC2hWbo2EZFB9ib96oJQ/8tsh7KrJGTtAk=
diff --git a/dependencies/pkg/mod/cache/download/sumdb/sum.golang.org/lookup/github.com/stretchr/objx@v0.5.0 b/dependencies/pkg/mod/cache/download/sumdb/sum.golang.org/lookup/github.com/stretchr/objx@v0.5.0
index 55e146d..3206337 100644
--- a/dependencies/pkg/mod/cache/download/sumdb/sum.golang.org/lookup/github.com/stretchr/objx@v0.5.0
+++ b/dependencies/pkg/mod/cache/download/sumdb/sum.golang.org/lookup/github.com/stretchr/objx@v0.5.0
@@ -3,7 +3,7 @@ github.com/stretchr/objx v0.5.0 h1:1zr/of2m5FGMsad5YfcqgdqdWrIhu+EBEJRhR1U7z/c=
github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo=
go.sum database tree
-18953324
-FQxwX09tk1KFbnecwXTvLNfFEifulcnh3i48EjeaWJk=
+25096394
+CjDi4xe/8kqDqoJky7SySz2x6N40w/9+IH6ut+wKThs=
-— sum.golang.org Az3grvy83CHdaPby3LXTrMcEj26m/UpJqgCET1TZLybI8/U6VXMJsUJETtFR/YHFZYy+r2CvcOhjjq8pgRTb/GCMKA0=
+— sum.golang.org Az3grob2UOmC32hnfrcDsQ7BMTkiXBJ+vmY4aGFyA3IbDNx/5//lHfwCN9h+8jYX4LvfJlvOeSwXdzI/jU3OJgZarwM=
diff --git a/dependencies/pkg/mod/cache/download/sumdb/sum.golang.org/lookup/github.com/stretchr/testify@v1.7.0 b/dependencies/pkg/mod/cache/download/sumdb/sum.golang.org/lookup/github.com/stretchr/testify@v1.7.0
index 46aa4a4..ed668b7 100644
--- a/dependencies/pkg/mod/cache/download/sumdb/sum.golang.org/lookup/github.com/stretchr/testify@v1.7.0
+++ b/dependencies/pkg/mod/cache/download/sumdb/sum.golang.org/lookup/github.com/stretchr/testify@v1.7.0
@@ -3,7 +3,7 @@ github.com/stretchr/testify v1.7.0 h1:nwc3DEeHmmLAfoZucVR881uASk0Mfjw8xYJ99tb5Cc
github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
go.sum database tree
-18953957
-7j+LCgWWNrPUxM5U8tqVHxEcCyFK2mxpZ9VT4ynbjbs=
+25096150
+D5Bun3mkLTzv48hY/f9reZerrAhlrun6yPQ3Ag94Klg=
-— sum.golang.org Az3grjQvZryKSAY6fG7oIEL1ibb6p/dt1NtJ9Y9zDRN+UqW1FSR8InLPpdXzvQPNhh+XSS9sda78HztJCOMKVjq0gAQ=
+— sum.golang.org Az3grsKRB0O/KJKj7DdLVjTKsddQ7yehnfgmuc1A62x4cfhvFhyGNMUuAR30l5fLE03BCduxcoC5ttxetjzZDN1NBAo=
diff --git a/dependencies/pkg/mod/cache/download/sumdb/sum.golang.org/lookup/github.com/stretchr/testify@v1.7.1 b/dependencies/pkg/mod/cache/download/sumdb/sum.golang.org/lookup/github.com/stretchr/testify@v1.7.1
index d2b5435..5a1185b 100644
--- a/dependencies/pkg/mod/cache/download/sumdb/sum.golang.org/lookup/github.com/stretchr/testify@v1.7.1
+++ b/dependencies/pkg/mod/cache/download/sumdb/sum.golang.org/lookup/github.com/stretchr/testify@v1.7.1
@@ -3,7 +3,7 @@ github.com/stretchr/testify v1.7.1 h1:5TQK59W5E3v0r2duFAb7P95B6hEeOyEnHRa8MjYSMT
github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
go.sum database tree
-18953358
-UZSPg702sdb0P/u4Kp+hw6wDHmU3K8BG/L9DLa6lPcU=
+25096746
+AVQK3gq/nPQvGKwGY7c69jOV4yD0yKsEgT61wYKDwWk=
-— sum.golang.org Az3grrq91obQbqOhODYepCtelpXP2999TBWZd6049httAnH4tmt+d9fh5a2e1gkvKXtz3eYTR4LKcaOGQ2kfdiZzGg8=
+— sum.golang.org Az3grqOVx6kUOAdO9i3/HYF1HoTSSIVLs5QKMTn0/0Z2gFCfhPuw+HIVknzlTnh3CGWpInX28OR+MqWBHVsXDAbnwgg=
diff --git a/dependencies/pkg/mod/cache/download/sumdb/sum.golang.org/lookup/github.com/stretchr/testify@v1.8.0 b/dependencies/pkg/mod/cache/download/sumdb/sum.golang.org/lookup/github.com/stretchr/testify@v1.8.0
index d0aa33e..45eb08d 100644
--- a/dependencies/pkg/mod/cache/download/sumdb/sum.golang.org/lookup/github.com/stretchr/testify@v1.8.0
+++ b/dependencies/pkg/mod/cache/download/sumdb/sum.golang.org/lookup/github.com/stretchr/testify@v1.8.0
@@ -3,7 +3,7 @@ github.com/stretchr/testify v1.8.0 h1:pSgiaMZlXftHpm5L7V1+rVB+AZJydKsMxsQBIJw4PK
github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU=
go.sum database tree
-18952802
-2SdxUnGw6E9CjAEIAP2SDJE8kHQojFB+qQv7tTRfC4o=
+25097591
+zNpwnNIrUSMnfy2aJa8aPGGqL7Zm0Wexn2cw6t0cG5s=
-— sum.golang.org Az3grr3s+yjQghBbJFV1fx0JzJI4KRXrD3kMsRJn/K8mCwZD90sCRCeJdf+F+tFNnhMZ4k066X37G7eekWc52BgSKQo=
+— sum.golang.org Az3grjLITMCR7h0K/bF+VEOPaOfNgpWB7GIK+2vUZOIl7RIAGboH/TFoEP2HsD8gr23ei5SbNZoxTVwctqqOLK09Aww=
diff --git a/dependencies/pkg/mod/cache/download/sumdb/sum.golang.org/lookup/github.com/stretchr/testify@v1.8.1 b/dependencies/pkg/mod/cache/download/sumdb/sum.golang.org/lookup/github.com/stretchr/testify@v1.8.1
index 7143518..57d6af3 100644
--- a/dependencies/pkg/mod/cache/download/sumdb/sum.golang.org/lookup/github.com/stretchr/testify@v1.8.1
+++ b/dependencies/pkg/mod/cache/download/sumdb/sum.golang.org/lookup/github.com/stretchr/testify@v1.8.1
@@ -3,7 +3,7 @@ github.com/stretchr/testify v1.8.1 h1:w7B6lhMri9wdJUVmEZPGGhZzrYTPvgJArz7wNPgYKs
github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4=
go.sum database tree
-18953327
-lif/1+MdkVyjgTHS0eaZFCAW8RtzuYq5gkxJ0DPiwGo=
+25097120
+solVIziE45Rd/gHZ7ykIzHdUXdd9lsiH+ij4PDjE3RA=
-— sum.golang.org Az3grvEwnrZTyo3JIyrJEPxu8GyufVXhKa5YBXp4tLywOozeaTn18V32iscgjC6GbX3QAHVrEfBMZF2CsmrLL0yIdgs=
+— sum.golang.org Az3grv3LxIRKkPhH1CdfTZ0gK/y+6CGZ5GClnZYdAUNrgm5LG42p2kqBEiYeevfZEQeIyeCU2BwnFxtIenVYY1h3Nwo=
diff --git a/dependencies/pkg/mod/cache/download/sumdb/sum.golang.org/lookup/go.uber.org/multierr@v1.11.0 b/dependencies/pkg/mod/cache/download/sumdb/sum.golang.org/lookup/go.uber.org/multierr@v1.11.0
index 33059df..51b606c 100644
--- a/dependencies/pkg/mod/cache/download/sumdb/sum.golang.org/lookup/go.uber.org/multierr@v1.11.0
+++ b/dependencies/pkg/mod/cache/download/sumdb/sum.golang.org/lookup/go.uber.org/multierr@v1.11.0
@@ -3,7 +3,7 @@ go.uber.org/multierr v1.11.0 h1:blXXJkSxSSfBVBlC76pxqeO+LN3aDfLQo+309xJstO0=
go.uber.org/multierr v1.11.0/go.mod h1:20+QtiLqy0Nd6FdQB9TLXag12DsQkrbs3htMFfDN80Y=
go.sum database tree
-18953267
-ecsXN9wUchRN5B7oMeSLD3WwbfNuGO0mtXLFjKxu0RY=
+25096408
+NXlKTExkyTmJHn7GnJM2EFIakCZE8T8iVMg5ANG/6jU=
-— sum.golang.org Az3grh/js6IrmZExK1xpHM62NC/uNL5k/X4i3vtPPBf6FQiE+suOsHHL81jlVhzEalxXg9Fz0+zySA5ZBhkSXqdNhQ4=
+— sum.golang.org Az3grvqzuW5vnO8KwgVNuhgHUIAMy5zFuSjDUbsmI13z12zuV3b059+LzzhNw/zYYbJPCTaCje6UTDXaaUX2bzR2OAs=
diff --git a/dependencies/pkg/mod/cache/download/sumdb/sum.golang.org/lookup/golang.org/x/exp@v0.0.0-20230809094429-853ea248256d b/dependencies/pkg/mod/cache/download/sumdb/sum.golang.org/lookup/golang.org/x/exp@v0.0.0-20230809094429-853ea248256d
deleted file mode 100644
index baecfbb..0000000
--- a/dependencies/pkg/mod/cache/download/sumdb/sum.golang.org/lookup/golang.org/x/exp@v0.0.0-20230809094429-853ea248256d
+++ /dev/null
@@ -1,9 +0,0 @@
-18951700
-golang.org/x/exp v0.0.0-20230809094429-853ea248256d h1:wu5bD43Ana/nF1ZmaLr3lW/FQeJU8CcI+Ln7yWHViXE=
-golang.org/x/exp v0.0.0-20230809094429-853ea248256d/go.mod h1:FXUEEKJgO7OQYeo8N01OfiKP8RXMtf6e8aTskBGqWdc=
-
-go.sum database tree
-18951965
-iSBRy2WKrv2xtYjXaEJZso0ACyIn9h5CacVEAV1urjQ=
-
-— sum.golang.org Az3grn1em1e5e2GdxE03ydNK/iCKKOjADiZYW9uiJTsvSVGwMF7jODGHuq/Tv6gYGb+UyyWQGj/jJNDUxPuROQT5Ogo=
diff --git a/dependencies/pkg/mod/cache/download/sumdb/sum.golang.org/lookup/golang.org/x/exp@v0.0.0-20240409090435-93d18d7e34b8 b/dependencies/pkg/mod/cache/download/sumdb/sum.golang.org/lookup/golang.org/x/exp@v0.0.0-20240409090435-93d18d7e34b8
new file mode 100644
index 0000000..bff57f3
--- /dev/null
+++ b/dependencies/pkg/mod/cache/download/sumdb/sum.golang.org/lookup/golang.org/x/exp@v0.0.0-20240409090435-93d18d7e34b8
@@ -0,0 +1,9 @@
+25042178
+golang.org/x/exp v0.0.0-20240409090435-93d18d7e34b8 h1:ESSUROHIBHg7USnszlcdmjBEwdMj9VUvU+OPk4yl2mc=
+golang.org/x/exp v0.0.0-20240409090435-93d18d7e34b8/go.mod h1:/lliqkxwWAhPjf5oSOIJup2XcqJaw8RGS6k3TGEc7GI=
+
+go.sum database tree
+25097946
+x5iTra47FpybGI4qDCZCOgExMjysxxXU33VJNFewwuM=
+
+— sum.golang.org Az3grmKWKB2AkHWG0YhLvmUEuBud3WqT8ppZXC32O7comGXwZAIIWVYj6wX57idh/1uqj7B3icB8EIfY9u4c7L5dBgI=
diff --git a/dependencies/pkg/mod/cache/download/sumdb/sum.golang.org/lookup/golang.org/x/sys@v0.0.0-20211019181941-9d821ace8654 b/dependencies/pkg/mod/cache/download/sumdb/sum.golang.org/lookup/golang.org/x/sys@v0.0.0-20211019181941-9d821ace8654
index 4002bf3..8c53285 100644
--- a/dependencies/pkg/mod/cache/download/sumdb/sum.golang.org/lookup/golang.org/x/sys@v0.0.0-20211019181941-9d821ace8654
+++ b/dependencies/pkg/mod/cache/download/sumdb/sum.golang.org/lookup/golang.org/x/sys@v0.0.0-20211019181941-9d821ace8654
@@ -3,7 +3,7 @@ golang.org/x/sys v0.0.0-20211019181941-9d821ace8654 h1:id054HUawV2/6IGm2IV8KZQjq
golang.org/x/sys v0.0.0-20211019181941-9d821ace8654/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
go.sum database tree
-18953039
-TSIXZlXC24m7Zf9LHUXnb3HLz5GDSAnbevERksAihe0=
+25096920
+g+9ykYTIfNS3Fnd2afnY6ppaLZ/B1WeNdoTE2FpTGjk=
-— sum.golang.org Az3grrzSdQNsgGGKRyZwwpAyGpLSS3tHxFiZLaZKRZlUf6dAY47V7DjT7Mbi5UJDGXf9Z8FSOfJeA/174CfkPtAMNQY=
+— sum.golang.org Az3grrKXYiRLI/9WgzHq8sAVFL3gXKdhHnRnLVSjmK2BnjsxVl4lhZrXINHAytywvZyxW5JlMPgGQAe9Kovf5He9Jg8=
diff --git a/dependencies/pkg/mod/cache/download/sumdb/sum.golang.org/lookup/golang.org/x/sys@v0.0.0-20211216021012-1d35b9e2eb4e b/dependencies/pkg/mod/cache/download/sumdb/sum.golang.org/lookup/golang.org/x/sys@v0.0.0-20211216021012-1d35b9e2eb4e
deleted file mode 100644
index caa8961..0000000
--- a/dependencies/pkg/mod/cache/download/sumdb/sum.golang.org/lookup/golang.org/x/sys@v0.0.0-20211216021012-1d35b9e2eb4e
+++ /dev/null
@@ -1,9 +0,0 @@
-8409401
-golang.org/x/sys v0.0.0-20211216021012-1d35b9e2eb4e h1:fLOSk5Q00efkSvAm+4xcoXD+RRmLmmulPn5I3Y9F2EM=
-golang.org/x/sys v0.0.0-20211216021012-1d35b9e2eb4e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
-
-go.sum database tree
-18951878
-iDcF9eulYT7JuxzFPIQuPmm8IXdNM+DEC+wY8IbXkhw=
-
-— sum.golang.org Az3grgqhJSu3GE7mspXQBoNGmM1TWZnfaxjzq1WXRCq0B3BJHCQrF9cVe+nRMLPVb/rhodmmJfmNewQgzI/OAtSpCwg=
diff --git a/dependencies/pkg/mod/cache/download/sumdb/sum.golang.org/lookup/golang.org/x/sys@v0.11.0 b/dependencies/pkg/mod/cache/download/sumdb/sum.golang.org/lookup/golang.org/x/sys@v0.11.0
deleted file mode 100644
index d2b6140..0000000
--- a/dependencies/pkg/mod/cache/download/sumdb/sum.golang.org/lookup/golang.org/x/sys@v0.11.0
+++ /dev/null
@@ -1,9 +0,0 @@
-18876659
-golang.org/x/sys v0.11.0 h1:eG7RXZHdqOJ1i+0lgLgCpSXAp6M3LYlAo6osgSi0xOM=
-golang.org/x/sys v0.11.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
-
-go.sum database tree
-18953129
-v3a0zWqBvLVRqbuy1MgsvxdkCJy2r7U+tU5jdhl3m6o=
-
-— sum.golang.org Az3grr1ZwsT2KZsxZVWYcLWtaL9xiLIfT3IlovSgT28YVq1+tKUDrk+HeteAuEHqbUvff+Oq+BRC7CNUnlBGJGeWdgE=
diff --git a/dependencies/pkg/mod/cache/download/sumdb/sum.golang.org/lookup/golang.org/x/sys@v0.19.0 b/dependencies/pkg/mod/cache/download/sumdb/sum.golang.org/lookup/golang.org/x/sys@v0.19.0
new file mode 100644
index 0000000..4a646e6
--- /dev/null
+++ b/dependencies/pkg/mod/cache/download/sumdb/sum.golang.org/lookup/golang.org/x/sys@v0.19.0
@@ -0,0 +1,9 @@
+24955829
+golang.org/x/sys v0.19.0 h1:q5f1RH2jigJ1MoAWp2KTp3gm5zAGFUTarQZ5U386+4o=
+golang.org/x/sys v0.19.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
+
+go.sum database tree
+25097945
+Reea3Nv7mPOKJ14wpcfZzIvm2jZZj49S3g2qofBzXWo=
+
+— sum.golang.org Az3grvi6WI/GBmeMZaXxLLPb4F/FQPdl5xFNhEbvot4VX97HFWfVAsZzX5+NuDrrvZlwmj+17VEq/fqvvLpvWgww6QQ=
diff --git a/dependencies/pkg/mod/cache/download/sumdb/sum.golang.org/lookup/golang.org/x/xerrors@v0.0.0-20200804184101-5ec99f83aff1 b/dependencies/pkg/mod/cache/download/sumdb/sum.golang.org/lookup/golang.org/x/xerrors@v0.0.0-20200804184101-5ec99f83aff1
index 4c65bfb..d5deec8 100644
--- a/dependencies/pkg/mod/cache/download/sumdb/sum.golang.org/lookup/golang.org/x/xerrors@v0.0.0-20200804184101-5ec99f83aff1
+++ b/dependencies/pkg/mod/cache/download/sumdb/sum.golang.org/lookup/golang.org/x/xerrors@v0.0.0-20200804184101-5ec99f83aff1
@@ -3,7 +3,7 @@ golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1 h1:go1bK/D/BFZV2I8cIQd1N
golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
go.sum database tree
-18952142
-bdPwON+b+dyqZTvIQjzVuy9ci0D6Jr39323shV3WxIs=
+25097431
+hfsuwpM0Cj9aEFchQ1dGwe2nG8889BdVSzipTE2wfHs=
-— sum.golang.org Az3grlkT18/nNCfYM2QtBtiaLAGI+Qp8/Wqot8C7ugQn7IlLvcGS6/jYsC+55cihYAULL1b1k9mf75NVjzdYlsb1AQU=
+— sum.golang.org Az3grrofD7EPspURXpObK3RVWO9LSmsHJij+cDfcNEQqgH0WCsF+wVPUJkWY36KPs63OW+NBFym0yvIUMh0DCX8zXwM=
diff --git a/dependencies/pkg/mod/cache/download/sumdb/sum.golang.org/lookup/golang.org/x/xerrors@v0.0.0-20231012003039-104605ab7028 b/dependencies/pkg/mod/cache/download/sumdb/sum.golang.org/lookup/golang.org/x/xerrors@v0.0.0-20231012003039-104605ab7028
new file mode 100644
index 0000000..97f0c72
--- /dev/null
+++ b/dependencies/pkg/mod/cache/download/sumdb/sum.golang.org/lookup/golang.org/x/xerrors@v0.0.0-20231012003039-104605ab7028
@@ -0,0 +1,9 @@
+19994904
+golang.org/x/xerrors v0.0.0-20231012003039-104605ab7028 h1:+cNy6SZtPcJQH3LJVLOSmiC7MMxXNOb3PU/VUEz+EhU=
+golang.org/x/xerrors v0.0.0-20231012003039-104605ab7028/go.mod h1:NDW/Ps6MPRej6fsCIbMTohpP40sJ/P/vI1MoTEGwX90=
+
+go.sum database tree
+25097477
+NvhmgMf5o2QD6j1+1PV9HSW7zyQzUzWRan3unHZD8cs=
+
+— sum.golang.org Az3grmm1EE0nfJA20onEWRWNeKzulVR5B+RnIUw4UZ2PMBR2/6JF9yzfbbDdT6TOkckLOx6+lNGrT1q0bImxJ+ExLgs=
diff --git a/dependencies/pkg/mod/cache/download/sumdb/sum.golang.org/lookup/gopkg.in/yaml.v3@v3.0.0-20200313102051-9f266ea9e77c b/dependencies/pkg/mod/cache/download/sumdb/sum.golang.org/lookup/gopkg.in/yaml.v3@v3.0.0-20200313102051-9f266ea9e77c
index 3624e85..40134ac 100644
--- a/dependencies/pkg/mod/cache/download/sumdb/sum.golang.org/lookup/gopkg.in/yaml.v3@v3.0.0-20200313102051-9f266ea9e77c
+++ b/dependencies/pkg/mod/cache/download/sumdb/sum.golang.org/lookup/gopkg.in/yaml.v3@v3.0.0-20200313102051-9f266ea9e77c
@@ -3,7 +3,7 @@ gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c h1:dUUwHk2QECo/6vqA44rthZ8ie
gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
go.sum database tree
-18953231
-oH2nMoG1WPNsOfEyhwdemlb4q1JS+S91WwZG3lEgQzY=
+25096498
+Du/7v3jrVRAeW1NFPYSI/sEe65sxK6V5KuElJbxU52I=
-— sum.golang.org Az3grl3UEB8MKdFbW2LTxviNtRsC2l79vzfooNkPBTGmjZ7PmElk5qa2Jtd/G2fpGpfVO7j0/1gNCfl4bo+pFoNphw0=
+— sum.golang.org Az3grjDrlGDDXaIKF9fq79yW8PBF3bCKT/AIv+7igQF0tEBPNSIK0715duWCIaHOCiGwplQS8T7sj+FE4wefY5aoiQE=
diff --git a/dependencies/pkg/mod/cache/download/sumdb/sum.golang.org/tile/8/0/x032/849 b/dependencies/pkg/mod/cache/download/sumdb/sum.golang.org/tile/8/0/x032/849
deleted file mode 100644
index 6cd8ecc..0000000
--- a/dependencies/pkg/mod/cache/download/sumdb/sum.golang.org/tile/8/0/x032/849
+++ /dev/null
Binary files differ
diff --git a/dependencies/pkg/mod/cache/download/sumdb/sum.golang.org/tile/8/0/x055/239 b/dependencies/pkg/mod/cache/download/sumdb/sum.golang.org/tile/8/0/x055/239
deleted file mode 100644
index f888948..0000000
--- a/dependencies/pkg/mod/cache/download/sumdb/sum.golang.org/tile/8/0/x055/239
+++ /dev/null
Binary files differ
diff --git a/dependencies/pkg/mod/cache/download/sumdb/sum.golang.org/tile/8/0/x061/424 b/dependencies/pkg/mod/cache/download/sumdb/sum.golang.org/tile/8/0/x061/424
deleted file mode 100644
index 7ab3216..0000000
--- a/dependencies/pkg/mod/cache/download/sumdb/sum.golang.org/tile/8/0/x061/424
+++ /dev/null
Binary files differ
diff --git a/dependencies/pkg/mod/cache/download/sumdb/sum.golang.org/tile/8/0/x063/929 b/dependencies/pkg/mod/cache/download/sumdb/sum.golang.org/tile/8/0/x063/929
deleted file mode 100644
index 4f918e1..0000000
--- a/dependencies/pkg/mod/cache/download/sumdb/sum.golang.org/tile/8/0/x063/929
+++ /dev/null
Binary files differ
diff --git a/dependencies/pkg/mod/cache/download/sumdb/sum.golang.org/tile/8/0/x068/671 b/dependencies/pkg/mod/cache/download/sumdb/sum.golang.org/tile/8/0/x068/671
deleted file mode 100644
index 4843266..0000000
--- a/dependencies/pkg/mod/cache/download/sumdb/sum.golang.org/tile/8/0/x068/671
+++ /dev/null
Binary files differ
diff --git a/dependencies/pkg/mod/cache/download/sumdb/sum.golang.org/tile/8/0/x073/736 b/dependencies/pkg/mod/cache/download/sumdb/sum.golang.org/tile/8/0/x073/736
deleted file mode 100644
index e96b4b4..0000000
--- a/dependencies/pkg/mod/cache/download/sumdb/sum.golang.org/tile/8/0/x073/736
+++ /dev/null
Binary files differ
diff --git a/dependencies/pkg/mod/cache/download/sumdb/sum.golang.org/tile/8/0/x074/030 b/dependencies/pkg/mod/cache/download/sumdb/sum.golang.org/tile/8/0/x074/030
deleted file mode 100644
index f543c3c..0000000
--- a/dependencies/pkg/mod/cache/download/sumdb/sum.golang.org/tile/8/0/x074/030
+++ /dev/null
Binary files differ
diff --git a/dependencies/pkg/mod/cache/download/sumdb/sum.golang.org/tile/8/0/x074/030.p/198 b/dependencies/pkg/mod/cache/download/sumdb/sum.golang.org/tile/8/0/x074/030.p/198
deleted file mode 100644
index 51d8012..0000000
--- a/dependencies/pkg/mod/cache/download/sumdb/sum.golang.org/tile/8/0/x074/030.p/198
+++ /dev/null
Binary files differ
diff --git a/dependencies/pkg/mod/cache/download/sumdb/sum.golang.org/tile/8/0/x074/031 b/dependencies/pkg/mod/cache/download/sumdb/sum.golang.org/tile/8/0/x074/031
deleted file mode 100644
index f777b3d..0000000
--- a/dependencies/pkg/mod/cache/download/sumdb/sum.golang.org/tile/8/0/x074/031
+++ /dev/null
Binary files differ
diff --git a/dependencies/pkg/mod/cache/download/sumdb/sum.golang.org/tile/8/0/x074/033 b/dependencies/pkg/mod/cache/download/sumdb/sum.golang.org/tile/8/0/x074/033
deleted file mode 100644
index bf822e1..0000000
--- a/dependencies/pkg/mod/cache/download/sumdb/sum.golang.org/tile/8/0/x074/033
+++ /dev/null
Binary files differ
diff --git a/dependencies/pkg/mod/cache/download/sumdb/sum.golang.org/tile/8/0/x074/034 b/dependencies/pkg/mod/cache/download/sumdb/sum.golang.org/tile/8/0/x074/034
deleted file mode 100644
index c57d14a..0000000
--- a/dependencies/pkg/mod/cache/download/sumdb/sum.golang.org/tile/8/0/x074/034
+++ /dev/null
Binary files differ
diff --git a/dependencies/pkg/mod/cache/download/sumdb/sum.golang.org/tile/8/0/x074/035 b/dependencies/pkg/mod/cache/download/sumdb/sum.golang.org/tile/8/0/x074/035
deleted file mode 100644
index da42410..0000000
--- a/dependencies/pkg/mod/cache/download/sumdb/sum.golang.org/tile/8/0/x074/035
+++ /dev/null
Binary files differ
diff --git a/dependencies/pkg/mod/cache/download/sumdb/sum.golang.org/tile/8/0/x074/036 b/dependencies/pkg/mod/cache/download/sumdb/sum.golang.org/tile/8/0/x074/036
deleted file mode 100644
index 6bcf7ae..0000000
--- a/dependencies/pkg/mod/cache/download/sumdb/sum.golang.org/tile/8/0/x074/036
+++ /dev/null
Binary files differ
diff --git a/dependencies/pkg/mod/cache/download/sumdb/sum.golang.org/tile/8/0/x074/037 b/dependencies/pkg/mod/cache/download/sumdb/sum.golang.org/tile/8/0/x074/037
deleted file mode 100644
index 8bf3865..0000000
--- a/dependencies/pkg/mod/cache/download/sumdb/sum.golang.org/tile/8/0/x074/037
+++ /dev/null
Binary files differ
diff --git a/dependencies/pkg/mod/cache/download/sumdb/sum.golang.org/tile/8/0/x074/037.p/124 b/dependencies/pkg/mod/cache/download/sumdb/sum.golang.org/tile/8/0/x074/037.p/124
deleted file mode 100644
index 78da791..0000000
--- a/dependencies/pkg/mod/cache/download/sumdb/sum.golang.org/tile/8/0/x074/037.p/124
+++ /dev/null
Binary files differ
diff --git a/dependencies/pkg/mod/cache/download/sumdb/sum.golang.org/tile/8/0/x074/038 b/dependencies/pkg/mod/cache/download/sumdb/sum.golang.org/tile/8/0/x074/038
deleted file mode 100644
index 165c292..0000000
--- a/dependencies/pkg/mod/cache/download/sumdb/sum.golang.org/tile/8/0/x074/038
+++ /dev/null
Binary files differ
diff --git a/dependencies/pkg/mod/cache/download/sumdb/sum.golang.org/tile/8/0/x074/038.p/229 b/dependencies/pkg/mod/cache/download/sumdb/sum.golang.org/tile/8/0/x074/038.p/229
deleted file mode 100644
index 710f537..0000000
--- a/dependencies/pkg/mod/cache/download/sumdb/sum.golang.org/tile/8/0/x074/038.p/229
+++ /dev/null
Binary files differ
diff --git a/dependencies/pkg/mod/cache/download/sumdb/sum.golang.org/tile/8/0/x074/039.p/138 b/dependencies/pkg/mod/cache/download/sumdb/sum.golang.org/tile/8/0/x074/039.p/138
deleted file mode 100644
index 83f9219..0000000
--- a/dependencies/pkg/mod/cache/download/sumdb/sum.golang.org/tile/8/0/x074/039.p/138
+++ /dev/null
Binary files differ
diff --git a/dependencies/pkg/mod/cache/download/sumdb/sum.golang.org/tile/8/0/x078/105 b/dependencies/pkg/mod/cache/download/sumdb/sum.golang.org/tile/8/0/x078/105
new file mode 100644
index 0000000..ba056e7
--- /dev/null
+++ b/dependencies/pkg/mod/cache/download/sumdb/sum.golang.org/tile/8/0/x078/105
Binary files differ
diff --git a/dependencies/pkg/mod/cache/download/sumdb/sum.golang.org/tile/8/0/x078/530 b/dependencies/pkg/mod/cache/download/sumdb/sum.golang.org/tile/8/0/x078/530
new file mode 100644
index 0000000..2374a13
--- /dev/null
+++ b/dependencies/pkg/mod/cache/download/sumdb/sum.golang.org/tile/8/0/x078/530
Binary files differ
diff --git a/dependencies/pkg/mod/cache/download/sumdb/sum.golang.org/tile/8/0/x080/086 b/dependencies/pkg/mod/cache/download/sumdb/sum.golang.org/tile/8/0/x080/086
new file mode 100644
index 0000000..95e5d83
--- /dev/null
+++ b/dependencies/pkg/mod/cache/download/sumdb/sum.golang.org/tile/8/0/x080/086
Binary files differ
diff --git a/dependencies/pkg/mod/cache/download/sumdb/sum.golang.org/tile/8/0/x088/466 b/dependencies/pkg/mod/cache/download/sumdb/sum.golang.org/tile/8/0/x088/466
new file mode 100644
index 0000000..9ab1677
--- /dev/null
+++ b/dependencies/pkg/mod/cache/download/sumdb/sum.golang.org/tile/8/0/x088/466
Binary files differ
diff --git a/dependencies/pkg/mod/cache/download/sumdb/sum.golang.org/tile/8/0/x097/483 b/dependencies/pkg/mod/cache/download/sumdb/sum.golang.org/tile/8/0/x097/483
new file mode 100644
index 0000000..158988a
--- /dev/null
+++ b/dependencies/pkg/mod/cache/download/sumdb/sum.golang.org/tile/8/0/x097/483
Binary files differ
diff --git a/dependencies/pkg/mod/cache/download/sumdb/sum.golang.org/tile/8/0/x097/509 b/dependencies/pkg/mod/cache/download/sumdb/sum.golang.org/tile/8/0/x097/509
new file mode 100644
index 0000000..35e1bd6
--- /dev/null
+++ b/dependencies/pkg/mod/cache/download/sumdb/sum.golang.org/tile/8/0/x097/509
Binary files differ
diff --git a/dependencies/pkg/mod/cache/download/sumdb/sum.golang.org/tile/8/0/x097/821 b/dependencies/pkg/mod/cache/download/sumdb/sum.golang.org/tile/8/0/x097/821
new file mode 100644
index 0000000..5c5d963
--- /dev/null
+++ b/dependencies/pkg/mod/cache/download/sumdb/sum.golang.org/tile/8/0/x097/821
Binary files differ
diff --git a/dependencies/pkg/mod/cache/download/sumdb/sum.golang.org/tile/8/0/x098/031 b/dependencies/pkg/mod/cache/download/sumdb/sum.golang.org/tile/8/0/x098/031
new file mode 100644
index 0000000..48cbecb
--- /dev/null
+++ b/dependencies/pkg/mod/cache/download/sumdb/sum.golang.org/tile/8/0/x098/031
Binary files differ
diff --git a/dependencies/pkg/mod/cache/download/sumdb/sum.golang.org/tile/8/0/x098/031.p/226 b/dependencies/pkg/mod/cache/download/sumdb/sum.golang.org/tile/8/0/x098/031.p/226
new file mode 100644
index 0000000..c0d5160
--- /dev/null
+++ b/dependencies/pkg/mod/cache/download/sumdb/sum.golang.org/tile/8/0/x098/031.p/226
Binary files differ
diff --git a/dependencies/pkg/mod/cache/download/sumdb/sum.golang.org/tile/8/0/x098/032 b/dependencies/pkg/mod/cache/download/sumdb/sum.golang.org/tile/8/0/x098/032
new file mode 100644
index 0000000..a7a12ce
--- /dev/null
+++ b/dependencies/pkg/mod/cache/download/sumdb/sum.golang.org/tile/8/0/x098/032
Binary files differ
diff --git a/dependencies/pkg/mod/cache/download/sumdb/sum.golang.org/tile/8/0/x098/033 b/dependencies/pkg/mod/cache/download/sumdb/sum.golang.org/tile/8/0/x098/033
new file mode 100644
index 0000000..bfafc97
--- /dev/null
+++ b/dependencies/pkg/mod/cache/download/sumdb/sum.golang.org/tile/8/0/x098/033
Binary files differ
diff --git a/dependencies/pkg/mod/cache/download/sumdb/sum.golang.org/tile/8/0/x098/034 b/dependencies/pkg/mod/cache/download/sumdb/sum.golang.org/tile/8/0/x098/034
new file mode 100644
index 0000000..bb732a8
--- /dev/null
+++ b/dependencies/pkg/mod/cache/download/sumdb/sum.golang.org/tile/8/0/x098/034
Binary files differ
diff --git a/dependencies/pkg/mod/cache/download/sumdb/sum.golang.org/tile/8/0/x098/035 b/dependencies/pkg/mod/cache/download/sumdb/sum.golang.org/tile/8/0/x098/035
new file mode 100644
index 0000000..0777f9e
--- /dev/null
+++ b/dependencies/pkg/mod/cache/download/sumdb/sum.golang.org/tile/8/0/x098/035
Binary files differ
diff --git a/dependencies/pkg/mod/cache/download/sumdb/sum.golang.org/tile/8/0/x098/036 b/dependencies/pkg/mod/cache/download/sumdb/sum.golang.org/tile/8/0/x098/036
new file mode 100644
index 0000000..fa52dd1
--- /dev/null
+++ b/dependencies/pkg/mod/cache/download/sumdb/sum.golang.org/tile/8/0/x098/036
Binary files differ
diff --git a/dependencies/pkg/mod/cache/download/sumdb/sum.golang.org/tile/8/0/x098/037 b/dependencies/pkg/mod/cache/download/sumdb/sum.golang.org/tile/8/0/x098/037
new file mode 100644
index 0000000..34fa5ad
--- /dev/null
+++ b/dependencies/pkg/mod/cache/download/sumdb/sum.golang.org/tile/8/0/x098/037
Binary files differ
diff --git a/dependencies/pkg/mod/cache/download/sumdb/sum.golang.org/tile/8/0/x098/038.p/224 b/dependencies/pkg/mod/cache/download/sumdb/sum.golang.org/tile/8/0/x098/038.p/224
new file mode 100644
index 0000000..d456ccc
--- /dev/null
+++ b/dependencies/pkg/mod/cache/download/sumdb/sum.golang.org/tile/8/0/x098/038.p/224
Binary files differ
diff --git a/dependencies/pkg/mod/cache/download/sumdb/sum.golang.org/tile/8/0/x098/038.p/227 b/dependencies/pkg/mod/cache/download/sumdb/sum.golang.org/tile/8/0/x098/038.p/227
new file mode 100644
index 0000000..9a08806
--- /dev/null
+++ b/dependencies/pkg/mod/cache/download/sumdb/sum.golang.org/tile/8/0/x098/038.p/227
Binary files differ
diff --git a/dependencies/pkg/mod/cache/download/sumdb/sum.golang.org/tile/8/1/128 b/dependencies/pkg/mod/cache/download/sumdb/sum.golang.org/tile/8/1/128
deleted file mode 100644
index dc31fc6..0000000
--- a/dependencies/pkg/mod/cache/download/sumdb/sum.golang.org/tile/8/1/128
+++ /dev/null
Binary files differ
diff --git a/dependencies/pkg/mod/cache/download/sumdb/sum.golang.org/tile/8/1/215 b/dependencies/pkg/mod/cache/download/sumdb/sum.golang.org/tile/8/1/215
deleted file mode 100644
index 050e930..0000000
--- a/dependencies/pkg/mod/cache/download/sumdb/sum.golang.org/tile/8/1/215
+++ /dev/null
Binary files differ
diff --git a/dependencies/pkg/mod/cache/download/sumdb/sum.golang.org/tile/8/1/239 b/dependencies/pkg/mod/cache/download/sumdb/sum.golang.org/tile/8/1/239
deleted file mode 100644
index 60e0127..0000000
--- a/dependencies/pkg/mod/cache/download/sumdb/sum.golang.org/tile/8/1/239
+++ /dev/null
Binary files differ
diff --git a/dependencies/pkg/mod/cache/download/sumdb/sum.golang.org/tile/8/1/249 b/dependencies/pkg/mod/cache/download/sumdb/sum.golang.org/tile/8/1/249
deleted file mode 100644
index f595bb0..0000000
--- a/dependencies/pkg/mod/cache/download/sumdb/sum.golang.org/tile/8/1/249
+++ /dev/null
Binary files differ
diff --git a/dependencies/pkg/mod/cache/download/sumdb/sum.golang.org/tile/8/1/268 b/dependencies/pkg/mod/cache/download/sumdb/sum.golang.org/tile/8/1/268
deleted file mode 100644
index 3cd096d..0000000
--- a/dependencies/pkg/mod/cache/download/sumdb/sum.golang.org/tile/8/1/268
+++ /dev/null
Binary files differ
diff --git a/dependencies/pkg/mod/cache/download/sumdb/sum.golang.org/tile/8/1/288 b/dependencies/pkg/mod/cache/download/sumdb/sum.golang.org/tile/8/1/288
deleted file mode 100644
index 1e1c979..0000000
--- a/dependencies/pkg/mod/cache/download/sumdb/sum.golang.org/tile/8/1/288
+++ /dev/null
Binary files differ
diff --git a/dependencies/pkg/mod/cache/download/sumdb/sum.golang.org/tile/8/1/289.p/46 b/dependencies/pkg/mod/cache/download/sumdb/sum.golang.org/tile/8/1/289.p/46
deleted file mode 100644
index 9377053..0000000
--- a/dependencies/pkg/mod/cache/download/sumdb/sum.golang.org/tile/8/1/289.p/46
+++ /dev/null
Binary files differ
diff --git a/dependencies/pkg/mod/cache/download/sumdb/sum.golang.org/tile/8/1/289.p/53 b/dependencies/pkg/mod/cache/download/sumdb/sum.golang.org/tile/8/1/289.p/53
deleted file mode 100644
index ea74a95..0000000
--- a/dependencies/pkg/mod/cache/download/sumdb/sum.golang.org/tile/8/1/289.p/53
+++ /dev/null
Binary files differ
diff --git a/dependencies/pkg/mod/cache/download/sumdb/sum.golang.org/tile/8/1/289.p/54 b/dependencies/pkg/mod/cache/download/sumdb/sum.golang.org/tile/8/1/289.p/54
deleted file mode 100644
index 6ee97da..0000000
--- a/dependencies/pkg/mod/cache/download/sumdb/sum.golang.org/tile/8/1/289.p/54
+++ /dev/null
Binary files differ
diff --git a/dependencies/pkg/mod/cache/download/sumdb/sum.golang.org/tile/8/1/289.p/55 b/dependencies/pkg/mod/cache/download/sumdb/sum.golang.org/tile/8/1/289.p/55
deleted file mode 100644
index f228c51..0000000
--- a/dependencies/pkg/mod/cache/download/sumdb/sum.golang.org/tile/8/1/289.p/55
+++ /dev/null
Binary files differ
diff --git a/dependencies/pkg/mod/cache/download/sumdb/sum.golang.org/tile/8/1/305 b/dependencies/pkg/mod/cache/download/sumdb/sum.golang.org/tile/8/1/305
new file mode 100644
index 0000000..656e2ab
--- /dev/null
+++ b/dependencies/pkg/mod/cache/download/sumdb/sum.golang.org/tile/8/1/305
Binary files differ
diff --git a/dependencies/pkg/mod/cache/download/sumdb/sum.golang.org/tile/8/1/306 b/dependencies/pkg/mod/cache/download/sumdb/sum.golang.org/tile/8/1/306
new file mode 100644
index 0000000..192686f
--- /dev/null
+++ b/dependencies/pkg/mod/cache/download/sumdb/sum.golang.org/tile/8/1/306
Binary files differ
diff --git a/dependencies/pkg/mod/cache/download/sumdb/sum.golang.org/tile/8/1/312 b/dependencies/pkg/mod/cache/download/sumdb/sum.golang.org/tile/8/1/312
new file mode 100644
index 0000000..408a95a
--- /dev/null
+++ b/dependencies/pkg/mod/cache/download/sumdb/sum.golang.org/tile/8/1/312
Binary files differ
diff --git a/dependencies/pkg/mod/cache/download/sumdb/sum.golang.org/tile/8/1/345 b/dependencies/pkg/mod/cache/download/sumdb/sum.golang.org/tile/8/1/345
new file mode 100644
index 0000000..8a59358
--- /dev/null
+++ b/dependencies/pkg/mod/cache/download/sumdb/sum.golang.org/tile/8/1/345
Binary files differ
diff --git a/dependencies/pkg/mod/cache/download/sumdb/sum.golang.org/tile/8/1/380 b/dependencies/pkg/mod/cache/download/sumdb/sum.golang.org/tile/8/1/380
new file mode 100644
index 0000000..4f1d1f5
--- /dev/null
+++ b/dependencies/pkg/mod/cache/download/sumdb/sum.golang.org/tile/8/1/380
Binary files differ
diff --git a/dependencies/pkg/mod/cache/download/sumdb/sum.golang.org/tile/8/1/382.p/239 b/dependencies/pkg/mod/cache/download/sumdb/sum.golang.org/tile/8/1/382.p/239
new file mode 100644
index 0000000..119ec10
--- /dev/null
+++ b/dependencies/pkg/mod/cache/download/sumdb/sum.golang.org/tile/8/1/382.p/239
Binary files differ
diff --git a/dependencies/pkg/mod/cache/download/sumdb/sum.golang.org/tile/8/1/382.p/246 b/dependencies/pkg/mod/cache/download/sumdb/sum.golang.org/tile/8/1/382.p/246
new file mode 100644
index 0000000..7102b68
--- /dev/null
+++ b/dependencies/pkg/mod/cache/download/sumdb/sum.golang.org/tile/8/1/382.p/246
Binary files differ
diff --git a/dependencies/pkg/mod/cache/download/sumdb/sum.golang.org/tile/8/2/001.p/126 b/dependencies/pkg/mod/cache/download/sumdb/sum.golang.org/tile/8/2/001.p/126
new file mode 100644
index 0000000..48bdbdf
--- /dev/null
+++ b/dependencies/pkg/mod/cache/download/sumdb/sum.golang.org/tile/8/2/001.p/126
Binary files differ
diff --git a/dependencies/pkg/mod/cache/download/sumdb/sum.golang.org/tile/8/2/001.p/33 b/dependencies/pkg/mod/cache/download/sumdb/sum.golang.org/tile/8/2/001.p/33
deleted file mode 100644
index 79850c2..0000000
--- a/dependencies/pkg/mod/cache/download/sumdb/sum.golang.org/tile/8/2/001.p/33
+++ /dev/null
Binary files differ
diff --git a/dependencies/pkg/mod/filippo.io/edwards25519@v1.1.0/.github/workflows/test.yml b/dependencies/pkg/mod/filippo.io/edwards25519@v1.1.0/.github/workflows/test.yml
new file mode 100644
index 0000000..c20f671
--- /dev/null
+++ b/dependencies/pkg/mod/filippo.io/edwards25519@v1.1.0/.github/workflows/test.yml
@@ -0,0 +1,17 @@
+name: Go tests
+on: [push, pull_request]
+jobs:
+ test:
+ name: Go ${{ matrix.go }}
+ runs-on: ubuntu-latest
+ strategy:
+ matrix:
+ go: [ '1.20', '1.x' ]
+ steps:
+ - uses: actions/setup-go@v2
+ with: { go-version: "${{ matrix.go }}" }
+ - uses: actions/checkout@v2
+ - run: go test -short ./...
+ - run: go test -short -tags purego ./...
+ - run: GOARCH=arm64 go test -c
+ - run: GOARCH=arm go test -c
diff --git a/dependencies/pkg/mod/golang.org/x/sync@v0.3.0/LICENSE b/dependencies/pkg/mod/filippo.io/edwards25519@v1.1.0/LICENSE
index 6a66aea..6a66aea 100644
--- a/dependencies/pkg/mod/golang.org/x/sync@v0.3.0/LICENSE
+++ b/dependencies/pkg/mod/filippo.io/edwards25519@v1.1.0/LICENSE
diff --git a/dependencies/pkg/mod/filippo.io/edwards25519@v1.1.0/README.md b/dependencies/pkg/mod/filippo.io/edwards25519@v1.1.0/README.md
new file mode 100644
index 0000000..24e2457
--- /dev/null
+++ b/dependencies/pkg/mod/filippo.io/edwards25519@v1.1.0/README.md
@@ -0,0 +1,14 @@
+# filippo.io/edwards25519
+
+```
+import "filippo.io/edwards25519"
+```
+
+This library implements the edwards25519 elliptic curve, exposing the necessary APIs to build a wide array of higher-level primitives.
+Read the docs at [pkg.go.dev/filippo.io/edwards25519](https://pkg.go.dev/filippo.io/edwards25519).
+
+The code is originally derived from Adam Langley's internal implementation in the Go standard library, and includes George Tankersley's [performance improvements](https://golang.org/cl/71950). It was then further developed by Henry de Valence for use in ristretto255, and was finally [merged back into the Go standard library](https://golang.org/cl/276272) as of Go 1.17. It now tracks the upstream codebase and extends it with additional functionality.
+
+Most users don't need this package, and should instead use `crypto/ed25519` for signatures, `golang.org/x/crypto/curve25519` for Diffie-Hellman, or `github.com/gtank/ristretto255` for prime order group logic. However, for anyone currently using a fork of `crypto/internal/edwards25519`/`crypto/ed25519/internal/edwards25519` or `github.com/agl/edwards25519`, this package should be a safer, faster, and more powerful alternative.
+
+Since this package is meant to curb proliferation of edwards25519 implementations in the Go ecosystem, it welcomes requests for new APIs or reviewable performance improvements.
diff --git a/dependencies/pkg/mod/filippo.io/edwards25519@v1.1.0/doc.go b/dependencies/pkg/mod/filippo.io/edwards25519@v1.1.0/doc.go
new file mode 100644
index 0000000..ab6aaeb
--- /dev/null
+++ b/dependencies/pkg/mod/filippo.io/edwards25519@v1.1.0/doc.go
@@ -0,0 +1,20 @@
+// Copyright (c) 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package edwards25519 implements group logic for the twisted Edwards curve
+//
+// -x^2 + y^2 = 1 + -(121665/121666)*x^2*y^2
+//
+// This is better known as the Edwards curve equivalent to Curve25519, and is
+// the curve used by the Ed25519 signature scheme.
+//
+// Most users don't need this package, and should instead use crypto/ed25519 for
+// signatures, golang.org/x/crypto/curve25519 for Diffie-Hellman, or
+// github.com/gtank/ristretto255 for prime order group logic.
+//
+// However, developers who do need to interact with low-level edwards25519
+// operations can use this package, which is an extended version of
+// crypto/internal/edwards25519 from the standard library repackaged as
+// an importable module.
+package edwards25519
diff --git a/dependencies/pkg/mod/filippo.io/edwards25519@v1.1.0/edwards25519.go b/dependencies/pkg/mod/filippo.io/edwards25519@v1.1.0/edwards25519.go
new file mode 100644
index 0000000..a744da2
--- /dev/null
+++ b/dependencies/pkg/mod/filippo.io/edwards25519@v1.1.0/edwards25519.go
@@ -0,0 +1,427 @@
+// Copyright (c) 2017 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package edwards25519
+
+import (
+ "errors"
+
+ "filippo.io/edwards25519/field"
+)
+
+// Point types.
+
+type projP1xP1 struct {
+ X, Y, Z, T field.Element
+}
+
+type projP2 struct {
+ X, Y, Z field.Element
+}
+
+// Point represents a point on the edwards25519 curve.
+//
+// This type works similarly to math/big.Int, and all arguments and receivers
+// are allowed to alias.
+//
+// The zero value is NOT valid, and it may be used only as a receiver.
+type Point struct {
+ // Make the type not comparable (i.e. used with == or as a map key), as
+ // equivalent points can be represented by different Go values.
+ _ incomparable
+
+ // The point is internally represented in extended coordinates (X, Y, Z, T)
+ // where x = X/Z, y = Y/Z, and xy = T/Z per https://eprint.iacr.org/2008/522.
+ x, y, z, t field.Element
+}
+
+type incomparable [0]func()
+
+func checkInitialized(points ...*Point) {
+ for _, p := range points {
+ if p.x == (field.Element{}) && p.y == (field.Element{}) {
+ panic("edwards25519: use of uninitialized Point")
+ }
+ }
+}
+
+type projCached struct {
+ YplusX, YminusX, Z, T2d field.Element
+}
+
+type affineCached struct {
+ YplusX, YminusX, T2d field.Element
+}
+
+// Constructors.
+
+func (v *projP2) Zero() *projP2 {
+ v.X.Zero()
+ v.Y.One()
+ v.Z.One()
+ return v
+}
+
+// identity is the point at infinity.
+var identity, _ = new(Point).SetBytes([]byte{
+ 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0})
+
+// NewIdentityPoint returns a new Point set to the identity.
+func NewIdentityPoint() *Point {
+ return new(Point).Set(identity)
+}
+
+// generator is the canonical curve basepoint. See TestGenerator for the
+// correspondence of this encoding with the values in RFC 8032.
+var generator, _ = new(Point).SetBytes([]byte{
+ 0x58, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66,
+ 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66,
+ 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66,
+ 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66})
+
+// NewGeneratorPoint returns a new Point set to the canonical generator.
+func NewGeneratorPoint() *Point {
+ return new(Point).Set(generator)
+}
+
+func (v *projCached) Zero() *projCached {
+ v.YplusX.One()
+ v.YminusX.One()
+ v.Z.One()
+ v.T2d.Zero()
+ return v
+}
+
+func (v *affineCached) Zero() *affineCached {
+ v.YplusX.One()
+ v.YminusX.One()
+ v.T2d.Zero()
+ return v
+}
+
+// Assignments.
+
+// Set sets v = u, and returns v.
+func (v *Point) Set(u *Point) *Point {
+ *v = *u
+ return v
+}
+
+// Encoding.
+
+// Bytes returns the canonical 32-byte encoding of v, according to RFC 8032,
+// Section 5.1.2.
+func (v *Point) Bytes() []byte {
+ // This function is outlined to make the allocations inline in the caller
+ // rather than happen on the heap.
+ var buf [32]byte
+ return v.bytes(&buf)
+}
+
+func (v *Point) bytes(buf *[32]byte) []byte {
+ checkInitialized(v)
+
+ var zInv, x, y field.Element
+ zInv.Invert(&v.z) // zInv = 1 / Z
+ x.Multiply(&v.x, &zInv) // x = X / Z
+ y.Multiply(&v.y, &zInv) // y = Y / Z
+
+ out := copyFieldElement(buf, &y)
+ out[31] |= byte(x.IsNegative() << 7)
+ return out
+}
+
+var feOne = new(field.Element).One()
+
+// SetBytes sets v = x, where x is a 32-byte encoding of v. If x does not
+// represent a valid point on the curve, SetBytes returns nil and an error and
+// the receiver is unchanged. Otherwise, SetBytes returns v.
+//
+// Note that SetBytes accepts all non-canonical encodings of valid points.
+// That is, it follows decoding rules that match most implementations in
+// the ecosystem rather than RFC 8032.
+func (v *Point) SetBytes(x []byte) (*Point, error) {
+ // Specifically, the non-canonical encodings that are accepted are
+ // 1) the ones where the field element is not reduced (see the
+ // (*field.Element).SetBytes docs) and
+ // 2) the ones where the x-coordinate is zero and the sign bit is set.
+ //
+ // Read more at https://hdevalence.ca/blog/2020-10-04-its-25519am,
+ // specifically the "Canonical A, R" section.
+
+ y, err := new(field.Element).SetBytes(x)
+ if err != nil {
+ return nil, errors.New("edwards25519: invalid point encoding length")
+ }
+
+ // -x² + y² = 1 + dx²y²
+ // x² + dx²y² = x²(dy² + 1) = y² - 1
+ // x² = (y² - 1) / (dy² + 1)
+
+ // u = y² - 1
+ y2 := new(field.Element).Square(y)
+ u := new(field.Element).Subtract(y2, feOne)
+
+ // v = dy² + 1
+ vv := new(field.Element).Multiply(y2, d)
+ vv = vv.Add(vv, feOne)
+
+ // x = +√(u/v)
+ xx, wasSquare := new(field.Element).SqrtRatio(u, vv)
+ if wasSquare == 0 {
+ return nil, errors.New("edwards25519: invalid point encoding")
+ }
+
+ // Select the negative square root if the sign bit is set.
+ xxNeg := new(field.Element).Negate(xx)
+ xx = xx.Select(xxNeg, xx, int(x[31]>>7))
+
+ v.x.Set(xx)
+ v.y.Set(y)
+ v.z.One()
+ v.t.Multiply(xx, y) // xy = T / Z
+
+ return v, nil
+}
+
+func copyFieldElement(buf *[32]byte, v *field.Element) []byte {
+ copy(buf[:], v.Bytes())
+ return buf[:]
+}
+
+// Conversions.
+
+func (v *projP2) FromP1xP1(p *projP1xP1) *projP2 {
+ v.X.Multiply(&p.X, &p.T)
+ v.Y.Multiply(&p.Y, &p.Z)
+ v.Z.Multiply(&p.Z, &p.T)
+ return v
+}
+
+func (v *projP2) FromP3(p *Point) *projP2 {
+ v.X.Set(&p.x)
+ v.Y.Set(&p.y)
+ v.Z.Set(&p.z)
+ return v
+}
+
+func (v *Point) fromP1xP1(p *projP1xP1) *Point {
+ v.x.Multiply(&p.X, &p.T)
+ v.y.Multiply(&p.Y, &p.Z)
+ v.z.Multiply(&p.Z, &p.T)
+ v.t.Multiply(&p.X, &p.Y)
+ return v
+}
+
+func (v *Point) fromP2(p *projP2) *Point {
+ v.x.Multiply(&p.X, &p.Z)
+ v.y.Multiply(&p.Y, &p.Z)
+ v.z.Square(&p.Z)
+ v.t.Multiply(&p.X, &p.Y)
+ return v
+}
+
+// d is a constant in the curve equation.
+var d, _ = new(field.Element).SetBytes([]byte{
+ 0xa3, 0x78, 0x59, 0x13, 0xca, 0x4d, 0xeb, 0x75,
+ 0xab, 0xd8, 0x41, 0x41, 0x4d, 0x0a, 0x70, 0x00,
+ 0x98, 0xe8, 0x79, 0x77, 0x79, 0x40, 0xc7, 0x8c,
+ 0x73, 0xfe, 0x6f, 0x2b, 0xee, 0x6c, 0x03, 0x52})
+var d2 = new(field.Element).Add(d, d)
+
+func (v *projCached) FromP3(p *Point) *projCached {
+ v.YplusX.Add(&p.y, &p.x)
+ v.YminusX.Subtract(&p.y, &p.x)
+ v.Z.Set(&p.z)
+ v.T2d.Multiply(&p.t, d2)
+ return v
+}
+
+func (v *affineCached) FromP3(p *Point) *affineCached {
+ v.YplusX.Add(&p.y, &p.x)
+ v.YminusX.Subtract(&p.y, &p.x)
+ v.T2d.Multiply(&p.t, d2)
+
+ var invZ field.Element
+ invZ.Invert(&p.z)
+ v.YplusX.Multiply(&v.YplusX, &invZ)
+ v.YminusX.Multiply(&v.YminusX, &invZ)
+ v.T2d.Multiply(&v.T2d, &invZ)
+ return v
+}
+
+// (Re)addition and subtraction.
+
+// Add sets v = p + q, and returns v.
+func (v *Point) Add(p, q *Point) *Point {
+ checkInitialized(p, q)
+ qCached := new(projCached).FromP3(q)
+ result := new(projP1xP1).Add(p, qCached)
+ return v.fromP1xP1(result)
+}
+
+// Subtract sets v = p - q, and returns v.
+func (v *Point) Subtract(p, q *Point) *Point {
+ checkInitialized(p, q)
+ qCached := new(projCached).FromP3(q)
+ result := new(projP1xP1).Sub(p, qCached)
+ return v.fromP1xP1(result)
+}
+
+func (v *projP1xP1) Add(p *Point, q *projCached) *projP1xP1 {
+ var YplusX, YminusX, PP, MM, TT2d, ZZ2 field.Element
+
+ YplusX.Add(&p.y, &p.x)
+ YminusX.Subtract(&p.y, &p.x)
+
+ PP.Multiply(&YplusX, &q.YplusX)
+ MM.Multiply(&YminusX, &q.YminusX)
+ TT2d.Multiply(&p.t, &q.T2d)
+ ZZ2.Multiply(&p.z, &q.Z)
+
+ ZZ2.Add(&ZZ2, &ZZ2)
+
+ v.X.Subtract(&PP, &MM)
+ v.Y.Add(&PP, &MM)
+ v.Z.Add(&ZZ2, &TT2d)
+ v.T.Subtract(&ZZ2, &TT2d)
+ return v
+}
+
+func (v *projP1xP1) Sub(p *Point, q *projCached) *projP1xP1 {
+ var YplusX, YminusX, PP, MM, TT2d, ZZ2 field.Element
+
+ YplusX.Add(&p.y, &p.x)
+ YminusX.Subtract(&p.y, &p.x)
+
+ PP.Multiply(&YplusX, &q.YminusX) // flipped sign
+ MM.Multiply(&YminusX, &q.YplusX) // flipped sign
+ TT2d.Multiply(&p.t, &q.T2d)
+ ZZ2.Multiply(&p.z, &q.Z)
+
+ ZZ2.Add(&ZZ2, &ZZ2)
+
+ v.X.Subtract(&PP, &MM)
+ v.Y.Add(&PP, &MM)
+ v.Z.Subtract(&ZZ2, &TT2d) // flipped sign
+ v.T.Add(&ZZ2, &TT2d) // flipped sign
+ return v
+}
+
+func (v *projP1xP1) AddAffine(p *Point, q *affineCached) *projP1xP1 {
+ var YplusX, YminusX, PP, MM, TT2d, Z2 field.Element
+
+ YplusX.Add(&p.y, &p.x)
+ YminusX.Subtract(&p.y, &p.x)
+
+ PP.Multiply(&YplusX, &q.YplusX)
+ MM.Multiply(&YminusX, &q.YminusX)
+ TT2d.Multiply(&p.t, &q.T2d)
+
+ Z2.Add(&p.z, &p.z)
+
+ v.X.Subtract(&PP, &MM)
+ v.Y.Add(&PP, &MM)
+ v.Z.Add(&Z2, &TT2d)
+ v.T.Subtract(&Z2, &TT2d)
+ return v
+}
+
+func (v *projP1xP1) SubAffine(p *Point, q *affineCached) *projP1xP1 {
+ var YplusX, YminusX, PP, MM, TT2d, Z2 field.Element
+
+ YplusX.Add(&p.y, &p.x)
+ YminusX.Subtract(&p.y, &p.x)
+
+ PP.Multiply(&YplusX, &q.YminusX) // flipped sign
+ MM.Multiply(&YminusX, &q.YplusX) // flipped sign
+ TT2d.Multiply(&p.t, &q.T2d)
+
+ Z2.Add(&p.z, &p.z)
+
+ v.X.Subtract(&PP, &MM)
+ v.Y.Add(&PP, &MM)
+ v.Z.Subtract(&Z2, &TT2d) // flipped sign
+ v.T.Add(&Z2, &TT2d) // flipped sign
+ return v
+}
+
+// Doubling.
+
+func (v *projP1xP1) Double(p *projP2) *projP1xP1 {
+ var XX, YY, ZZ2, XplusYsq field.Element
+
+ XX.Square(&p.X)
+ YY.Square(&p.Y)
+ ZZ2.Square(&p.Z)
+ ZZ2.Add(&ZZ2, &ZZ2)
+ XplusYsq.Add(&p.X, &p.Y)
+ XplusYsq.Square(&XplusYsq)
+
+ v.Y.Add(&YY, &XX)
+ v.Z.Subtract(&YY, &XX)
+
+ v.X.Subtract(&XplusYsq, &v.Y)
+ v.T.Subtract(&ZZ2, &v.Z)
+ return v
+}
+
+// Negation.
+
+// Negate sets v = -p, and returns v.
+func (v *Point) Negate(p *Point) *Point {
+ checkInitialized(p)
+ v.x.Negate(&p.x)
+ v.y.Set(&p.y)
+ v.z.Set(&p.z)
+ v.t.Negate(&p.t)
+ return v
+}
+
+// Equal returns 1 if v is equivalent to u, and 0 otherwise.
+func (v *Point) Equal(u *Point) int {
+ checkInitialized(v, u)
+
+ var t1, t2, t3, t4 field.Element
+ t1.Multiply(&v.x, &u.z)
+ t2.Multiply(&u.x, &v.z)
+ t3.Multiply(&v.y, &u.z)
+ t4.Multiply(&u.y, &v.z)
+
+ return t1.Equal(&t2) & t3.Equal(&t4)
+}
+
+// Constant-time operations
+
+// Select sets v to a if cond == 1 and to b if cond == 0.
+func (v *projCached) Select(a, b *projCached, cond int) *projCached {
+ v.YplusX.Select(&a.YplusX, &b.YplusX, cond)
+ v.YminusX.Select(&a.YminusX, &b.YminusX, cond)
+ v.Z.Select(&a.Z, &b.Z, cond)
+ v.T2d.Select(&a.T2d, &b.T2d, cond)
+ return v
+}
+
+// Select sets v to a if cond == 1 and to b if cond == 0.
+func (v *affineCached) Select(a, b *affineCached, cond int) *affineCached {
+ v.YplusX.Select(&a.YplusX, &b.YplusX, cond)
+ v.YminusX.Select(&a.YminusX, &b.YminusX, cond)
+ v.T2d.Select(&a.T2d, &b.T2d, cond)
+ return v
+}
+
+// CondNeg negates v if cond == 1 and leaves it unchanged if cond == 0.
+func (v *projCached) CondNeg(cond int) *projCached {
+ v.YplusX.Swap(&v.YminusX, cond)
+ v.T2d.Select(new(field.Element).Negate(&v.T2d), &v.T2d, cond)
+ return v
+}
+
+// CondNeg negates v if cond == 1 and leaves it unchanged if cond == 0.
+func (v *affineCached) CondNeg(cond int) *affineCached {
+ v.YplusX.Swap(&v.YminusX, cond)
+ v.T2d.Select(new(field.Element).Negate(&v.T2d), &v.T2d, cond)
+ return v
+}
diff --git a/dependencies/pkg/mod/filippo.io/edwards25519@v1.1.0/edwards25519_test.go b/dependencies/pkg/mod/filippo.io/edwards25519@v1.1.0/edwards25519_test.go
new file mode 100644
index 0000000..fe511bd
--- /dev/null
+++ b/dependencies/pkg/mod/filippo.io/edwards25519@v1.1.0/edwards25519_test.go
@@ -0,0 +1,311 @@
+// Copyright (c) 2019 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package edwards25519
+
+import (
+ "encoding/hex"
+ "reflect"
+ "testing"
+
+ "filippo.io/edwards25519/field"
+)
+
+var B = NewGeneratorPoint()
+var I = NewIdentityPoint()
+
+func checkOnCurve(t *testing.T, points ...*Point) {
+ t.Helper()
+ for i, p := range points {
+ var XX, YY, ZZ, ZZZZ field.Element
+ XX.Square(&p.x)
+ YY.Square(&p.y)
+ ZZ.Square(&p.z)
+ ZZZZ.Square(&ZZ)
+ // -x² + y² = 1 + dx²y²
+ // -(X/Z)² + (Y/Z)² = 1 + d(X/Z)²(Y/Z)²
+ // (-X² + Y²)/Z² = 1 + (dX²Y²)/Z⁴
+ // (-X² + Y²)*Z² = Z⁴ + dX²Y²
+ var lhs, rhs field.Element
+ lhs.Subtract(&YY, &XX).Multiply(&lhs, &ZZ)
+ rhs.Multiply(d, &XX).Multiply(&rhs, &YY).Add(&rhs, &ZZZZ)
+ if lhs.Equal(&rhs) != 1 {
+ t.Errorf("X, Y, and Z do not specify a point on the curve\nX = %v\nY = %v\nZ = %v", p.x, p.y, p.z)
+ }
+ // xy = T/Z
+ lhs.Multiply(&p.x, &p.y)
+ rhs.Multiply(&p.z, &p.t)
+ if lhs.Equal(&rhs) != 1 {
+ t.Errorf("point %d is not valid\nX = %v\nY = %v\nZ = %v", i, p.x, p.y, p.z)
+ }
+ }
+}
+
+func TestGenerator(t *testing.T) {
+ // These are the coordinates of B from RFC 8032, Section 5.1, converted to
+ // little endian hex.
+ x := "1ad5258f602d56c9b2a7259560c72c695cdcd6fd31e2a4c0fe536ecdd3366921"
+ y := "5866666666666666666666666666666666666666666666666666666666666666"
+ if got := hex.EncodeToString(B.x.Bytes()); got != x {
+ t.Errorf("wrong B.x: got %s, expected %s", got, x)
+ }
+ if got := hex.EncodeToString(B.y.Bytes()); got != y {
+ t.Errorf("wrong B.y: got %s, expected %s", got, y)
+ }
+ if B.z.Equal(feOne) != 1 {
+ t.Errorf("wrong B.z: got %v, expected 1", B.z)
+ }
+ // Check that t is correct.
+ checkOnCurve(t, B)
+}
+
+func TestAddSubNegOnBasePoint(t *testing.T) {
+ checkLhs, checkRhs := &Point{}, &Point{}
+
+ checkLhs.Add(B, B)
+ tmpP2 := new(projP2).FromP3(B)
+ tmpP1xP1 := new(projP1xP1).Double(tmpP2)
+ checkRhs.fromP1xP1(tmpP1xP1)
+ if checkLhs.Equal(checkRhs) != 1 {
+ t.Error("B + B != [2]B")
+ }
+ checkOnCurve(t, checkLhs, checkRhs)
+
+ checkLhs.Subtract(B, B)
+ Bneg := new(Point).Negate(B)
+ checkRhs.Add(B, Bneg)
+ if checkLhs.Equal(checkRhs) != 1 {
+ t.Error("B - B != B + (-B)")
+ }
+ if I.Equal(checkLhs) != 1 {
+ t.Error("B - B != 0")
+ }
+ if I.Equal(checkRhs) != 1 {
+ t.Error("B + (-B) != 0")
+ }
+ checkOnCurve(t, checkLhs, checkRhs, Bneg)
+}
+
+func TestComparable(t *testing.T) {
+ if reflect.TypeOf(Point{}).Comparable() {
+ t.Error("Point is unexpectedly comparable")
+ }
+}
+
+func TestInvalidEncodings(t *testing.T) {
+ // An invalid point, that also happens to have y > p.
+ invalid := "efffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff7f"
+ p := NewGeneratorPoint()
+ if out, err := p.SetBytes(decodeHex(invalid)); err == nil {
+ t.Error("expected error for invalid point")
+ } else if out != nil {
+ t.Error("SetBytes did not return nil on an invalid encoding")
+ } else if p.Equal(B) != 1 {
+ t.Error("the Point was modified while decoding an invalid encoding")
+ }
+ checkOnCurve(t, p)
+}
+
+func TestNonCanonicalPoints(t *testing.T) {
+ type test struct {
+ name string
+ encoding, canonical string
+ }
+ tests := []test{
+ // Points with x = 0 and the sign bit set. With x = 0 the curve equation
+ // gives y² = 1, so y = ±1. 1 has two valid encodings.
+ {
+ "y=1,sign-",
+ "0100000000000000000000000000000000000000000000000000000000000080",
+ "0100000000000000000000000000000000000000000000000000000000000000",
+ },
+ {
+ "y=p+1,sign-",
+ "eeffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff",
+ "0100000000000000000000000000000000000000000000000000000000000000",
+ },
+ {
+ "y=p-1,sign-",
+ "ecffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff",
+ "ecffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff7f",
+ },
+
+ // Non-canonical y encodings with values 2²⁵⁵-19 (p) to 2²⁵⁵-1 (p+18).
+ {
+ "y=p,sign+",
+ "edffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff7f",
+ "0000000000000000000000000000000000000000000000000000000000000000",
+ },
+ {
+ "y=p,sign-",
+ "edffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff",
+ "0000000000000000000000000000000000000000000000000000000000000080",
+ },
+ {
+ "y=p+1,sign+",
+ "eeffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff7f",
+ "0100000000000000000000000000000000000000000000000000000000000000",
+ },
+ // "y=p+1,sign-" is already tested above.
+ // p+2 is not a valid y-coordinate.
+ {
+ "y=p+3,sign+",
+ "f0ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff7f",
+ "0300000000000000000000000000000000000000000000000000000000000000",
+ },
+ {
+ "y=p+3,sign-",
+ "f0ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff",
+ "0300000000000000000000000000000000000000000000000000000000000080",
+ },
+ {
+ "y=p+4,sign+",
+ "f1ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff7f",
+ "0400000000000000000000000000000000000000000000000000000000000000",
+ },
+ {
+ "y=p+4,sign-",
+ "f1ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff",
+ "0400000000000000000000000000000000000000000000000000000000000080",
+ },
+ {
+ "y=p+5,sign+",
+ "f2ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff7f",
+ "0500000000000000000000000000000000000000000000000000000000000000",
+ },
+ {
+ "y=p+5,sign-",
+ "f2ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff",
+ "0500000000000000000000000000000000000000000000000000000000000080",
+ },
+ {
+ "y=p+6,sign+",
+ "f3ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff7f",
+ "0600000000000000000000000000000000000000000000000000000000000000",
+ },
+ {
+ "y=p+6,sign-",
+ "f3ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff",
+ "0600000000000000000000000000000000000000000000000000000000000080",
+ },
+ // p+7 is not a valid y-coordinate.
+ // p+8 is not a valid y-coordinate.
+ {
+ "y=p+9,sign+",
+ "f6ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff7f",
+ "0900000000000000000000000000000000000000000000000000000000000000",
+ },
+ {
+ "y=p+9,sign-",
+ "f6ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff",
+ "0900000000000000000000000000000000000000000000000000000000000080",
+ },
+ {
+ "y=p+10,sign+",
+ "f7ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff7f",
+ "0a00000000000000000000000000000000000000000000000000000000000000",
+ },
+ {
+ "y=p+10,sign-",
+ "f7ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff",
+ "0a00000000000000000000000000000000000000000000000000000000000080",
+ },
+ // p+11 is not a valid y-coordinate.
+ // p+12 is not a valid y-coordinate.
+ // p+13 is not a valid y-coordinate.
+ {
+ "y=p+14,sign+",
+ "fbffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff7f",
+ "0e00000000000000000000000000000000000000000000000000000000000000",
+ },
+ {
+ "y=p+14,sign-",
+ "fbffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff",
+ "0e00000000000000000000000000000000000000000000000000000000000080",
+ },
+ {
+ "y=p+15,sign+",
+ "fcffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff7f",
+ "0f00000000000000000000000000000000000000000000000000000000000000",
+ },
+ {
+ "y=p+15,sign-",
+ "fcffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff",
+ "0f00000000000000000000000000000000000000000000000000000000000080",
+ },
+ {
+ "y=p+16,sign+",
+ "fdffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff7f",
+ "1000000000000000000000000000000000000000000000000000000000000000",
+ },
+ {
+ "y=p+16,sign-",
+ "fdffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff",
+ "1000000000000000000000000000000000000000000000000000000000000080",
+ },
+ // p+17 is not a valid y-coordinate.
+ {
+ "y=p+18,sign+",
+ "ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff7f",
+ "1200000000000000000000000000000000000000000000000000000000000000",
+ },
+ {
+ "y=p+18,sign-",
+ "ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff",
+ "1200000000000000000000000000000000000000000000000000000000000080",
+ },
+ }
+ for _, tt := range tests {
+ t.Run(tt.name, func(t *testing.T) {
+ p1, err := new(Point).SetBytes(decodeHex(tt.encoding))
+ if err != nil {
+ t.Fatalf("error decoding non-canonical point: %v", err)
+ }
+ p2, err := new(Point).SetBytes(decodeHex(tt.canonical))
+ if err != nil {
+ t.Fatalf("error decoding canonical point: %v", err)
+ }
+ if p1.Equal(p2) != 1 {
+ t.Errorf("equivalent points are not equal: %v, %v", p1, p2)
+ }
+ if encoding := hex.EncodeToString(p1.Bytes()); encoding != tt.canonical {
+ t.Errorf("re-encoding does not match canonical; got %q, expected %q", encoding, tt.canonical)
+ }
+ checkOnCurve(t, p1, p2)
+ })
+ }
+}
+
+var testAllocationsSink byte
+
+func TestAllocations(t *testing.T) {
+ if allocs := testing.AllocsPerRun(100, func() {
+ p := NewIdentityPoint()
+ p.Add(p, NewGeneratorPoint())
+ s := NewScalar()
+ testAllocationsSink ^= s.Bytes()[0]
+ testAllocationsSink ^= p.Bytes()[0]
+ }); allocs > 0 {
+ t.Errorf("expected zero allocations, got %0.1v", allocs)
+ }
+}
+
+func decodeHex(s string) []byte {
+ b, err := hex.DecodeString(s)
+ if err != nil {
+ panic(err)
+ }
+ return b
+}
+
+func BenchmarkEncodingDecoding(b *testing.B) {
+ p := new(Point).Set(dalekScalarBasepoint)
+ for i := 0; i < b.N; i++ {
+ buf := p.Bytes()
+ _, err := p.SetBytes(buf)
+ if err != nil {
+ b.Fatal(err)
+ }
+ }
+}
diff --git a/dependencies/pkg/mod/filippo.io/edwards25519@v1.1.0/extra.go b/dependencies/pkg/mod/filippo.io/edwards25519@v1.1.0/extra.go
new file mode 100644
index 0000000..d152d68
--- /dev/null
+++ b/dependencies/pkg/mod/filippo.io/edwards25519@v1.1.0/extra.go
@@ -0,0 +1,349 @@
+// Copyright (c) 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package edwards25519
+
+// This file contains additional functionality that is not included in the
+// upstream crypto/internal/edwards25519 package.
+
+import (
+ "errors"
+
+ "filippo.io/edwards25519/field"
+)
+
+// ExtendedCoordinates returns v in extended coordinates (X:Y:Z:T) where
+// x = X/Z, y = Y/Z, and xy = T/Z as in https://eprint.iacr.org/2008/522.
+func (v *Point) ExtendedCoordinates() (X, Y, Z, T *field.Element) {
+ // This function is outlined to make the allocations inline in the caller
+ // rather than happen on the heap. Don't change the style without making
+ // sure it doesn't increase the inliner cost.
+ var e [4]field.Element
+ X, Y, Z, T = v.extendedCoordinates(&e)
+ return
+}
+
+func (v *Point) extendedCoordinates(e *[4]field.Element) (X, Y, Z, T *field.Element) {
+ checkInitialized(v)
+ X = e[0].Set(&v.x)
+ Y = e[1].Set(&v.y)
+ Z = e[2].Set(&v.z)
+ T = e[3].Set(&v.t)
+ return
+}
+
+// SetExtendedCoordinates sets v = (X:Y:Z:T) in extended coordinates where
+// x = X/Z, y = Y/Z, and xy = T/Z as in https://eprint.iacr.org/2008/522.
+//
+// If the coordinates are invalid or don't represent a valid point on the curve,
+// SetExtendedCoordinates returns nil and an error and the receiver is
+// unchanged. Otherwise, SetExtendedCoordinates returns v.
+func (v *Point) SetExtendedCoordinates(X, Y, Z, T *field.Element) (*Point, error) {
+ if !isOnCurve(X, Y, Z, T) {
+ return nil, errors.New("edwards25519: invalid point coordinates")
+ }
+ v.x.Set(X)
+ v.y.Set(Y)
+ v.z.Set(Z)
+ v.t.Set(T)
+ return v, nil
+}
+
+func isOnCurve(X, Y, Z, T *field.Element) bool {
+ var lhs, rhs field.Element
+ XX := new(field.Element).Square(X)
+ YY := new(field.Element).Square(Y)
+ ZZ := new(field.Element).Square(Z)
+ TT := new(field.Element).Square(T)
+ // -x² + y² = 1 + dx²y²
+ // -(X/Z)² + (Y/Z)² = 1 + d(T/Z)²
+ // -X² + Y² = Z² + dT²
+ lhs.Subtract(YY, XX)
+ rhs.Multiply(d, TT).Add(&rhs, ZZ)
+ if lhs.Equal(&rhs) != 1 {
+ return false
+ }
+ // xy = T/Z
+ // XY/Z² = T/Z
+ // XY = TZ
+ lhs.Multiply(X, Y)
+ rhs.Multiply(T, Z)
+ return lhs.Equal(&rhs) == 1
+}
+
+// BytesMontgomery converts v to a point on the birationally-equivalent
+// Curve25519 Montgomery curve, and returns its canonical 32 bytes encoding
+// according to RFC 7748.
+//
+// Note that BytesMontgomery only encodes the u-coordinate, so v and -v encode
+// to the same value. If v is the identity point, BytesMontgomery returns 32
+// zero bytes, analogously to the X25519 function.
+//
+// The lack of an inverse operation (such as SetMontgomeryBytes) is deliberate:
+// while every valid edwards25519 point has a unique u-coordinate Montgomery
+// encoding, X25519 accepts inputs on the quadratic twist, which don't correspond
+// to any edwards25519 point, and every other X25519 input corresponds to two
+// edwards25519 points.
+func (v *Point) BytesMontgomery() []byte {
+ // This function is outlined to make the allocations inline in the caller
+ // rather than happen on the heap.
+ var buf [32]byte
+ return v.bytesMontgomery(&buf)
+}
+
+func (v *Point) bytesMontgomery(buf *[32]byte) []byte {
+ checkInitialized(v)
+
+ // RFC 7748, Section 4.1 provides the bilinear map to calculate the
+ // Montgomery u-coordinate
+ //
+ // u = (1 + y) / (1 - y)
+ //
+ // where y = Y / Z.
+
+ var y, recip, u field.Element
+
+ y.Multiply(&v.y, y.Invert(&v.z)) // y = Y / Z
+ recip.Invert(recip.Subtract(feOne, &y)) // r = 1/(1 - y)
+ u.Multiply(u.Add(feOne, &y), &recip) // u = (1 + y)*r
+
+ return copyFieldElement(buf, &u)
+}
+
+// MultByCofactor sets v = 8 * p, and returns v.
+func (v *Point) MultByCofactor(p *Point) *Point {
+ checkInitialized(p)
+ result := projP1xP1{}
+ pp := (&projP2{}).FromP3(p)
+ result.Double(pp)
+ pp.FromP1xP1(&result)
+ result.Double(pp)
+ pp.FromP1xP1(&result)
+ result.Double(pp)
+ return v.fromP1xP1(&result)
+}
+
+// Given k > 0, set s = s**(2*i).
+func (s *Scalar) pow2k(k int) {
+ for i := 0; i < k; i++ {
+ s.Multiply(s, s)
+ }
+}
+
+// Invert sets s to the inverse of a nonzero scalar v, and returns s.
+//
+// If t is zero, Invert returns zero.
+func (s *Scalar) Invert(t *Scalar) *Scalar {
+ // Uses a hardcoded sliding window of width 4.
+ var table [8]Scalar
+ var tt Scalar
+ tt.Multiply(t, t)
+ table[0] = *t
+ for i := 0; i < 7; i++ {
+ table[i+1].Multiply(&table[i], &tt)
+ }
+ // Now table = [t**1, t**3, t**5, t**7, t**9, t**11, t**13, t**15]
+ // so t**k = t[k/2] for odd k
+
+ // To compute the sliding window digits, use the following Sage script:
+
+ // sage: import itertools
+ // sage: def sliding_window(w,k):
+ // ....: digits = []
+ // ....: while k > 0:
+ // ....: if k % 2 == 1:
+ // ....: kmod = k % (2**w)
+ // ....: digits.append(kmod)
+ // ....: k = k - kmod
+ // ....: else:
+ // ....: digits.append(0)
+ // ....: k = k // 2
+ // ....: return digits
+
+ // Now we can compute s roughly as follows:
+
+ // sage: s = 1
+ // sage: for coeff in reversed(sliding_window(4,l-2)):
+ // ....: s = s*s
+ // ....: if coeff > 0 :
+ // ....: s = s*t**coeff
+
+ // This works on one bit at a time, with many runs of zeros.
+ // The digits can be collapsed into [(count, coeff)] as follows:
+
+ // sage: [(len(list(group)),d) for d,group in itertools.groupby(sliding_window(4,l-2))]
+
+ // Entries of the form (k, 0) turn into pow2k(k)
+ // Entries of the form (1, coeff) turn into a squaring and then a table lookup.
+ // We can fold the squaring into the previous pow2k(k) as pow2k(k+1).
+
+ *s = table[1/2]
+ s.pow2k(127 + 1)
+ s.Multiply(s, &table[1/2])
+ s.pow2k(4 + 1)
+ s.Multiply(s, &table[9/2])
+ s.pow2k(3 + 1)
+ s.Multiply(s, &table[11/2])
+ s.pow2k(3 + 1)
+ s.Multiply(s, &table[13/2])
+ s.pow2k(3 + 1)
+ s.Multiply(s, &table[15/2])
+ s.pow2k(4 + 1)
+ s.Multiply(s, &table[7/2])
+ s.pow2k(4 + 1)
+ s.Multiply(s, &table[15/2])
+ s.pow2k(3 + 1)
+ s.Multiply(s, &table[5/2])
+ s.pow2k(3 + 1)
+ s.Multiply(s, &table[1/2])
+ s.pow2k(4 + 1)
+ s.Multiply(s, &table[15/2])
+ s.pow2k(4 + 1)
+ s.Multiply(s, &table[15/2])
+ s.pow2k(4 + 1)
+ s.Multiply(s, &table[7/2])
+ s.pow2k(3 + 1)
+ s.Multiply(s, &table[3/2])
+ s.pow2k(4 + 1)
+ s.Multiply(s, &table[11/2])
+ s.pow2k(5 + 1)
+ s.Multiply(s, &table[11/2])
+ s.pow2k(9 + 1)
+ s.Multiply(s, &table[9/2])
+ s.pow2k(3 + 1)
+ s.Multiply(s, &table[3/2])
+ s.pow2k(4 + 1)
+ s.Multiply(s, &table[3/2])
+ s.pow2k(4 + 1)
+ s.Multiply(s, &table[3/2])
+ s.pow2k(4 + 1)
+ s.Multiply(s, &table[9/2])
+ s.pow2k(3 + 1)
+ s.Multiply(s, &table[7/2])
+ s.pow2k(3 + 1)
+ s.Multiply(s, &table[3/2])
+ s.pow2k(3 + 1)
+ s.Multiply(s, &table[13/2])
+ s.pow2k(3 + 1)
+ s.Multiply(s, &table[7/2])
+ s.pow2k(4 + 1)
+ s.Multiply(s, &table[9/2])
+ s.pow2k(3 + 1)
+ s.Multiply(s, &table[15/2])
+ s.pow2k(4 + 1)
+ s.Multiply(s, &table[11/2])
+
+ return s
+}
+
+// MultiScalarMult sets v = sum(scalars[i] * points[i]), and returns v.
+//
+// Execution time depends only on the lengths of the two slices, which must match.
+func (v *Point) MultiScalarMult(scalars []*Scalar, points []*Point) *Point {
+ if len(scalars) != len(points) {
+ panic("edwards25519: called MultiScalarMult with different size inputs")
+ }
+ checkInitialized(points...)
+
+ // Proceed as in the single-base case, but share doublings
+ // between each point in the multiscalar equation.
+
+ // Build lookup tables for each point
+ tables := make([]projLookupTable, len(points))
+ for i := range tables {
+ tables[i].FromP3(points[i])
+ }
+ // Compute signed radix-16 digits for each scalar
+ digits := make([][64]int8, len(scalars))
+ for i := range digits {
+ digits[i] = scalars[i].signedRadix16()
+ }
+
+ // Unwrap first loop iteration to save computing 16*identity
+ multiple := &projCached{}
+ tmp1 := &projP1xP1{}
+ tmp2 := &projP2{}
+ // Lookup-and-add the appropriate multiple of each input point
+ for j := range tables {
+ tables[j].SelectInto(multiple, digits[j][63])
+ tmp1.Add(v, multiple) // tmp1 = v + x_(j,63)*Q in P1xP1 coords
+ v.fromP1xP1(tmp1) // update v
+ }
+ tmp2.FromP3(v) // set up tmp2 = v in P2 coords for next iteration
+ for i := 62; i >= 0; i-- {
+ tmp1.Double(tmp2) // tmp1 = 2*(prev) in P1xP1 coords
+ tmp2.FromP1xP1(tmp1) // tmp2 = 2*(prev) in P2 coords
+ tmp1.Double(tmp2) // tmp1 = 4*(prev) in P1xP1 coords
+ tmp2.FromP1xP1(tmp1) // tmp2 = 4*(prev) in P2 coords
+ tmp1.Double(tmp2) // tmp1 = 8*(prev) in P1xP1 coords
+ tmp2.FromP1xP1(tmp1) // tmp2 = 8*(prev) in P2 coords
+ tmp1.Double(tmp2) // tmp1 = 16*(prev) in P1xP1 coords
+ v.fromP1xP1(tmp1) // v = 16*(prev) in P3 coords
+ // Lookup-and-add the appropriate multiple of each input point
+ for j := range tables {
+ tables[j].SelectInto(multiple, digits[j][i])
+ tmp1.Add(v, multiple) // tmp1 = v + x_(j,i)*Q in P1xP1 coords
+ v.fromP1xP1(tmp1) // update v
+ }
+ tmp2.FromP3(v) // set up tmp2 = v in P2 coords for next iteration
+ }
+ return v
+}
+
+// VarTimeMultiScalarMult sets v = sum(scalars[i] * points[i]), and returns v.
+//
+// Execution time depends on the inputs.
+func (v *Point) VarTimeMultiScalarMult(scalars []*Scalar, points []*Point) *Point {
+ if len(scalars) != len(points) {
+ panic("edwards25519: called VarTimeMultiScalarMult with different size inputs")
+ }
+ checkInitialized(points...)
+
+ // Generalize double-base NAF computation to arbitrary sizes.
+ // Here all the points are dynamic, so we only use the smaller
+ // tables.
+
+ // Build lookup tables for each point
+ tables := make([]nafLookupTable5, len(points))
+ for i := range tables {
+ tables[i].FromP3(points[i])
+ }
+ // Compute a NAF for each scalar
+ nafs := make([][256]int8, len(scalars))
+ for i := range nafs {
+ nafs[i] = scalars[i].nonAdjacentForm(5)
+ }
+
+ multiple := &projCached{}
+ tmp1 := &projP1xP1{}
+ tmp2 := &projP2{}
+ tmp2.Zero()
+
+ // Move from high to low bits, doubling the accumulator
+ // at each iteration and checking whether there is a nonzero
+ // coefficient to look up a multiple of.
+ //
+ // Skip trying to find the first nonzero coefficent, because
+ // searching might be more work than a few extra doublings.
+ for i := 255; i >= 0; i-- {
+ tmp1.Double(tmp2)
+
+ for j := range nafs {
+ if nafs[j][i] > 0 {
+ v.fromP1xP1(tmp1)
+ tables[j].SelectInto(multiple, nafs[j][i])
+ tmp1.Add(v, multiple)
+ } else if nafs[j][i] < 0 {
+ v.fromP1xP1(tmp1)
+ tables[j].SelectInto(multiple, -nafs[j][i])
+ tmp1.Sub(v, multiple)
+ }
+ }
+
+ tmp2.FromP1xP1(tmp1)
+ }
+
+ v.fromP2(tmp2)
+ return v
+}
diff --git a/dependencies/pkg/mod/filippo.io/edwards25519@v1.1.0/extra_test.go b/dependencies/pkg/mod/filippo.io/edwards25519@v1.1.0/extra_test.go
new file mode 100644
index 0000000..6fb832a
--- /dev/null
+++ b/dependencies/pkg/mod/filippo.io/edwards25519@v1.1.0/extra_test.go
@@ -0,0 +1,220 @@
+// Copyright (c) 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package edwards25519
+
+import (
+ "crypto/rand"
+ "encoding/hex"
+ "testing"
+ "testing/quick"
+)
+
+// TestBytesMontgomery tests the SetBytesWithClamping+BytesMontgomery path
+// equivalence to curve25519.X25519 for basepoint scalar multiplications.
+//
+// Note that you can't actually implement X25519 with this package because
+// there is no SetBytesMontgomery, and it would not be possible to implement
+// it properly: points on the twist would get rejected, and the Scalar returned
+// by SetBytesWithClamping does not preserve its cofactor-clearing properties.
+//
+// Disabled to avoid the golang.org/x/crypto module dependency.
+/* func TestBytesMontgomery(t *testing.T) {
+ f := func(scalar [32]byte) bool {
+ s := NewScalar().SetBytesWithClamping(scalar[:])
+ p := (&Point{}).ScalarBaseMult(s)
+ got := p.BytesMontgomery()
+ want, _ := curve25519.X25519(scalar[:], curve25519.Basepoint)
+ return bytes.Equal(got, want)
+ }
+ if err := quick.Check(f, nil); err != nil {
+ t.Error(err)
+ }
+} */
+
+func TestBytesMontgomerySodium(t *testing.T) {
+ // Generated with libsodium.js 1.0.18
+ // crypto_sign_keypair().publicKey
+ publicKey := "3bf918ffc2c955dc895bf145f566fb96623c1cadbe040091175764b5fde322c0"
+ p, err := (&Point{}).SetBytes(decodeHex(publicKey))
+ if err != nil {
+ t.Fatal(err)
+ }
+ // crypto_sign_ed25519_pk_to_curve25519(publicKey)
+ want := "efc6c9d0738e9ea18d738ad4a2653631558931b0f1fde4dd58c436d19686dc28"
+ if got := hex.EncodeToString(p.BytesMontgomery()); got != want {
+ t.Errorf("got %q, want %q", got, want)
+ }
+}
+
+func TestBytesMontgomeryInfinity(t *testing.T) {
+ p := NewIdentityPoint()
+ want := "0000000000000000000000000000000000000000000000000000000000000000"
+ if got := hex.EncodeToString(p.BytesMontgomery()); got != want {
+ t.Errorf("got %q, want %q", got, want)
+ }
+}
+
+func TestMultByCofactor(t *testing.T) {
+ lowOrderBytes := "26e8958fc2b227b045c3f489f2ef98f0d5dfac05d3c63339b13802886d53fc85"
+ lowOrder, err := (&Point{}).SetBytes(decodeHex(lowOrderBytes))
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ if p := (&Point{}).MultByCofactor(lowOrder); p.Equal(NewIdentityPoint()) != 1 {
+ t.Errorf("expected low order point * cofactor to be the identity")
+ }
+
+ f := func(scalar [64]byte) bool {
+ s, _ := NewScalar().SetUniformBytes(scalar[:])
+ p := (&Point{}).ScalarBaseMult(s)
+ p8 := (&Point{}).MultByCofactor(p)
+ checkOnCurve(t, p8)
+
+ // 8 * p == (8 * s) * B
+ reprEight := [32]byte{8, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}
+ scEight, _ := (&Scalar{}).SetCanonicalBytes(reprEight[:])
+ s.Multiply(s, scEight)
+ pp := (&Point{}).ScalarBaseMult(s)
+ if p8.Equal(pp) != 1 {
+ return false
+ }
+
+ // 8 * p == 8 * (lowOrder + p)
+ pp.Add(p, lowOrder)
+ pp.MultByCofactor(pp)
+ if p8.Equal(pp) != 1 {
+ return false
+ }
+
+ // 8 * p == p + p + p + p + p + p + p + p
+ pp.Set(NewIdentityPoint())
+ for i := 0; i < 8; i++ {
+ pp.Add(pp, p)
+ }
+ return p8.Equal(pp) == 1
+ }
+ if err := quick.Check(f, nil); err != nil {
+ t.Error(err)
+ }
+}
+
+func TestScalarInvert(t *testing.T) {
+ invertWorks := func(xInv Scalar, x notZeroScalar) bool {
+ xInv.Invert((*Scalar)(&x))
+ var check Scalar
+ check.Multiply((*Scalar)(&x), &xInv)
+
+ return check.Equal(scOne) == 1 && isReduced(xInv.Bytes())
+ }
+
+ if err := quick.Check(invertWorks, quickCheckConfig(32)); err != nil {
+ t.Error(err)
+ }
+
+ randomScalar := *dalekScalar
+ randomInverse := NewScalar().Invert(&randomScalar)
+ var check Scalar
+ check.Multiply(&randomScalar, randomInverse)
+
+ if check.Equal(scOne) == 0 || !isReduced(randomInverse.Bytes()) {
+ t.Error("inversion did not work")
+ }
+
+ zero := NewScalar()
+ if xx := NewScalar().Invert(zero); xx.Equal(zero) != 1 {
+ t.Errorf("inverting zero did not return zero")
+ }
+}
+
+func TestMultiScalarMultMatchesBaseMult(t *testing.T) {
+ multiScalarMultMatchesBaseMult := func(x, y, z Scalar) bool {
+ var p, q1, q2, q3, check Point
+
+ p.MultiScalarMult([]*Scalar{&x, &y, &z}, []*Point{B, B, B})
+
+ q1.ScalarBaseMult(&x)
+ q2.ScalarBaseMult(&y)
+ q3.ScalarBaseMult(&z)
+ check.Add(&q1, &q2).Add(&check, &q3)
+
+ checkOnCurve(t, &p, &check, &q1, &q2, &q3)
+ return p.Equal(&check) == 1
+ }
+
+ if err := quick.Check(multiScalarMultMatchesBaseMult, quickCheckConfig(32)); err != nil {
+ t.Error(err)
+ }
+}
+
+func TestVarTimeMultiScalarMultMatchesBaseMult(t *testing.T) {
+ varTimeMultiScalarMultMatchesBaseMult := func(x, y, z Scalar) bool {
+ var p, q1, q2, q3, check Point
+
+ p.VarTimeMultiScalarMult([]*Scalar{&x, &y, &z}, []*Point{B, B, B})
+
+ q1.ScalarBaseMult(&x)
+ q2.ScalarBaseMult(&y)
+ q3.ScalarBaseMult(&z)
+ check.Add(&q1, &q2).Add(&check, &q3)
+
+ checkOnCurve(t, &p, &check, &q1, &q2, &q3)
+ return p.Equal(&check) == 1
+ }
+
+ if err := quick.Check(varTimeMultiScalarMultMatchesBaseMult, quickCheckConfig(32)); err != nil {
+ t.Error(err)
+ }
+}
+
+func BenchmarkMultiScalarMultSize8(t *testing.B) {
+ var p Point
+ x := dalekScalar
+
+ for i := 0; i < t.N; i++ {
+ p.MultiScalarMult([]*Scalar{x, x, x, x, x, x, x, x},
+ []*Point{B, B, B, B, B, B, B, B})
+ }
+}
+
+func BenchmarkScalarAddition(b *testing.B) {
+ var rnd [128]byte
+ rand.Read(rnd[:])
+ s1, _ := (&Scalar{}).SetUniformBytes(rnd[0:64])
+ s2, _ := (&Scalar{}).SetUniformBytes(rnd[64:128])
+ t := &Scalar{}
+
+ b.ResetTimer()
+
+ for i := 0; i < b.N; i++ {
+ t.Add(s1, s2)
+ }
+}
+
+func BenchmarkScalarMultiplication(b *testing.B) {
+ var rnd [128]byte
+ rand.Read(rnd[:])
+ s1, _ := (&Scalar{}).SetUniformBytes(rnd[0:64])
+ s2, _ := (&Scalar{}).SetUniformBytes(rnd[64:128])
+ t := &Scalar{}
+
+ b.ResetTimer()
+
+ for i := 0; i < b.N; i++ {
+ t.Multiply(s1, s2)
+ }
+}
+
+func BenchmarkScalarInversion(b *testing.B) {
+ var rnd [64]byte
+ rand.Read(rnd[:])
+ s1, _ := (&Scalar{}).SetUniformBytes(rnd[0:64])
+
+ b.ResetTimer()
+
+ for i := 0; i < b.N; i++ {
+ s1.Invert(s1)
+ }
+}
diff --git a/dependencies/pkg/mod/filippo.io/edwards25519@v1.1.0/field/fe.go b/dependencies/pkg/mod/filippo.io/edwards25519@v1.1.0/field/fe.go
new file mode 100644
index 0000000..5518ef2
--- /dev/null
+++ b/dependencies/pkg/mod/filippo.io/edwards25519@v1.1.0/field/fe.go
@@ -0,0 +1,420 @@
+// Copyright (c) 2017 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package field implements fast arithmetic modulo 2^255-19.
+package field
+
+import (
+ "crypto/subtle"
+ "encoding/binary"
+ "errors"
+ "math/bits"
+)
+
+// Element represents an element of the field GF(2^255-19). Note that this
+// is not a cryptographically secure group, and should only be used to interact
+// with edwards25519.Point coordinates.
+//
+// This type works similarly to math/big.Int, and all arguments and receivers
+// are allowed to alias.
+//
+// The zero value is a valid zero element.
+type Element struct {
+ // An element t represents the integer
+ // t.l0 + t.l1*2^51 + t.l2*2^102 + t.l3*2^153 + t.l4*2^204
+ //
+ // Between operations, all limbs are expected to be lower than 2^52.
+ l0 uint64
+ l1 uint64
+ l2 uint64
+ l3 uint64
+ l4 uint64
+}
+
+const maskLow51Bits uint64 = (1 << 51) - 1
+
+var feZero = &Element{0, 0, 0, 0, 0}
+
+// Zero sets v = 0, and returns v.
+func (v *Element) Zero() *Element {
+ *v = *feZero
+ return v
+}
+
+var feOne = &Element{1, 0, 0, 0, 0}
+
+// One sets v = 1, and returns v.
+func (v *Element) One() *Element {
+ *v = *feOne
+ return v
+}
+
+// reduce reduces v modulo 2^255 - 19 and returns it.
+func (v *Element) reduce() *Element {
+ v.carryPropagate()
+
+ // After the light reduction we now have a field element representation
+ // v < 2^255 + 2^13 * 19, but need v < 2^255 - 19.
+
+ // If v >= 2^255 - 19, then v + 19 >= 2^255, which would overflow 2^255 - 1,
+ // generating a carry. That is, c will be 0 if v < 2^255 - 19, and 1 otherwise.
+ c := (v.l0 + 19) >> 51
+ c = (v.l1 + c) >> 51
+ c = (v.l2 + c) >> 51
+ c = (v.l3 + c) >> 51
+ c = (v.l4 + c) >> 51
+
+ // If v < 2^255 - 19 and c = 0, this will be a no-op. Otherwise, it's
+ // effectively applying the reduction identity to the carry.
+ v.l0 += 19 * c
+
+ v.l1 += v.l0 >> 51
+ v.l0 = v.l0 & maskLow51Bits
+ v.l2 += v.l1 >> 51
+ v.l1 = v.l1 & maskLow51Bits
+ v.l3 += v.l2 >> 51
+ v.l2 = v.l2 & maskLow51Bits
+ v.l4 += v.l3 >> 51
+ v.l3 = v.l3 & maskLow51Bits
+ // no additional carry
+ v.l4 = v.l4 & maskLow51Bits
+
+ return v
+}
+
+// Add sets v = a + b, and returns v.
+func (v *Element) Add(a, b *Element) *Element {
+ v.l0 = a.l0 + b.l0
+ v.l1 = a.l1 + b.l1
+ v.l2 = a.l2 + b.l2
+ v.l3 = a.l3 + b.l3
+ v.l4 = a.l4 + b.l4
+ // Using the generic implementation here is actually faster than the
+ // assembly. Probably because the body of this function is so simple that
+ // the compiler can figure out better optimizations by inlining the carry
+ // propagation.
+ return v.carryPropagateGeneric()
+}
+
+// Subtract sets v = a - b, and returns v.
+func (v *Element) Subtract(a, b *Element) *Element {
+ // We first add 2 * p, to guarantee the subtraction won't underflow, and
+ // then subtract b (which can be up to 2^255 + 2^13 * 19).
+ v.l0 = (a.l0 + 0xFFFFFFFFFFFDA) - b.l0
+ v.l1 = (a.l1 + 0xFFFFFFFFFFFFE) - b.l1
+ v.l2 = (a.l2 + 0xFFFFFFFFFFFFE) - b.l2
+ v.l3 = (a.l3 + 0xFFFFFFFFFFFFE) - b.l3
+ v.l4 = (a.l4 + 0xFFFFFFFFFFFFE) - b.l4
+ return v.carryPropagate()
+}
+
+// Negate sets v = -a, and returns v.
+func (v *Element) Negate(a *Element) *Element {
+ return v.Subtract(feZero, a)
+}
+
+// Invert sets v = 1/z mod p, and returns v.
+//
+// If z == 0, Invert returns v = 0.
+func (v *Element) Invert(z *Element) *Element {
+ // Inversion is implemented as exponentiation with exponent p − 2. It uses the
+ // same sequence of 255 squarings and 11 multiplications as [Curve25519].
+ var z2, z9, z11, z2_5_0, z2_10_0, z2_20_0, z2_50_0, z2_100_0, t Element
+
+ z2.Square(z) // 2
+ t.Square(&z2) // 4
+ t.Square(&t) // 8
+ z9.Multiply(&t, z) // 9
+ z11.Multiply(&z9, &z2) // 11
+ t.Square(&z11) // 22
+ z2_5_0.Multiply(&t, &z9) // 31 = 2^5 - 2^0
+
+ t.Square(&z2_5_0) // 2^6 - 2^1
+ for i := 0; i < 4; i++ {
+ t.Square(&t) // 2^10 - 2^5
+ }
+ z2_10_0.Multiply(&t, &z2_5_0) // 2^10 - 2^0
+
+ t.Square(&z2_10_0) // 2^11 - 2^1
+ for i := 0; i < 9; i++ {
+ t.Square(&t) // 2^20 - 2^10
+ }
+ z2_20_0.Multiply(&t, &z2_10_0) // 2^20 - 2^0
+
+ t.Square(&z2_20_0) // 2^21 - 2^1
+ for i := 0; i < 19; i++ {
+ t.Square(&t) // 2^40 - 2^20
+ }
+ t.Multiply(&t, &z2_20_0) // 2^40 - 2^0
+
+ t.Square(&t) // 2^41 - 2^1
+ for i := 0; i < 9; i++ {
+ t.Square(&t) // 2^50 - 2^10
+ }
+ z2_50_0.Multiply(&t, &z2_10_0) // 2^50 - 2^0
+
+ t.Square(&z2_50_0) // 2^51 - 2^1
+ for i := 0; i < 49; i++ {
+ t.Square(&t) // 2^100 - 2^50
+ }
+ z2_100_0.Multiply(&t, &z2_50_0) // 2^100 - 2^0
+
+ t.Square(&z2_100_0) // 2^101 - 2^1
+ for i := 0; i < 99; i++ {
+ t.Square(&t) // 2^200 - 2^100
+ }
+ t.Multiply(&t, &z2_100_0) // 2^200 - 2^0
+
+ t.Square(&t) // 2^201 - 2^1
+ for i := 0; i < 49; i++ {
+ t.Square(&t) // 2^250 - 2^50
+ }
+ t.Multiply(&t, &z2_50_0) // 2^250 - 2^0
+
+ t.Square(&t) // 2^251 - 2^1
+ t.Square(&t) // 2^252 - 2^2
+ t.Square(&t) // 2^253 - 2^3
+ t.Square(&t) // 2^254 - 2^4
+ t.Square(&t) // 2^255 - 2^5
+
+ return v.Multiply(&t, &z11) // 2^255 - 21
+}
+
+// Set sets v = a, and returns v.
+func (v *Element) Set(a *Element) *Element {
+ *v = *a
+ return v
+}
+
+// SetBytes sets v to x, where x is a 32-byte little-endian encoding. If x is
+// not of the right length, SetBytes returns nil and an error, and the
+// receiver is unchanged.
+//
+// Consistent with RFC 7748, the most significant bit (the high bit of the
+// last byte) is ignored, and non-canonical values (2^255-19 through 2^255-1)
+// are accepted. Note that this is laxer than specified by RFC 8032, but
+// consistent with most Ed25519 implementations.
+func (v *Element) SetBytes(x []byte) (*Element, error) {
+ if len(x) != 32 {
+ return nil, errors.New("edwards25519: invalid field element input size")
+ }
+
+ // Bits 0:51 (bytes 0:8, bits 0:64, shift 0, mask 51).
+ v.l0 = binary.LittleEndian.Uint64(x[0:8])
+ v.l0 &= maskLow51Bits
+ // Bits 51:102 (bytes 6:14, bits 48:112, shift 3, mask 51).
+ v.l1 = binary.LittleEndian.Uint64(x[6:14]) >> 3
+ v.l1 &= maskLow51Bits
+ // Bits 102:153 (bytes 12:20, bits 96:160, shift 6, mask 51).
+ v.l2 = binary.LittleEndian.Uint64(x[12:20]) >> 6
+ v.l2 &= maskLow51Bits
+ // Bits 153:204 (bytes 19:27, bits 152:216, shift 1, mask 51).
+ v.l3 = binary.LittleEndian.Uint64(x[19:27]) >> 1
+ v.l3 &= maskLow51Bits
+ // Bits 204:255 (bytes 24:32, bits 192:256, shift 12, mask 51).
+ // Note: not bytes 25:33, shift 4, to avoid overread.
+ v.l4 = binary.LittleEndian.Uint64(x[24:32]) >> 12
+ v.l4 &= maskLow51Bits
+
+ return v, nil
+}
+
+// Bytes returns the canonical 32-byte little-endian encoding of v.
+func (v *Element) Bytes() []byte {
+ // This function is outlined to make the allocations inline in the caller
+ // rather than happen on the heap.
+ var out [32]byte
+ return v.bytes(&out)
+}
+
+func (v *Element) bytes(out *[32]byte) []byte {
+ t := *v
+ t.reduce()
+
+ var buf [8]byte
+ for i, l := range [5]uint64{t.l0, t.l1, t.l2, t.l3, t.l4} {
+ bitsOffset := i * 51
+ binary.LittleEndian.PutUint64(buf[:], l<<uint(bitsOffset%8))
+ for i, bb := range buf {
+ off := bitsOffset/8 + i
+ if off >= len(out) {
+ break
+ }
+ out[off] |= bb
+ }
+ }
+
+ return out[:]
+}
+
+// Equal returns 1 if v and u are equal, and 0 otherwise.
+func (v *Element) Equal(u *Element) int {
+ sa, sv := u.Bytes(), v.Bytes()
+ return subtle.ConstantTimeCompare(sa, sv)
+}
+
+// mask64Bits returns 0xffffffff if cond is 1, and 0 otherwise.
+func mask64Bits(cond int) uint64 { return ^(uint64(cond) - 1) }
+
+// Select sets v to a if cond == 1, and to b if cond == 0.
+func (v *Element) Select(a, b *Element, cond int) *Element {
+ m := mask64Bits(cond)
+ v.l0 = (m & a.l0) | (^m & b.l0)
+ v.l1 = (m & a.l1) | (^m & b.l1)
+ v.l2 = (m & a.l2) | (^m & b.l2)
+ v.l3 = (m & a.l3) | (^m & b.l3)
+ v.l4 = (m & a.l4) | (^m & b.l4)
+ return v
+}
+
+// Swap swaps v and u if cond == 1 or leaves them unchanged if cond == 0, and returns v.
+func (v *Element) Swap(u *Element, cond int) {
+ m := mask64Bits(cond)
+ t := m & (v.l0 ^ u.l0)
+ v.l0 ^= t
+ u.l0 ^= t
+ t = m & (v.l1 ^ u.l1)
+ v.l1 ^= t
+ u.l1 ^= t
+ t = m & (v.l2 ^ u.l2)
+ v.l2 ^= t
+ u.l2 ^= t
+ t = m & (v.l3 ^ u.l3)
+ v.l3 ^= t
+ u.l3 ^= t
+ t = m & (v.l4 ^ u.l4)
+ v.l4 ^= t
+ u.l4 ^= t
+}
+
+// IsNegative returns 1 if v is negative, and 0 otherwise.
+func (v *Element) IsNegative() int {
+ return int(v.Bytes()[0] & 1)
+}
+
+// Absolute sets v to |u|, and returns v.
+func (v *Element) Absolute(u *Element) *Element {
+ return v.Select(new(Element).Negate(u), u, u.IsNegative())
+}
+
+// Multiply sets v = x * y, and returns v.
+func (v *Element) Multiply(x, y *Element) *Element {
+ feMul(v, x, y)
+ return v
+}
+
+// Square sets v = x * x, and returns v.
+func (v *Element) Square(x *Element) *Element {
+ feSquare(v, x)
+ return v
+}
+
+// Mult32 sets v = x * y, and returns v.
+func (v *Element) Mult32(x *Element, y uint32) *Element {
+ x0lo, x0hi := mul51(x.l0, y)
+ x1lo, x1hi := mul51(x.l1, y)
+ x2lo, x2hi := mul51(x.l2, y)
+ x3lo, x3hi := mul51(x.l3, y)
+ x4lo, x4hi := mul51(x.l4, y)
+ v.l0 = x0lo + 19*x4hi // carried over per the reduction identity
+ v.l1 = x1lo + x0hi
+ v.l2 = x2lo + x1hi
+ v.l3 = x3lo + x2hi
+ v.l4 = x4lo + x3hi
+ // The hi portions are going to be only 32 bits, plus any previous excess,
+ // so we can skip the carry propagation.
+ return v
+}
+
+// mul51 returns lo + hi * 2⁵¹ = a * b.
+func mul51(a uint64, b uint32) (lo uint64, hi uint64) {
+ mh, ml := bits.Mul64(a, uint64(b))
+ lo = ml & maskLow51Bits
+ hi = (mh << 13) | (ml >> 51)
+ return
+}
+
+// Pow22523 set v = x^((p-5)/8), and returns v. (p-5)/8 is 2^252-3.
+func (v *Element) Pow22523(x *Element) *Element {
+ var t0, t1, t2 Element
+
+ t0.Square(x) // x^2
+ t1.Square(&t0) // x^4
+ t1.Square(&t1) // x^8
+ t1.Multiply(x, &t1) // x^9
+ t0.Multiply(&t0, &t1) // x^11
+ t0.Square(&t0) // x^22
+ t0.Multiply(&t1, &t0) // x^31
+ t1.Square(&t0) // x^62
+ for i := 1; i < 5; i++ { // x^992
+ t1.Square(&t1)
+ }
+ t0.Multiply(&t1, &t0) // x^1023 -> 1023 = 2^10 - 1
+ t1.Square(&t0) // 2^11 - 2
+ for i := 1; i < 10; i++ { // 2^20 - 2^10
+ t1.Square(&t1)
+ }
+ t1.Multiply(&t1, &t0) // 2^20 - 1
+ t2.Square(&t1) // 2^21 - 2
+ for i := 1; i < 20; i++ { // 2^40 - 2^20
+ t2.Square(&t2)
+ }
+ t1.Multiply(&t2, &t1) // 2^40 - 1
+ t1.Square(&t1) // 2^41 - 2
+ for i := 1; i < 10; i++ { // 2^50 - 2^10
+ t1.Square(&t1)
+ }
+ t0.Multiply(&t1, &t0) // 2^50 - 1
+ t1.Square(&t0) // 2^51 - 2
+ for i := 1; i < 50; i++ { // 2^100 - 2^50
+ t1.Square(&t1)
+ }
+ t1.Multiply(&t1, &t0) // 2^100 - 1
+ t2.Square(&t1) // 2^101 - 2
+ for i := 1; i < 100; i++ { // 2^200 - 2^100
+ t2.Square(&t2)
+ }
+ t1.Multiply(&t2, &t1) // 2^200 - 1
+ t1.Square(&t1) // 2^201 - 2
+ for i := 1; i < 50; i++ { // 2^250 - 2^50
+ t1.Square(&t1)
+ }
+ t0.Multiply(&t1, &t0) // 2^250 - 1
+ t0.Square(&t0) // 2^251 - 2
+ t0.Square(&t0) // 2^252 - 4
+ return v.Multiply(&t0, x) // 2^252 - 3 -> x^(2^252-3)
+}
+
+// sqrtM1 is 2^((p-1)/4), which squared is equal to -1 by Euler's Criterion.
+var sqrtM1 = &Element{1718705420411056, 234908883556509,
+ 2233514472574048, 2117202627021982, 765476049583133}
+
+// SqrtRatio sets r to the non-negative square root of the ratio of u and v.
+//
+// If u/v is square, SqrtRatio returns r and 1. If u/v is not square, SqrtRatio
+// sets r according to Section 4.3 of draft-irtf-cfrg-ristretto255-decaf448-00,
+// and returns r and 0.
+func (r *Element) SqrtRatio(u, v *Element) (R *Element, wasSquare int) {
+ t0 := new(Element)
+
+ // r = (u * v3) * (u * v7)^((p-5)/8)
+ v2 := new(Element).Square(v)
+ uv3 := new(Element).Multiply(u, t0.Multiply(v2, v))
+ uv7 := new(Element).Multiply(uv3, t0.Square(v2))
+ rr := new(Element).Multiply(uv3, t0.Pow22523(uv7))
+
+ check := new(Element).Multiply(v, t0.Square(rr)) // check = v * r^2
+
+ uNeg := new(Element).Negate(u)
+ correctSignSqrt := check.Equal(u)
+ flippedSignSqrt := check.Equal(uNeg)
+ flippedSignSqrtI := check.Equal(t0.Multiply(uNeg, sqrtM1))
+
+ rPrime := new(Element).Multiply(rr, sqrtM1) // r_prime = SQRT_M1 * r
+ // r = CT_SELECT(r_prime IF flipped_sign_sqrt | flipped_sign_sqrt_i ELSE r)
+ rr.Select(rPrime, rr, flippedSignSqrt|flippedSignSqrtI)
+
+ r.Absolute(rr) // Choose the nonnegative square root.
+ return r, correctSignSqrt | flippedSignSqrt
+}
diff --git a/dependencies/pkg/mod/filippo.io/edwards25519@v1.1.0/field/fe_alias_test.go b/dependencies/pkg/mod/filippo.io/edwards25519@v1.1.0/field/fe_alias_test.go
new file mode 100644
index 0000000..0c81239
--- /dev/null
+++ b/dependencies/pkg/mod/filippo.io/edwards25519@v1.1.0/field/fe_alias_test.go
@@ -0,0 +1,140 @@
+// Copyright (c) 2019 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package field
+
+import (
+ "testing"
+ "testing/quick"
+)
+
+func checkAliasingOneArg(f func(v, x *Element) *Element) func(v, x Element) bool {
+ return func(v, x Element) bool {
+ x1, v1 := x, x
+
+ // Calculate a reference f(x) without aliasing.
+ if out := f(&v, &x); out != &v && isInBounds(out) {
+ return false
+ }
+
+ // Test aliasing the argument and the receiver.
+ if out := f(&v1, &v1); out != &v1 || v1 != v {
+ return false
+ }
+
+ // Ensure the arguments was not modified.
+ return x == x1
+ }
+}
+
+func checkAliasingTwoArgs(f func(v, x, y *Element) *Element) func(v, x, y Element) bool {
+ return func(v, x, y Element) bool {
+ x1, y1, v1 := x, y, Element{}
+
+ // Calculate a reference f(x, y) without aliasing.
+ if out := f(&v, &x, &y); out != &v && isInBounds(out) {
+ return false
+ }
+
+ // Test aliasing the first argument and the receiver.
+ v1 = x
+ if out := f(&v1, &v1, &y); out != &v1 || v1 != v {
+ return false
+ }
+ // Test aliasing the second argument and the receiver.
+ v1 = y
+ if out := f(&v1, &x, &v1); out != &v1 || v1 != v {
+ return false
+ }
+
+ // Calculate a reference f(x, x) without aliasing.
+ if out := f(&v, &x, &x); out != &v {
+ return false
+ }
+
+ // Test aliasing the first argument and the receiver.
+ v1 = x
+ if out := f(&v1, &v1, &x); out != &v1 || v1 != v {
+ return false
+ }
+ // Test aliasing the second argument and the receiver.
+ v1 = x
+ if out := f(&v1, &x, &v1); out != &v1 || v1 != v {
+ return false
+ }
+ // Test aliasing both arguments and the receiver.
+ v1 = x
+ if out := f(&v1, &v1, &v1); out != &v1 || v1 != v {
+ return false
+ }
+
+ // Ensure the arguments were not modified.
+ return x == x1 && y == y1
+ }
+}
+
+// TestAliasing checks that receivers and arguments can alias each other without
+// leading to incorrect results. That is, it ensures that it's safe to write
+//
+// v.Invert(v)
+//
+// or
+//
+// v.Add(v, v)
+//
+// without any of the inputs getting clobbered by the output being written.
+func TestAliasing(t *testing.T) {
+ type target struct {
+ name string
+ oneArgF func(v, x *Element) *Element
+ twoArgsF func(v, x, y *Element) *Element
+ }
+ for _, tt := range []target{
+ {name: "Absolute", oneArgF: (*Element).Absolute},
+ {name: "Invert", oneArgF: (*Element).Invert},
+ {name: "Negate", oneArgF: (*Element).Negate},
+ {name: "Set", oneArgF: (*Element).Set},
+ {name: "Square", oneArgF: (*Element).Square},
+ {name: "Pow22523", oneArgF: (*Element).Pow22523},
+ {
+ name: "Mult32",
+ oneArgF: func(v, x *Element) *Element {
+ return v.Mult32(x, 0xffffffff)
+ },
+ },
+ {name: "Multiply", twoArgsF: (*Element).Multiply},
+ {name: "Add", twoArgsF: (*Element).Add},
+ {name: "Subtract", twoArgsF: (*Element).Subtract},
+ {
+ name: "SqrtRatio",
+ twoArgsF: func(v, x, y *Element) *Element {
+ r, _ := v.SqrtRatio(x, y)
+ return r
+ },
+ },
+ {
+ name: "Select0",
+ twoArgsF: func(v, x, y *Element) *Element {
+ return v.Select(x, y, 0)
+ },
+ },
+ {
+ name: "Select1",
+ twoArgsF: func(v, x, y *Element) *Element {
+ return v.Select(x, y, 1)
+ },
+ },
+ } {
+ var err error
+ switch {
+ case tt.oneArgF != nil:
+ err = quick.Check(checkAliasingOneArg(tt.oneArgF), quickCheckConfig(256))
+ case tt.twoArgsF != nil:
+ err = quick.Check(checkAliasingTwoArgs(tt.twoArgsF), quickCheckConfig(256))
+ }
+ if err != nil {
+ t.Errorf("%v: %v", tt.name, err)
+ }
+ }
+}
diff --git a/dependencies/pkg/mod/filippo.io/edwards25519@v1.1.0/field/fe_amd64.go b/dependencies/pkg/mod/filippo.io/edwards25519@v1.1.0/field/fe_amd64.go
new file mode 100644
index 0000000..edcf163
--- /dev/null
+++ b/dependencies/pkg/mod/filippo.io/edwards25519@v1.1.0/field/fe_amd64.go
@@ -0,0 +1,16 @@
+// Code generated by command: go run fe_amd64_asm.go -out ../fe_amd64.s -stubs ../fe_amd64.go -pkg field. DO NOT EDIT.
+
+//go:build amd64 && gc && !purego
+// +build amd64,gc,!purego
+
+package field
+
+// feMul sets out = a * b. It works like feMulGeneric.
+//
+//go:noescape
+func feMul(out *Element, a *Element, b *Element)
+
+// feSquare sets out = a * a. It works like feSquareGeneric.
+//
+//go:noescape
+func feSquare(out *Element, a *Element)
diff --git a/dependencies/pkg/mod/filippo.io/edwards25519@v1.1.0/field/fe_amd64.s b/dependencies/pkg/mod/filippo.io/edwards25519@v1.1.0/field/fe_amd64.s
new file mode 100644
index 0000000..293f013
--- /dev/null
+++ b/dependencies/pkg/mod/filippo.io/edwards25519@v1.1.0/field/fe_amd64.s
@@ -0,0 +1,379 @@
+// Code generated by command: go run fe_amd64_asm.go -out ../fe_amd64.s -stubs ../fe_amd64.go -pkg field. DO NOT EDIT.
+
+//go:build amd64 && gc && !purego
+// +build amd64,gc,!purego
+
+#include "textflag.h"
+
+// func feMul(out *Element, a *Element, b *Element)
+TEXT ·feMul(SB), NOSPLIT, $0-24
+ MOVQ a+8(FP), CX
+ MOVQ b+16(FP), BX
+
+ // r0 = a0×b0
+ MOVQ (CX), AX
+ MULQ (BX)
+ MOVQ AX, DI
+ MOVQ DX, SI
+
+ // r0 += 19×a1×b4
+ MOVQ 8(CX), AX
+ IMUL3Q $0x13, AX, AX
+ MULQ 32(BX)
+ ADDQ AX, DI
+ ADCQ DX, SI
+
+ // r0 += 19×a2×b3
+ MOVQ 16(CX), AX
+ IMUL3Q $0x13, AX, AX
+ MULQ 24(BX)
+ ADDQ AX, DI
+ ADCQ DX, SI
+
+ // r0 += 19×a3×b2
+ MOVQ 24(CX), AX
+ IMUL3Q $0x13, AX, AX
+ MULQ 16(BX)
+ ADDQ AX, DI
+ ADCQ DX, SI
+
+ // r0 += 19×a4×b1
+ MOVQ 32(CX), AX
+ IMUL3Q $0x13, AX, AX
+ MULQ 8(BX)
+ ADDQ AX, DI
+ ADCQ DX, SI
+
+ // r1 = a0×b1
+ MOVQ (CX), AX
+ MULQ 8(BX)
+ MOVQ AX, R9
+ MOVQ DX, R8
+
+ // r1 += a1×b0
+ MOVQ 8(CX), AX
+ MULQ (BX)
+ ADDQ AX, R9
+ ADCQ DX, R8
+
+ // r1 += 19×a2×b4
+ MOVQ 16(CX), AX
+ IMUL3Q $0x13, AX, AX
+ MULQ 32(BX)
+ ADDQ AX, R9
+ ADCQ DX, R8
+
+ // r1 += 19×a3×b3
+ MOVQ 24(CX), AX
+ IMUL3Q $0x13, AX, AX
+ MULQ 24(BX)
+ ADDQ AX, R9
+ ADCQ DX, R8
+
+ // r1 += 19×a4×b2
+ MOVQ 32(CX), AX
+ IMUL3Q $0x13, AX, AX
+ MULQ 16(BX)
+ ADDQ AX, R9
+ ADCQ DX, R8
+
+ // r2 = a0×b2
+ MOVQ (CX), AX
+ MULQ 16(BX)
+ MOVQ AX, R11
+ MOVQ DX, R10
+
+ // r2 += a1×b1
+ MOVQ 8(CX), AX
+ MULQ 8(BX)
+ ADDQ AX, R11
+ ADCQ DX, R10
+
+ // r2 += a2×b0
+ MOVQ 16(CX), AX
+ MULQ (BX)
+ ADDQ AX, R11
+ ADCQ DX, R10
+
+ // r2 += 19×a3×b4
+ MOVQ 24(CX), AX
+ IMUL3Q $0x13, AX, AX
+ MULQ 32(BX)
+ ADDQ AX, R11
+ ADCQ DX, R10
+
+ // r2 += 19×a4×b3
+ MOVQ 32(CX), AX
+ IMUL3Q $0x13, AX, AX
+ MULQ 24(BX)
+ ADDQ AX, R11
+ ADCQ DX, R10
+
+ // r3 = a0×b3
+ MOVQ (CX), AX
+ MULQ 24(BX)
+ MOVQ AX, R13
+ MOVQ DX, R12
+
+ // r3 += a1×b2
+ MOVQ 8(CX), AX
+ MULQ 16(BX)
+ ADDQ AX, R13
+ ADCQ DX, R12
+
+ // r3 += a2×b1
+ MOVQ 16(CX), AX
+ MULQ 8(BX)
+ ADDQ AX, R13
+ ADCQ DX, R12
+
+ // r3 += a3×b0
+ MOVQ 24(CX), AX
+ MULQ (BX)
+ ADDQ AX, R13
+ ADCQ DX, R12
+
+ // r3 += 19×a4×b4
+ MOVQ 32(CX), AX
+ IMUL3Q $0x13, AX, AX
+ MULQ 32(BX)
+ ADDQ AX, R13
+ ADCQ DX, R12
+
+ // r4 = a0×b4
+ MOVQ (CX), AX
+ MULQ 32(BX)
+ MOVQ AX, R15
+ MOVQ DX, R14
+
+ // r4 += a1×b3
+ MOVQ 8(CX), AX
+ MULQ 24(BX)
+ ADDQ AX, R15
+ ADCQ DX, R14
+
+ // r4 += a2×b2
+ MOVQ 16(CX), AX
+ MULQ 16(BX)
+ ADDQ AX, R15
+ ADCQ DX, R14
+
+ // r4 += a3×b1
+ MOVQ 24(CX), AX
+ MULQ 8(BX)
+ ADDQ AX, R15
+ ADCQ DX, R14
+
+ // r4 += a4×b0
+ MOVQ 32(CX), AX
+ MULQ (BX)
+ ADDQ AX, R15
+ ADCQ DX, R14
+
+ // First reduction chain
+ MOVQ $0x0007ffffffffffff, AX
+ SHLQ $0x0d, DI, SI
+ SHLQ $0x0d, R9, R8
+ SHLQ $0x0d, R11, R10
+ SHLQ $0x0d, R13, R12
+ SHLQ $0x0d, R15, R14
+ ANDQ AX, DI
+ IMUL3Q $0x13, R14, R14
+ ADDQ R14, DI
+ ANDQ AX, R9
+ ADDQ SI, R9
+ ANDQ AX, R11
+ ADDQ R8, R11
+ ANDQ AX, R13
+ ADDQ R10, R13
+ ANDQ AX, R15
+ ADDQ R12, R15
+
+ // Second reduction chain (carryPropagate)
+ MOVQ DI, SI
+ SHRQ $0x33, SI
+ MOVQ R9, R8
+ SHRQ $0x33, R8
+ MOVQ R11, R10
+ SHRQ $0x33, R10
+ MOVQ R13, R12
+ SHRQ $0x33, R12
+ MOVQ R15, R14
+ SHRQ $0x33, R14
+ ANDQ AX, DI
+ IMUL3Q $0x13, R14, R14
+ ADDQ R14, DI
+ ANDQ AX, R9
+ ADDQ SI, R9
+ ANDQ AX, R11
+ ADDQ R8, R11
+ ANDQ AX, R13
+ ADDQ R10, R13
+ ANDQ AX, R15
+ ADDQ R12, R15
+
+ // Store output
+ MOVQ out+0(FP), AX
+ MOVQ DI, (AX)
+ MOVQ R9, 8(AX)
+ MOVQ R11, 16(AX)
+ MOVQ R13, 24(AX)
+ MOVQ R15, 32(AX)
+ RET
+
+// func feSquare(out *Element, a *Element)
+TEXT ·feSquare(SB), NOSPLIT, $0-16
+ MOVQ a+8(FP), CX
+
+ // r0 = l0×l0
+ MOVQ (CX), AX
+ MULQ (CX)
+ MOVQ AX, SI
+ MOVQ DX, BX
+
+ // r0 += 38×l1×l4
+ MOVQ 8(CX), AX
+ IMUL3Q $0x26, AX, AX
+ MULQ 32(CX)
+ ADDQ AX, SI
+ ADCQ DX, BX
+
+ // r0 += 38×l2×l3
+ MOVQ 16(CX), AX
+ IMUL3Q $0x26, AX, AX
+ MULQ 24(CX)
+ ADDQ AX, SI
+ ADCQ DX, BX
+
+ // r1 = 2×l0×l1
+ MOVQ (CX), AX
+ SHLQ $0x01, AX
+ MULQ 8(CX)
+ MOVQ AX, R8
+ MOVQ DX, DI
+
+ // r1 += 38×l2×l4
+ MOVQ 16(CX), AX
+ IMUL3Q $0x26, AX, AX
+ MULQ 32(CX)
+ ADDQ AX, R8
+ ADCQ DX, DI
+
+ // r1 += 19×l3×l3
+ MOVQ 24(CX), AX
+ IMUL3Q $0x13, AX, AX
+ MULQ 24(CX)
+ ADDQ AX, R8
+ ADCQ DX, DI
+
+ // r2 = 2×l0×l2
+ MOVQ (CX), AX
+ SHLQ $0x01, AX
+ MULQ 16(CX)
+ MOVQ AX, R10
+ MOVQ DX, R9
+
+ // r2 += l1×l1
+ MOVQ 8(CX), AX
+ MULQ 8(CX)
+ ADDQ AX, R10
+ ADCQ DX, R9
+
+ // r2 += 38×l3×l4
+ MOVQ 24(CX), AX
+ IMUL3Q $0x26, AX, AX
+ MULQ 32(CX)
+ ADDQ AX, R10
+ ADCQ DX, R9
+
+ // r3 = 2×l0×l3
+ MOVQ (CX), AX
+ SHLQ $0x01, AX
+ MULQ 24(CX)
+ MOVQ AX, R12
+ MOVQ DX, R11
+
+ // r3 += 2×l1×l2
+ MOVQ 8(CX), AX
+ IMUL3Q $0x02, AX, AX
+ MULQ 16(CX)
+ ADDQ AX, R12
+ ADCQ DX, R11
+
+ // r3 += 19×l4×l4
+ MOVQ 32(CX), AX
+ IMUL3Q $0x13, AX, AX
+ MULQ 32(CX)
+ ADDQ AX, R12
+ ADCQ DX, R11
+
+ // r4 = 2×l0×l4
+ MOVQ (CX), AX
+ SHLQ $0x01, AX
+ MULQ 32(CX)
+ MOVQ AX, R14
+ MOVQ DX, R13
+
+ // r4 += 2×l1×l3
+ MOVQ 8(CX), AX
+ IMUL3Q $0x02, AX, AX
+ MULQ 24(CX)
+ ADDQ AX, R14
+ ADCQ DX, R13
+
+ // r4 += l2×l2
+ MOVQ 16(CX), AX
+ MULQ 16(CX)
+ ADDQ AX, R14
+ ADCQ DX, R13
+
+ // First reduction chain
+ MOVQ $0x0007ffffffffffff, AX
+ SHLQ $0x0d, SI, BX
+ SHLQ $0x0d, R8, DI
+ SHLQ $0x0d, R10, R9
+ SHLQ $0x0d, R12, R11
+ SHLQ $0x0d, R14, R13
+ ANDQ AX, SI
+ IMUL3Q $0x13, R13, R13
+ ADDQ R13, SI
+ ANDQ AX, R8
+ ADDQ BX, R8
+ ANDQ AX, R10
+ ADDQ DI, R10
+ ANDQ AX, R12
+ ADDQ R9, R12
+ ANDQ AX, R14
+ ADDQ R11, R14
+
+ // Second reduction chain (carryPropagate)
+ MOVQ SI, BX
+ SHRQ $0x33, BX
+ MOVQ R8, DI
+ SHRQ $0x33, DI
+ MOVQ R10, R9
+ SHRQ $0x33, R9
+ MOVQ R12, R11
+ SHRQ $0x33, R11
+ MOVQ R14, R13
+ SHRQ $0x33, R13
+ ANDQ AX, SI
+ IMUL3Q $0x13, R13, R13
+ ADDQ R13, SI
+ ANDQ AX, R8
+ ADDQ BX, R8
+ ANDQ AX, R10
+ ADDQ DI, R10
+ ANDQ AX, R12
+ ADDQ R9, R12
+ ANDQ AX, R14
+ ADDQ R11, R14
+
+ // Store output
+ MOVQ out+0(FP), AX
+ MOVQ SI, (AX)
+ MOVQ R8, 8(AX)
+ MOVQ R10, 16(AX)
+ MOVQ R12, 24(AX)
+ MOVQ R14, 32(AX)
+ RET
diff --git a/dependencies/pkg/mod/filippo.io/edwards25519@v1.1.0/field/fe_amd64_noasm.go b/dependencies/pkg/mod/filippo.io/edwards25519@v1.1.0/field/fe_amd64_noasm.go
new file mode 100644
index 0000000..ddb6c9b
--- /dev/null
+++ b/dependencies/pkg/mod/filippo.io/edwards25519@v1.1.0/field/fe_amd64_noasm.go
@@ -0,0 +1,12 @@
+// Copyright (c) 2019 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build !amd64 || !gc || purego
+// +build !amd64 !gc purego
+
+package field
+
+func feMul(v, x, y *Element) { feMulGeneric(v, x, y) }
+
+func feSquare(v, x *Element) { feSquareGeneric(v, x) }
diff --git a/dependencies/pkg/mod/filippo.io/edwards25519@v1.1.0/field/fe_arm64.go b/dependencies/pkg/mod/filippo.io/edwards25519@v1.1.0/field/fe_arm64.go
new file mode 100644
index 0000000..af459ef
--- /dev/null
+++ b/dependencies/pkg/mod/filippo.io/edwards25519@v1.1.0/field/fe_arm64.go
@@ -0,0 +1,16 @@
+// Copyright (c) 2020 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build arm64 && gc && !purego
+// +build arm64,gc,!purego
+
+package field
+
+//go:noescape
+func carryPropagate(v *Element)
+
+func (v *Element) carryPropagate() *Element {
+ carryPropagate(v)
+ return v
+}
diff --git a/dependencies/pkg/mod/filippo.io/edwards25519@v1.1.0/field/fe_arm64.s b/dependencies/pkg/mod/filippo.io/edwards25519@v1.1.0/field/fe_arm64.s
new file mode 100644
index 0000000..3126a43
--- /dev/null
+++ b/dependencies/pkg/mod/filippo.io/edwards25519@v1.1.0/field/fe_arm64.s
@@ -0,0 +1,42 @@
+// Copyright (c) 2020 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build arm64 && gc && !purego
+
+#include "textflag.h"
+
+// carryPropagate works exactly like carryPropagateGeneric and uses the
+// same AND, ADD, and LSR+MADD instructions emitted by the compiler, but
+// avoids loading R0-R4 twice and uses LDP and STP.
+//
+// See https://golang.org/issues/43145 for the main compiler issue.
+//
+// func carryPropagate(v *Element)
+TEXT ·carryPropagate(SB),NOFRAME|NOSPLIT,$0-8
+ MOVD v+0(FP), R20
+
+ LDP 0(R20), (R0, R1)
+ LDP 16(R20), (R2, R3)
+ MOVD 32(R20), R4
+
+ AND $0x7ffffffffffff, R0, R10
+ AND $0x7ffffffffffff, R1, R11
+ AND $0x7ffffffffffff, R2, R12
+ AND $0x7ffffffffffff, R3, R13
+ AND $0x7ffffffffffff, R4, R14
+
+ ADD R0>>51, R11, R11
+ ADD R1>>51, R12, R12
+ ADD R2>>51, R13, R13
+ ADD R3>>51, R14, R14
+ // R4>>51 * 19 + R10 -> R10
+ LSR $51, R4, R21
+ MOVD $19, R22
+ MADD R22, R10, R21, R10
+
+ STP (R10, R11), 0(R20)
+ STP (R12, R13), 16(R20)
+ MOVD R14, 32(R20)
+
+ RET
diff --git a/dependencies/pkg/mod/filippo.io/edwards25519@v1.1.0/field/fe_arm64_noasm.go b/dependencies/pkg/mod/filippo.io/edwards25519@v1.1.0/field/fe_arm64_noasm.go
new file mode 100644
index 0000000..234a5b2
--- /dev/null
+++ b/dependencies/pkg/mod/filippo.io/edwards25519@v1.1.0/field/fe_arm64_noasm.go
@@ -0,0 +1,12 @@
+// Copyright (c) 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build !arm64 || !gc || purego
+// +build !arm64 !gc purego
+
+package field
+
+func (v *Element) carryPropagate() *Element {
+ return v.carryPropagateGeneric()
+}
diff --git a/dependencies/pkg/mod/filippo.io/edwards25519@v1.1.0/field/fe_bench_test.go b/dependencies/pkg/mod/filippo.io/edwards25519@v1.1.0/field/fe_bench_test.go
new file mode 100644
index 0000000..84fdf05
--- /dev/null
+++ b/dependencies/pkg/mod/filippo.io/edwards25519@v1.1.0/field/fe_bench_test.go
@@ -0,0 +1,49 @@
+// Copyright (c) 2019 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package field
+
+import "testing"
+
+func BenchmarkAdd(b *testing.B) {
+ x := new(Element).One()
+ y := new(Element).Add(x, x)
+ b.ResetTimer()
+ for i := 0; i < b.N; i++ {
+ x.Add(x, y)
+ }
+}
+
+func BenchmarkMultiply(b *testing.B) {
+ x := new(Element).One()
+ y := new(Element).Add(x, x)
+ b.ResetTimer()
+ for i := 0; i < b.N; i++ {
+ x.Multiply(x, y)
+ }
+}
+
+func BenchmarkSquare(b *testing.B) {
+ x := new(Element).Add(feOne, feOne)
+ b.ResetTimer()
+ for i := 0; i < b.N; i++ {
+ x.Square(x)
+ }
+}
+
+func BenchmarkInvert(b *testing.B) {
+ x := new(Element).Add(feOne, feOne)
+ b.ResetTimer()
+ for i := 0; i < b.N; i++ {
+ x.Invert(x)
+ }
+}
+
+func BenchmarkMult32(b *testing.B) {
+ x := new(Element).One()
+ b.ResetTimer()
+ for i := 0; i < b.N; i++ {
+ x.Mult32(x, 0xaa42aa42)
+ }
+}
diff --git a/dependencies/pkg/mod/filippo.io/edwards25519@v1.1.0/field/fe_extra.go b/dependencies/pkg/mod/filippo.io/edwards25519@v1.1.0/field/fe_extra.go
new file mode 100644
index 0000000..1ef503b
--- /dev/null
+++ b/dependencies/pkg/mod/filippo.io/edwards25519@v1.1.0/field/fe_extra.go
@@ -0,0 +1,50 @@
+// Copyright (c) 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package field
+
+import "errors"
+
+// This file contains additional functionality that is not included in the
+// upstream crypto/ed25519/edwards25519/field package.
+
+// SetWideBytes sets v to x, where x is a 64-byte little-endian encoding, which
+// is reduced modulo the field order. If x is not of the right length,
+// SetWideBytes returns nil and an error, and the receiver is unchanged.
+//
+// SetWideBytes is not necessary to select a uniformly distributed value, and is
+// only provided for compatibility: SetBytes can be used instead as the chance
+// of bias is less than 2⁻²⁵⁰.
+func (v *Element) SetWideBytes(x []byte) (*Element, error) {
+ if len(x) != 64 {
+ return nil, errors.New("edwards25519: invalid SetWideBytes input size")
+ }
+
+ // Split the 64 bytes into two elements, and extract the most significant
+ // bit of each, which is ignored by SetBytes.
+ lo, _ := new(Element).SetBytes(x[:32])
+ loMSB := uint64(x[31] >> 7)
+ hi, _ := new(Element).SetBytes(x[32:])
+ hiMSB := uint64(x[63] >> 7)
+
+ // The output we want is
+ //
+ // v = lo + loMSB * 2²⁵⁵ + hi * 2²⁵⁶ + hiMSB * 2⁵¹¹
+ //
+ // which applying the reduction identity comes out to
+ //
+ // v = lo + loMSB * 19 + hi * 2 * 19 + hiMSB * 2 * 19²
+ //
+ // l0 will be the sum of a 52 bits value (lo.l0), plus a 5 bits value
+ // (loMSB * 19), a 6 bits value (hi.l0 * 2 * 19), and a 10 bits value
+ // (hiMSB * 2 * 19²), so it fits in a uint64.
+
+ v.l0 = lo.l0 + loMSB*19 + hi.l0*2*19 + hiMSB*2*19*19
+ v.l1 = lo.l1 + hi.l1*2*19
+ v.l2 = lo.l2 + hi.l2*2*19
+ v.l3 = lo.l3 + hi.l3*2*19
+ v.l4 = lo.l4 + hi.l4*2*19
+
+ return v.carryPropagate(), nil
+}
diff --git a/dependencies/pkg/mod/filippo.io/edwards25519@v1.1.0/field/fe_extra_test.go b/dependencies/pkg/mod/filippo.io/edwards25519@v1.1.0/field/fe_extra_test.go
new file mode 100644
index 0000000..7d8bea0
--- /dev/null
+++ b/dependencies/pkg/mod/filippo.io/edwards25519@v1.1.0/field/fe_extra_test.go
@@ -0,0 +1,37 @@
+// Copyright (c) 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package field
+
+import (
+ "math/big"
+ "testing"
+ "testing/quick"
+)
+
+var bigP = new(big.Int).Sub(new(big.Int).Lsh(big.NewInt(1), 255), big.NewInt(19))
+
+func TestSetWideBytes(t *testing.T) {
+ f1 := func(in [64]byte, fe Element) bool {
+ fe1 := new(Element).Set(&fe)
+
+ if out, err := fe.SetWideBytes([]byte{42}); err == nil || out != nil ||
+ fe.Equal(fe1) != 1 {
+ return false
+ }
+
+ if out, err := fe.SetWideBytes(in[:]); err != nil || out != &fe {
+ return false
+ }
+
+ b := new(big.Int).SetBytes(swapEndianness(in[:]))
+ fe1.fromBig(b.Mod(b, bigP))
+
+ return fe.Equal(fe1) == 1 && isInBounds(&fe) && isInBounds(fe1)
+ }
+ if err := quick.Check(f1, nil); err != nil {
+ t.Error(err)
+ }
+
+}
diff --git a/dependencies/pkg/mod/filippo.io/edwards25519@v1.1.0/field/fe_generic.go b/dependencies/pkg/mod/filippo.io/edwards25519@v1.1.0/field/fe_generic.go
new file mode 100644
index 0000000..86f5fd9
--- /dev/null
+++ b/dependencies/pkg/mod/filippo.io/edwards25519@v1.1.0/field/fe_generic.go
@@ -0,0 +1,266 @@
+// Copyright (c) 2017 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package field
+
+import "math/bits"
+
+// uint128 holds a 128-bit number as two 64-bit limbs, for use with the
+// bits.Mul64 and bits.Add64 intrinsics.
+type uint128 struct {
+ lo, hi uint64
+}
+
+// mul64 returns a * b.
+func mul64(a, b uint64) uint128 {
+ hi, lo := bits.Mul64(a, b)
+ return uint128{lo, hi}
+}
+
+// addMul64 returns v + a * b.
+func addMul64(v uint128, a, b uint64) uint128 {
+ hi, lo := bits.Mul64(a, b)
+ lo, c := bits.Add64(lo, v.lo, 0)
+ hi, _ = bits.Add64(hi, v.hi, c)
+ return uint128{lo, hi}
+}
+
+// shiftRightBy51 returns a >> 51. a is assumed to be at most 115 bits.
+func shiftRightBy51(a uint128) uint64 {
+ return (a.hi << (64 - 51)) | (a.lo >> 51)
+}
+
+func feMulGeneric(v, a, b *Element) {
+ a0 := a.l0
+ a1 := a.l1
+ a2 := a.l2
+ a3 := a.l3
+ a4 := a.l4
+
+ b0 := b.l0
+ b1 := b.l1
+ b2 := b.l2
+ b3 := b.l3
+ b4 := b.l4
+
+ // Limb multiplication works like pen-and-paper columnar multiplication, but
+ // with 51-bit limbs instead of digits.
+ //
+ // a4 a3 a2 a1 a0 x
+ // b4 b3 b2 b1 b0 =
+ // ------------------------
+ // a4b0 a3b0 a2b0 a1b0 a0b0 +
+ // a4b1 a3b1 a2b1 a1b1 a0b1 +
+ // a4b2 a3b2 a2b2 a1b2 a0b2 +
+ // a4b3 a3b3 a2b3 a1b3 a0b3 +
+ // a4b4 a3b4 a2b4 a1b4 a0b4 =
+ // ----------------------------------------------
+ // r8 r7 r6 r5 r4 r3 r2 r1 r0
+ //
+ // We can then use the reduction identity (a * 2²⁵⁵ + b = a * 19 + b) to
+ // reduce the limbs that would overflow 255 bits. r5 * 2²⁵⁵ becomes 19 * r5,
+ // r6 * 2³⁰⁶ becomes 19 * r6 * 2⁵¹, etc.
+ //
+ // Reduction can be carried out simultaneously to multiplication. For
+ // example, we do not compute r5: whenever the result of a multiplication
+ // belongs to r5, like a1b4, we multiply it by 19 and add the result to r0.
+ //
+ // a4b0 a3b0 a2b0 a1b0 a0b0 +
+ // a3b1 a2b1 a1b1 a0b1 19×a4b1 +
+ // a2b2 a1b2 a0b2 19×a4b2 19×a3b2 +
+ // a1b3 a0b3 19×a4b3 19×a3b3 19×a2b3 +
+ // a0b4 19×a4b4 19×a3b4 19×a2b4 19×a1b4 =
+ // --------------------------------------
+ // r4 r3 r2 r1 r0
+ //
+ // Finally we add up the columns into wide, overlapping limbs.
+
+ a1_19 := a1 * 19
+ a2_19 := a2 * 19
+ a3_19 := a3 * 19
+ a4_19 := a4 * 19
+
+ // r0 = a0×b0 + 19×(a1×b4 + a2×b3 + a3×b2 + a4×b1)
+ r0 := mul64(a0, b0)
+ r0 = addMul64(r0, a1_19, b4)
+ r0 = addMul64(r0, a2_19, b3)
+ r0 = addMul64(r0, a3_19, b2)
+ r0 = addMul64(r0, a4_19, b1)
+
+ // r1 = a0×b1 + a1×b0 + 19×(a2×b4 + a3×b3 + a4×b2)
+ r1 := mul64(a0, b1)
+ r1 = addMul64(r1, a1, b0)
+ r1 = addMul64(r1, a2_19, b4)
+ r1 = addMul64(r1, a3_19, b3)
+ r1 = addMul64(r1, a4_19, b2)
+
+ // r2 = a0×b2 + a1×b1 + a2×b0 + 19×(a3×b4 + a4×b3)
+ r2 := mul64(a0, b2)
+ r2 = addMul64(r2, a1, b1)
+ r2 = addMul64(r2, a2, b0)
+ r2 = addMul64(r2, a3_19, b4)
+ r2 = addMul64(r2, a4_19, b3)
+
+ // r3 = a0×b3 + a1×b2 + a2×b1 + a3×b0 + 19×a4×b4
+ r3 := mul64(a0, b3)
+ r3 = addMul64(r3, a1, b2)
+ r3 = addMul64(r3, a2, b1)
+ r3 = addMul64(r3, a3, b0)
+ r3 = addMul64(r3, a4_19, b4)
+
+ // r4 = a0×b4 + a1×b3 + a2×b2 + a3×b1 + a4×b0
+ r4 := mul64(a0, b4)
+ r4 = addMul64(r4, a1, b3)
+ r4 = addMul64(r4, a2, b2)
+ r4 = addMul64(r4, a3, b1)
+ r4 = addMul64(r4, a4, b0)
+
+ // After the multiplication, we need to reduce (carry) the five coefficients
+ // to obtain a result with limbs that are at most slightly larger than 2⁵¹,
+ // to respect the Element invariant.
+ //
+ // Overall, the reduction works the same as carryPropagate, except with
+ // wider inputs: we take the carry for each coefficient by shifting it right
+ // by 51, and add it to the limb above it. The top carry is multiplied by 19
+ // according to the reduction identity and added to the lowest limb.
+ //
+ // The largest coefficient (r0) will be at most 111 bits, which guarantees
+ // that all carries are at most 111 - 51 = 60 bits, which fits in a uint64.
+ //
+ // r0 = a0×b0 + 19×(a1×b4 + a2×b3 + a3×b2 + a4×b1)
+ // r0 < 2⁵²×2⁵² + 19×(2⁵²×2⁵² + 2⁵²×2⁵² + 2⁵²×2⁵² + 2⁵²×2⁵²)
+ // r0 < (1 + 19 × 4) × 2⁵² × 2⁵²
+ // r0 < 2⁷ × 2⁵² × 2⁵²
+ // r0 < 2¹¹¹
+ //
+ // Moreover, the top coefficient (r4) is at most 107 bits, so c4 is at most
+ // 56 bits, and c4 * 19 is at most 61 bits, which again fits in a uint64 and
+ // allows us to easily apply the reduction identity.
+ //
+ // r4 = a0×b4 + a1×b3 + a2×b2 + a3×b1 + a4×b0
+ // r4 < 5 × 2⁵² × 2⁵²
+ // r4 < 2¹⁰⁷
+ //
+
+ c0 := shiftRightBy51(r0)
+ c1 := shiftRightBy51(r1)
+ c2 := shiftRightBy51(r2)
+ c3 := shiftRightBy51(r3)
+ c4 := shiftRightBy51(r4)
+
+ rr0 := r0.lo&maskLow51Bits + c4*19
+ rr1 := r1.lo&maskLow51Bits + c0
+ rr2 := r2.lo&maskLow51Bits + c1
+ rr3 := r3.lo&maskLow51Bits + c2
+ rr4 := r4.lo&maskLow51Bits + c3
+
+ // Now all coefficients fit into 64-bit registers but are still too large to
+ // be passed around as an Element. We therefore do one last carry chain,
+ // where the carries will be small enough to fit in the wiggle room above 2⁵¹.
+ *v = Element{rr0, rr1, rr2, rr3, rr4}
+ v.carryPropagate()
+}
+
+func feSquareGeneric(v, a *Element) {
+ l0 := a.l0
+ l1 := a.l1
+ l2 := a.l2
+ l3 := a.l3
+ l4 := a.l4
+
+ // Squaring works precisely like multiplication above, but thanks to its
+ // symmetry we get to group a few terms together.
+ //
+ // l4 l3 l2 l1 l0 x
+ // l4 l3 l2 l1 l0 =
+ // ------------------------
+ // l4l0 l3l0 l2l0 l1l0 l0l0 +
+ // l4l1 l3l1 l2l1 l1l1 l0l1 +
+ // l4l2 l3l2 l2l2 l1l2 l0l2 +
+ // l4l3 l3l3 l2l3 l1l3 l0l3 +
+ // l4l4 l3l4 l2l4 l1l4 l0l4 =
+ // ----------------------------------------------
+ // r8 r7 r6 r5 r4 r3 r2 r1 r0
+ //
+ // l4l0 l3l0 l2l0 l1l0 l0l0 +
+ // l3l1 l2l1 l1l1 l0l1 19×l4l1 +
+ // l2l2 l1l2 l0l2 19×l4l2 19×l3l2 +
+ // l1l3 l0l3 19×l4l3 19×l3l3 19×l2l3 +
+ // l0l4 19×l4l4 19×l3l4 19×l2l4 19×l1l4 =
+ // --------------------------------------
+ // r4 r3 r2 r1 r0
+ //
+ // With precomputed 2×, 19×, and 2×19× terms, we can compute each limb with
+ // only three Mul64 and four Add64, instead of five and eight.
+
+ l0_2 := l0 * 2
+ l1_2 := l1 * 2
+
+ l1_38 := l1 * 38
+ l2_38 := l2 * 38
+ l3_38 := l3 * 38
+
+ l3_19 := l3 * 19
+ l4_19 := l4 * 19
+
+ // r0 = l0×l0 + 19×(l1×l4 + l2×l3 + l3×l2 + l4×l1) = l0×l0 + 19×2×(l1×l4 + l2×l3)
+ r0 := mul64(l0, l0)
+ r0 = addMul64(r0, l1_38, l4)
+ r0 = addMul64(r0, l2_38, l3)
+
+ // r1 = l0×l1 + l1×l0 + 19×(l2×l4 + l3×l3 + l4×l2) = 2×l0×l1 + 19×2×l2×l4 + 19×l3×l3
+ r1 := mul64(l0_2, l1)
+ r1 = addMul64(r1, l2_38, l4)
+ r1 = addMul64(r1, l3_19, l3)
+
+ // r2 = l0×l2 + l1×l1 + l2×l0 + 19×(l3×l4 + l4×l3) = 2×l0×l2 + l1×l1 + 19×2×l3×l4
+ r2 := mul64(l0_2, l2)
+ r2 = addMul64(r2, l1, l1)
+ r2 = addMul64(r2, l3_38, l4)
+
+ // r3 = l0×l3 + l1×l2 + l2×l1 + l3×l0 + 19×l4×l4 = 2×l0×l3 + 2×l1×l2 + 19×l4×l4
+ r3 := mul64(l0_2, l3)
+ r3 = addMul64(r3, l1_2, l2)
+ r3 = addMul64(r3, l4_19, l4)
+
+ // r4 = l0×l4 + l1×l3 + l2×l2 + l3×l1 + l4×l0 = 2×l0×l4 + 2×l1×l3 + l2×l2
+ r4 := mul64(l0_2, l4)
+ r4 = addMul64(r4, l1_2, l3)
+ r4 = addMul64(r4, l2, l2)
+
+ c0 := shiftRightBy51(r0)
+ c1 := shiftRightBy51(r1)
+ c2 := shiftRightBy51(r2)
+ c3 := shiftRightBy51(r3)
+ c4 := shiftRightBy51(r4)
+
+ rr0 := r0.lo&maskLow51Bits + c4*19
+ rr1 := r1.lo&maskLow51Bits + c0
+ rr2 := r2.lo&maskLow51Bits + c1
+ rr3 := r3.lo&maskLow51Bits + c2
+ rr4 := r4.lo&maskLow51Bits + c3
+
+ *v = Element{rr0, rr1, rr2, rr3, rr4}
+ v.carryPropagate()
+}
+
+// carryPropagateGeneric brings the limbs below 52 bits by applying the reduction
+// identity (a * 2²⁵⁵ + b = a * 19 + b) to the l4 carry.
+func (v *Element) carryPropagateGeneric() *Element {
+ c0 := v.l0 >> 51
+ c1 := v.l1 >> 51
+ c2 := v.l2 >> 51
+ c3 := v.l3 >> 51
+ c4 := v.l4 >> 51
+
+ // c4 is at most 64 - 51 = 13 bits, so c4*19 is at most 18 bits, and
+ // the final l0 will be at most 52 bits. Similarly for the rest.
+ v.l0 = v.l0&maskLow51Bits + c4*19
+ v.l1 = v.l1&maskLow51Bits + c0
+ v.l2 = v.l2&maskLow51Bits + c1
+ v.l3 = v.l3&maskLow51Bits + c2
+ v.l4 = v.l4&maskLow51Bits + c3
+
+ return v
+}
diff --git a/dependencies/pkg/mod/filippo.io/edwards25519@v1.1.0/field/fe_test.go b/dependencies/pkg/mod/filippo.io/edwards25519@v1.1.0/field/fe_test.go
new file mode 100644
index 0000000..a24fbfe
--- /dev/null
+++ b/dependencies/pkg/mod/filippo.io/edwards25519@v1.1.0/field/fe_test.go
@@ -0,0 +1,566 @@
+// Copyright (c) 2017 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package field
+
+import (
+ "bytes"
+ "crypto/rand"
+ "encoding/hex"
+ "io"
+ "math/big"
+ "math/bits"
+ mathrand "math/rand"
+ "reflect"
+ "testing"
+ "testing/quick"
+)
+
+func (v Element) String() string {
+ return hex.EncodeToString(v.Bytes())
+}
+
+// quickCheckConfig returns a quick.Config that scales the max count by the
+// given factor if the -short flag is not set.
+func quickCheckConfig(slowScale int) *quick.Config {
+ cfg := new(quick.Config)
+ if !testing.Short() {
+ cfg.MaxCountScale = float64(slowScale)
+ }
+ return cfg
+}
+
+func generateFieldElement(rand *mathrand.Rand) Element {
+ const maskLow52Bits = (1 << 52) - 1
+ return Element{
+ rand.Uint64() & maskLow52Bits,
+ rand.Uint64() & maskLow52Bits,
+ rand.Uint64() & maskLow52Bits,
+ rand.Uint64() & maskLow52Bits,
+ rand.Uint64() & maskLow52Bits,
+ }
+}
+
+// weirdLimbs can be combined to generate a range of edge-case field elements.
+// 0 and -1 are intentionally more weighted, as they combine well.
+var (
+ weirdLimbs51 = []uint64{
+ 0, 0, 0, 0,
+ 1,
+ 19 - 1,
+ 19,
+ 0x2aaaaaaaaaaaa,
+ 0x5555555555555,
+ (1 << 51) - 20,
+ (1 << 51) - 19,
+ (1 << 51) - 1, (1 << 51) - 1,
+ (1 << 51) - 1, (1 << 51) - 1,
+ }
+ weirdLimbs52 = []uint64{
+ 0, 0, 0, 0, 0, 0,
+ 1,
+ 19 - 1,
+ 19,
+ 0x2aaaaaaaaaaaa,
+ 0x5555555555555,
+ (1 << 51) - 20,
+ (1 << 51) - 19,
+ (1 << 51) - 1, (1 << 51) - 1,
+ (1 << 51) - 1, (1 << 51) - 1,
+ (1 << 51) - 1, (1 << 51) - 1,
+ 1 << 51,
+ (1 << 51) + 1,
+ (1 << 52) - 19,
+ (1 << 52) - 1,
+ }
+)
+
+func generateWeirdFieldElement(rand *mathrand.Rand) Element {
+ return Element{
+ weirdLimbs52[rand.Intn(len(weirdLimbs52))],
+ weirdLimbs51[rand.Intn(len(weirdLimbs51))],
+ weirdLimbs51[rand.Intn(len(weirdLimbs51))],
+ weirdLimbs51[rand.Intn(len(weirdLimbs51))],
+ weirdLimbs51[rand.Intn(len(weirdLimbs51))],
+ }
+}
+
+func (Element) Generate(rand *mathrand.Rand, size int) reflect.Value {
+ if rand.Intn(2) == 0 {
+ return reflect.ValueOf(generateWeirdFieldElement(rand))
+ }
+ return reflect.ValueOf(generateFieldElement(rand))
+}
+
+// isInBounds returns whether the element is within the expected bit size bounds
+// after a light reduction.
+func isInBounds(x *Element) bool {
+ return bits.Len64(x.l0) <= 52 &&
+ bits.Len64(x.l1) <= 52 &&
+ bits.Len64(x.l2) <= 52 &&
+ bits.Len64(x.l3) <= 52 &&
+ bits.Len64(x.l4) <= 52
+}
+
+func TestMultiplyDistributesOverAdd(t *testing.T) {
+ multiplyDistributesOverAdd := func(x, y, z Element) bool {
+ // Compute t1 = (x+y)*z
+ t1 := new(Element)
+ t1.Add(&x, &y)
+ t1.Multiply(t1, &z)
+
+ // Compute t2 = x*z + y*z
+ t2 := new(Element)
+ t3 := new(Element)
+ t2.Multiply(&x, &z)
+ t3.Multiply(&y, &z)
+ t2.Add(t2, t3)
+
+ return t1.Equal(t2) == 1 && isInBounds(t1) && isInBounds(t2)
+ }
+
+ if err := quick.Check(multiplyDistributesOverAdd, quickCheckConfig(1024)); err != nil {
+ t.Error(err)
+ }
+}
+
+func TestMul64to128(t *testing.T) {
+ a := uint64(5)
+ b := uint64(5)
+ r := mul64(a, b)
+ if r.lo != 0x19 || r.hi != 0 {
+ t.Errorf("lo-range wide mult failed, got %d + %d*(2**64)", r.lo, r.hi)
+ }
+
+ a = uint64(18014398509481983) // 2^54 - 1
+ b = uint64(18014398509481983) // 2^54 - 1
+ r = mul64(a, b)
+ if r.lo != 0xff80000000000001 || r.hi != 0xfffffffffff {
+ t.Errorf("hi-range wide mult failed, got %d + %d*(2**64)", r.lo, r.hi)
+ }
+
+ a = uint64(1125899906842661)
+ b = uint64(2097155)
+ r = mul64(a, b)
+ r = addMul64(r, a, b)
+ r = addMul64(r, a, b)
+ r = addMul64(r, a, b)
+ r = addMul64(r, a, b)
+ if r.lo != 16888498990613035 || r.hi != 640 {
+ t.Errorf("wrong answer: %d + %d*(2**64)", r.lo, r.hi)
+ }
+}
+
+func TestSetBytesRoundTrip(t *testing.T) {
+ f1 := func(in [32]byte, fe Element) bool {
+ fe.SetBytes(in[:])
+
+ // Mask the most significant bit as it's ignored by SetBytes. (Now
+ // instead of earlier so we check the masking in SetBytes is working.)
+ in[len(in)-1] &= (1 << 7) - 1
+
+ return bytes.Equal(in[:], fe.Bytes()) && isInBounds(&fe)
+ }
+ if err := quick.Check(f1, nil); err != nil {
+ t.Errorf("failed bytes->FE->bytes round-trip: %v", err)
+ }
+
+ f2 := func(fe, r Element) bool {
+ r.SetBytes(fe.Bytes())
+
+ // Intentionally not using Equal not to go through Bytes again.
+ // Calling reduce because both Generate and SetBytes can produce
+ // non-canonical representations.
+ fe.reduce()
+ r.reduce()
+ return fe == r
+ }
+ if err := quick.Check(f2, nil); err != nil {
+ t.Errorf("failed FE->bytes->FE round-trip: %v", err)
+ }
+
+ // Check some fixed vectors from dalek
+ type feRTTest struct {
+ fe Element
+ b []byte
+ }
+ var tests = []feRTTest{
+ {
+ fe: Element{358744748052810, 1691584618240980, 977650209285361, 1429865912637724, 560044844278676},
+ b: []byte{74, 209, 69, 197, 70, 70, 161, 222, 56, 226, 229, 19, 112, 60, 25, 92, 187, 74, 222, 56, 50, 153, 51, 233, 40, 74, 57, 6, 160, 185, 213, 31},
+ },
+ {
+ fe: Element{84926274344903, 473620666599931, 365590438845504, 1028470286882429, 2146499180330972},
+ b: []byte{199, 23, 106, 112, 61, 77, 216, 79, 186, 60, 11, 118, 13, 16, 103, 15, 42, 32, 83, 250, 44, 57, 204, 198, 78, 199, 253, 119, 146, 172, 3, 122},
+ },
+ }
+
+ for _, tt := range tests {
+ b := tt.fe.Bytes()
+ fe, _ := new(Element).SetBytes(tt.b)
+ if !bytes.Equal(b, tt.b) || fe.Equal(&tt.fe) != 1 {
+ t.Errorf("Failed fixed roundtrip: %v", tt)
+ }
+ }
+}
+
+func swapEndianness(buf []byte) []byte {
+ for i := 0; i < len(buf)/2; i++ {
+ buf[i], buf[len(buf)-i-1] = buf[len(buf)-i-1], buf[i]
+ }
+ return buf
+}
+
+func TestBytesBigEquivalence(t *testing.T) {
+ f1 := func(in [32]byte, fe, fe1 Element) bool {
+ fe.SetBytes(in[:])
+
+ in[len(in)-1] &= (1 << 7) - 1 // mask the most significant bit
+ b := new(big.Int).SetBytes(swapEndianness(in[:]))
+ fe1.fromBig(b)
+
+ if fe != fe1 {
+ return false
+ }
+
+ buf := make([]byte, 32)
+ buf = swapEndianness(fe1.toBig().FillBytes(buf))
+
+ return bytes.Equal(fe.Bytes(), buf) && isInBounds(&fe) && isInBounds(&fe1)
+ }
+ if err := quick.Check(f1, nil); err != nil {
+ t.Error(err)
+ }
+}
+
+// fromBig sets v = n, and returns v. The bit length of n must not exceed 256.
+func (v *Element) fromBig(n *big.Int) *Element {
+ if n.BitLen() > 32*8 {
+ panic("edwards25519: invalid field element input size")
+ }
+
+ buf := make([]byte, 0, 32)
+ for _, word := range n.Bits() {
+ for i := 0; i < bits.UintSize; i += 8 {
+ if len(buf) >= cap(buf) {
+ break
+ }
+ buf = append(buf, byte(word))
+ word >>= 8
+ }
+ }
+
+ v.SetBytes(buf[:32])
+ return v
+}
+
+func (v *Element) fromDecimal(s string) *Element {
+ n, ok := new(big.Int).SetString(s, 10)
+ if !ok {
+ panic("not a valid decimal: " + s)
+ }
+ return v.fromBig(n)
+}
+
+// toBig returns v as a big.Int.
+func (v *Element) toBig() *big.Int {
+ buf := v.Bytes()
+
+ words := make([]big.Word, 32*8/bits.UintSize)
+ for n := range words {
+ for i := 0; i < bits.UintSize; i += 8 {
+ if len(buf) == 0 {
+ break
+ }
+ words[n] |= big.Word(buf[0]) << big.Word(i)
+ buf = buf[1:]
+ }
+ }
+
+ return new(big.Int).SetBits(words)
+}
+
+func TestDecimalConstants(t *testing.T) {
+ sqrtM1String := "19681161376707505956807079304988542015446066515923890162744021073123829784752"
+ if exp := new(Element).fromDecimal(sqrtM1String); sqrtM1.Equal(exp) != 1 {
+ t.Errorf("sqrtM1 is %v, expected %v", sqrtM1, exp)
+ }
+ // d is in the parent package, and we don't want to expose d or fromDecimal.
+ // dString := "37095705934669439343138083508754565189542113879843219016388785533085940283555"
+ // if exp := new(Element).fromDecimal(dString); d.Equal(exp) != 1 {
+ // t.Errorf("d is %v, expected %v", d, exp)
+ // }
+}
+
+func TestSetBytesRoundTripEdgeCases(t *testing.T) {
+ // TODO: values close to 0, close to 2^255-19, between 2^255-19 and 2^255-1,
+ // and between 2^255 and 2^256-1. Test both the documented SetBytes
+ // behavior, and that Bytes reduces them.
+}
+
+// Tests self-consistency between Multiply and Square.
+func TestConsistency(t *testing.T) {
+ var x Element
+ var x2, x2sq Element
+
+ x = Element{1, 1, 1, 1, 1}
+ x2.Multiply(&x, &x)
+ x2sq.Square(&x)
+
+ if x2 != x2sq {
+ t.Fatalf("all ones failed\nmul: %x\nsqr: %x\n", x2, x2sq)
+ }
+
+ var bytes [32]byte
+
+ _, err := io.ReadFull(rand.Reader, bytes[:])
+ if err != nil {
+ t.Fatal(err)
+ }
+ x.SetBytes(bytes[:])
+
+ x2.Multiply(&x, &x)
+ x2sq.Square(&x)
+
+ if x2 != x2sq {
+ t.Fatalf("all ones failed\nmul: %x\nsqr: %x\n", x2, x2sq)
+ }
+}
+
+func TestEqual(t *testing.T) {
+ x := Element{1, 1, 1, 1, 1}
+ y := Element{5, 4, 3, 2, 1}
+
+ eq := x.Equal(&x)
+ if eq != 1 {
+ t.Errorf("wrong about equality")
+ }
+
+ eq = x.Equal(&y)
+ if eq != 0 {
+ t.Errorf("wrong about inequality")
+ }
+}
+
+func TestInvert(t *testing.T) {
+ x := Element{1, 1, 1, 1, 1}
+ one := Element{1, 0, 0, 0, 0}
+ var xinv, r Element
+
+ xinv.Invert(&x)
+ r.Multiply(&x, &xinv)
+ r.reduce()
+
+ if one != r {
+ t.Errorf("inversion identity failed, got: %x", r)
+ }
+
+ var bytes [32]byte
+
+ _, err := io.ReadFull(rand.Reader, bytes[:])
+ if err != nil {
+ t.Fatal(err)
+ }
+ x.SetBytes(bytes[:])
+
+ xinv.Invert(&x)
+ r.Multiply(&x, &xinv)
+ r.reduce()
+
+ if one != r {
+ t.Errorf("random inversion identity failed, got: %x for field element %x", r, x)
+ }
+
+ zero := Element{}
+ x.Set(&zero)
+ if xx := xinv.Invert(&x); xx != &xinv {
+ t.Errorf("inverting zero did not return the receiver")
+ } else if xinv.Equal(&zero) != 1 {
+ t.Errorf("inverting zero did not return zero")
+ }
+}
+
+func TestSelectSwap(t *testing.T) {
+ a := Element{358744748052810, 1691584618240980, 977650209285361, 1429865912637724, 560044844278676}
+ b := Element{84926274344903, 473620666599931, 365590438845504, 1028470286882429, 2146499180330972}
+
+ var c, d Element
+
+ c.Select(&a, &b, 1)
+ d.Select(&a, &b, 0)
+
+ if c.Equal(&a) != 1 || d.Equal(&b) != 1 {
+ t.Errorf("Select failed")
+ }
+
+ c.Swap(&d, 0)
+
+ if c.Equal(&a) != 1 || d.Equal(&b) != 1 {
+ t.Errorf("Swap failed")
+ }
+
+ c.Swap(&d, 1)
+
+ if c.Equal(&b) != 1 || d.Equal(&a) != 1 {
+ t.Errorf("Swap failed")
+ }
+}
+
+func TestMult32(t *testing.T) {
+ mult32EquivalentToMul := func(x Element, y uint32) bool {
+ t1 := new(Element)
+ for i := 0; i < 100; i++ {
+ t1.Mult32(&x, y)
+ }
+
+ ty := new(Element)
+ ty.l0 = uint64(y)
+
+ t2 := new(Element)
+ for i := 0; i < 100; i++ {
+ t2.Multiply(&x, ty)
+ }
+
+ return t1.Equal(t2) == 1 && isInBounds(t1) && isInBounds(t2)
+ }
+
+ if err := quick.Check(mult32EquivalentToMul, quickCheckConfig(1024)); err != nil {
+ t.Error(err)
+ }
+}
+
+func TestSqrtRatio(t *testing.T) {
+ // From draft-irtf-cfrg-ristretto255-decaf448-00, Appendix A.4.
+ type test struct {
+ u, v string
+ wasSquare int
+ r string
+ }
+ var tests = []test{
+ // If u is 0, the function is defined to return (0, TRUE), even if v
+ // is zero. Note that where used in this package, the denominator v
+ // is never zero.
+ {
+ "0000000000000000000000000000000000000000000000000000000000000000",
+ "0000000000000000000000000000000000000000000000000000000000000000",
+ 1, "0000000000000000000000000000000000000000000000000000000000000000",
+ },
+ // 0/1 == 0²
+ {
+ "0000000000000000000000000000000000000000000000000000000000000000",
+ "0100000000000000000000000000000000000000000000000000000000000000",
+ 1, "0000000000000000000000000000000000000000000000000000000000000000",
+ },
+ // If u is non-zero and v is zero, defined to return (0, FALSE).
+ {
+ "0100000000000000000000000000000000000000000000000000000000000000",
+ "0000000000000000000000000000000000000000000000000000000000000000",
+ 0, "0000000000000000000000000000000000000000000000000000000000000000",
+ },
+ // 2/1 is not square in this field.
+ {
+ "0200000000000000000000000000000000000000000000000000000000000000",
+ "0100000000000000000000000000000000000000000000000000000000000000",
+ 0, "3c5ff1b5d8e4113b871bd052f9e7bcd0582804c266ffb2d4f4203eb07fdb7c54",
+ },
+ // 4/1 == 2²
+ {
+ "0400000000000000000000000000000000000000000000000000000000000000",
+ "0100000000000000000000000000000000000000000000000000000000000000",
+ 1, "0200000000000000000000000000000000000000000000000000000000000000",
+ },
+ // 1/4 == (2⁻¹)² == (2^(p-2))² per Euler's theorem
+ {
+ "0100000000000000000000000000000000000000000000000000000000000000",
+ "0400000000000000000000000000000000000000000000000000000000000000",
+ 1, "f6ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff3f",
+ },
+ }
+
+ for i, tt := range tests {
+ u, _ := new(Element).SetBytes(decodeHex(tt.u))
+ v, _ := new(Element).SetBytes(decodeHex(tt.v))
+ want, _ := new(Element).SetBytes(decodeHex(tt.r))
+ got, wasSquare := new(Element).SqrtRatio(u, v)
+ if got.Equal(want) == 0 || wasSquare != tt.wasSquare {
+ t.Errorf("%d: got (%v, %v), want (%v, %v)", i, got, wasSquare, want, tt.wasSquare)
+ }
+ }
+}
+
+func TestCarryPropagate(t *testing.T) {
+ asmLikeGeneric := func(a [5]uint64) bool {
+ t1 := &Element{a[0], a[1], a[2], a[3], a[4]}
+ t2 := &Element{a[0], a[1], a[2], a[3], a[4]}
+
+ t1.carryPropagate()
+ t2.carryPropagateGeneric()
+
+ if *t1 != *t2 {
+ t.Logf("got: %#v,\nexpected: %#v", t1, t2)
+ }
+
+ return *t1 == *t2 && isInBounds(t2)
+ }
+
+ if err := quick.Check(asmLikeGeneric, quickCheckConfig(1024)); err != nil {
+ t.Error(err)
+ }
+
+ if !asmLikeGeneric([5]uint64{0xffffffffffffffff, 0xffffffffffffffff, 0xffffffffffffffff, 0xffffffffffffffff, 0xffffffffffffffff}) {
+ t.Errorf("failed for {0xffffffffffffffff, 0xffffffffffffffff, 0xffffffffffffffff, 0xffffffffffffffff, 0xffffffffffffffff}")
+ }
+}
+
+func TestFeSquare(t *testing.T) {
+ asmLikeGeneric := func(a Element) bool {
+ t1 := a
+ t2 := a
+
+ feSquareGeneric(&t1, &t1)
+ feSquare(&t2, &t2)
+
+ if t1 != t2 {
+ t.Logf("got: %#v,\nexpected: %#v", t1, t2)
+ }
+
+ return t1 == t2 && isInBounds(&t2)
+ }
+
+ if err := quick.Check(asmLikeGeneric, quickCheckConfig(1024)); err != nil {
+ t.Error(err)
+ }
+}
+
+func TestFeMul(t *testing.T) {
+ asmLikeGeneric := func(a, b Element) bool {
+ a1 := a
+ a2 := a
+ b1 := b
+ b2 := b
+
+ feMulGeneric(&a1, &a1, &b1)
+ feMul(&a2, &a2, &b2)
+
+ if a1 != a2 || b1 != b2 {
+ t.Logf("got: %#v,\nexpected: %#v", a1, a2)
+ t.Logf("got: %#v,\nexpected: %#v", b1, b2)
+ }
+
+ return a1 == a2 && isInBounds(&a2) &&
+ b1 == b2 && isInBounds(&b2)
+ }
+
+ if err := quick.Check(asmLikeGeneric, quickCheckConfig(1024)); err != nil {
+ t.Error(err)
+ }
+}
+
+func decodeHex(s string) []byte {
+ b, err := hex.DecodeString(s)
+ if err != nil {
+ panic(err)
+ }
+ return b
+}
diff --git a/dependencies/pkg/mod/filippo.io/edwards25519@v1.1.0/go.mod b/dependencies/pkg/mod/filippo.io/edwards25519@v1.1.0/go.mod
new file mode 100644
index 0000000..78e04e9
--- /dev/null
+++ b/dependencies/pkg/mod/filippo.io/edwards25519@v1.1.0/go.mod
@@ -0,0 +1,3 @@
+module filippo.io/edwards25519
+
+go 1.20
diff --git a/dependencies/pkg/mod/filippo.io/edwards25519@v1.1.0/scalar.go b/dependencies/pkg/mod/filippo.io/edwards25519@v1.1.0/scalar.go
new file mode 100644
index 0000000..3fd1653
--- /dev/null
+++ b/dependencies/pkg/mod/filippo.io/edwards25519@v1.1.0/scalar.go
@@ -0,0 +1,343 @@
+// Copyright (c) 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package edwards25519
+
+import (
+ "encoding/binary"
+ "errors"
+)
+
+// A Scalar is an integer modulo
+//
+// l = 2^252 + 27742317777372353535851937790883648493
+//
+// which is the prime order of the edwards25519 group.
+//
+// This type works similarly to math/big.Int, and all arguments and
+// receivers are allowed to alias.
+//
+// The zero value is a valid zero element.
+type Scalar struct {
+ // s is the scalar in the Montgomery domain, in the format of the
+ // fiat-crypto implementation.
+ s fiatScalarMontgomeryDomainFieldElement
+}
+
+// The field implementation in scalar_fiat.go is generated by the fiat-crypto
+// project (https://github.com/mit-plv/fiat-crypto) at version v0.0.9 (23d2dbc)
+// from a formally verified model.
+//
+// fiat-crypto code comes under the following license.
+//
+// Copyright (c) 2015-2020 The fiat-crypto Authors. All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// 1. Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//
+// THIS SOFTWARE IS PROVIDED BY the fiat-crypto authors "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+// THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL Berkeley Software Design,
+// Inc. BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+
+// NewScalar returns a new zero Scalar.
+func NewScalar() *Scalar {
+ return &Scalar{}
+}
+
+// MultiplyAdd sets s = x * y + z mod l, and returns s. It is equivalent to
+// using Multiply and then Add.
+func (s *Scalar) MultiplyAdd(x, y, z *Scalar) *Scalar {
+ // Make a copy of z in case it aliases s.
+ zCopy := new(Scalar).Set(z)
+ return s.Multiply(x, y).Add(s, zCopy)
+}
+
+// Add sets s = x + y mod l, and returns s.
+func (s *Scalar) Add(x, y *Scalar) *Scalar {
+ // s = 1 * x + y mod l
+ fiatScalarAdd(&s.s, &x.s, &y.s)
+ return s
+}
+
+// Subtract sets s = x - y mod l, and returns s.
+func (s *Scalar) Subtract(x, y *Scalar) *Scalar {
+ // s = -1 * y + x mod l
+ fiatScalarSub(&s.s, &x.s, &y.s)
+ return s
+}
+
+// Negate sets s = -x mod l, and returns s.
+func (s *Scalar) Negate(x *Scalar) *Scalar {
+ // s = -1 * x + 0 mod l
+ fiatScalarOpp(&s.s, &x.s)
+ return s
+}
+
+// Multiply sets s = x * y mod l, and returns s.
+func (s *Scalar) Multiply(x, y *Scalar) *Scalar {
+ // s = x * y + 0 mod l
+ fiatScalarMul(&s.s, &x.s, &y.s)
+ return s
+}
+
+// Set sets s = x, and returns s.
+func (s *Scalar) Set(x *Scalar) *Scalar {
+ *s = *x
+ return s
+}
+
+// SetUniformBytes sets s = x mod l, where x is a 64-byte little-endian integer.
+// If x is not of the right length, SetUniformBytes returns nil and an error,
+// and the receiver is unchanged.
+//
+// SetUniformBytes can be used to set s to a uniformly distributed value given
+// 64 uniformly distributed random bytes.
+func (s *Scalar) SetUniformBytes(x []byte) (*Scalar, error) {
+ if len(x) != 64 {
+ return nil, errors.New("edwards25519: invalid SetUniformBytes input length")
+ }
+
+ // We have a value x of 512 bits, but our fiatScalarFromBytes function
+ // expects an input lower than l, which is a little over 252 bits.
+ //
+ // Instead of writing a reduction function that operates on wider inputs, we
+ // can interpret x as the sum of three shorter values a, b, and c.
+ //
+ // x = a + b * 2^168 + c * 2^336 mod l
+ //
+ // We then precompute 2^168 and 2^336 modulo l, and perform the reduction
+ // with two multiplications and two additions.
+
+ s.setShortBytes(x[:21])
+ t := new(Scalar).setShortBytes(x[21:42])
+ s.Add(s, t.Multiply(t, scalarTwo168))
+ t.setShortBytes(x[42:])
+ s.Add(s, t.Multiply(t, scalarTwo336))
+
+ return s, nil
+}
+
+// scalarTwo168 and scalarTwo336 are 2^168 and 2^336 modulo l, encoded as a
+// fiatScalarMontgomeryDomainFieldElement, which is a little-endian 4-limb value
+// in the 2^256 Montgomery domain.
+var scalarTwo168 = &Scalar{s: [4]uint64{0x5b8ab432eac74798, 0x38afddd6de59d5d7,
+ 0xa2c131b399411b7c, 0x6329a7ed9ce5a30}}
+var scalarTwo336 = &Scalar{s: [4]uint64{0xbd3d108e2b35ecc5, 0x5c3a3718bdf9c90b,
+ 0x63aa97a331b4f2ee, 0x3d217f5be65cb5c}}
+
+// setShortBytes sets s = x mod l, where x is a little-endian integer shorter
+// than 32 bytes.
+func (s *Scalar) setShortBytes(x []byte) *Scalar {
+ if len(x) >= 32 {
+ panic("edwards25519: internal error: setShortBytes called with a long string")
+ }
+ var buf [32]byte
+ copy(buf[:], x)
+ fiatScalarFromBytes((*[4]uint64)(&s.s), &buf)
+ fiatScalarToMontgomery(&s.s, (*fiatScalarNonMontgomeryDomainFieldElement)(&s.s))
+ return s
+}
+
+// SetCanonicalBytes sets s = x, where x is a 32-byte little-endian encoding of
+// s, and returns s. If x is not a canonical encoding of s, SetCanonicalBytes
+// returns nil and an error, and the receiver is unchanged.
+func (s *Scalar) SetCanonicalBytes(x []byte) (*Scalar, error) {
+ if len(x) != 32 {
+ return nil, errors.New("invalid scalar length")
+ }
+ if !isReduced(x) {
+ return nil, errors.New("invalid scalar encoding")
+ }
+
+ fiatScalarFromBytes((*[4]uint64)(&s.s), (*[32]byte)(x))
+ fiatScalarToMontgomery(&s.s, (*fiatScalarNonMontgomeryDomainFieldElement)(&s.s))
+
+ return s, nil
+}
+
+// scalarMinusOneBytes is l - 1 in little endian.
+var scalarMinusOneBytes = [32]byte{236, 211, 245, 92, 26, 99, 18, 88, 214, 156, 247, 162, 222, 249, 222, 20, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 16}
+
+// isReduced returns whether the given scalar in 32-byte little endian encoded
+// form is reduced modulo l.
+func isReduced(s []byte) bool {
+ if len(s) != 32 {
+ return false
+ }
+
+ for i := len(s) - 1; i >= 0; i-- {
+ switch {
+ case s[i] > scalarMinusOneBytes[i]:
+ return false
+ case s[i] < scalarMinusOneBytes[i]:
+ return true
+ }
+ }
+ return true
+}
+
+// SetBytesWithClamping applies the buffer pruning described in RFC 8032,
+// Section 5.1.5 (also known as clamping) and sets s to the result. The input
+// must be 32 bytes, and it is not modified. If x is not of the right length,
+// SetBytesWithClamping returns nil and an error, and the receiver is unchanged.
+//
+// Note that since Scalar values are always reduced modulo the prime order of
+// the curve, the resulting value will not preserve any of the cofactor-clearing
+// properties that clamping is meant to provide. It will however work as
+// expected as long as it is applied to points on the prime order subgroup, like
+// in Ed25519. In fact, it is lost to history why RFC 8032 adopted the
+// irrelevant RFC 7748 clamping, but it is now required for compatibility.
+func (s *Scalar) SetBytesWithClamping(x []byte) (*Scalar, error) {
+ // The description above omits the purpose of the high bits of the clamping
+ // for brevity, but those are also lost to reductions, and are also
+ // irrelevant to edwards25519 as they protect against a specific
+ // implementation bug that was once observed in a generic Montgomery ladder.
+ if len(x) != 32 {
+ return nil, errors.New("edwards25519: invalid SetBytesWithClamping input length")
+ }
+
+ // We need to use the wide reduction from SetUniformBytes, since clamping
+ // sets the 2^254 bit, making the value higher than the order.
+ var wideBytes [64]byte
+ copy(wideBytes[:], x[:])
+ wideBytes[0] &= 248
+ wideBytes[31] &= 63
+ wideBytes[31] |= 64
+ return s.SetUniformBytes(wideBytes[:])
+}
+
+// Bytes returns the canonical 32-byte little-endian encoding of s.
+func (s *Scalar) Bytes() []byte {
+ // This function is outlined to make the allocations inline in the caller
+ // rather than happen on the heap.
+ var encoded [32]byte
+ return s.bytes(&encoded)
+}
+
+func (s *Scalar) bytes(out *[32]byte) []byte {
+ var ss fiatScalarNonMontgomeryDomainFieldElement
+ fiatScalarFromMontgomery(&ss, &s.s)
+ fiatScalarToBytes(out, (*[4]uint64)(&ss))
+ return out[:]
+}
+
+// Equal returns 1 if s and t are equal, and 0 otherwise.
+func (s *Scalar) Equal(t *Scalar) int {
+ var diff fiatScalarMontgomeryDomainFieldElement
+ fiatScalarSub(&diff, &s.s, &t.s)
+ var nonzero uint64
+ fiatScalarNonzero(&nonzero, (*[4]uint64)(&diff))
+ nonzero |= nonzero >> 32
+ nonzero |= nonzero >> 16
+ nonzero |= nonzero >> 8
+ nonzero |= nonzero >> 4
+ nonzero |= nonzero >> 2
+ nonzero |= nonzero >> 1
+ return int(^nonzero) & 1
+}
+
+// nonAdjacentForm computes a width-w non-adjacent form for this scalar.
+//
+// w must be between 2 and 8, or nonAdjacentForm will panic.
+func (s *Scalar) nonAdjacentForm(w uint) [256]int8 {
+ // This implementation is adapted from the one
+ // in curve25519-dalek and is documented there:
+ // https://github.com/dalek-cryptography/curve25519-dalek/blob/f630041af28e9a405255f98a8a93adca18e4315b/src/scalar.rs#L800-L871
+ b := s.Bytes()
+ if b[31] > 127 {
+ panic("scalar has high bit set illegally")
+ }
+ if w < 2 {
+ panic("w must be at least 2 by the definition of NAF")
+ } else if w > 8 {
+ panic("NAF digits must fit in int8")
+ }
+
+ var naf [256]int8
+ var digits [5]uint64
+
+ for i := 0; i < 4; i++ {
+ digits[i] = binary.LittleEndian.Uint64(b[i*8:])
+ }
+
+ width := uint64(1 << w)
+ windowMask := uint64(width - 1)
+
+ pos := uint(0)
+ carry := uint64(0)
+ for pos < 256 {
+ indexU64 := pos / 64
+ indexBit := pos % 64
+ var bitBuf uint64
+ if indexBit < 64-w {
+ // This window's bits are contained in a single u64
+ bitBuf = digits[indexU64] >> indexBit
+ } else {
+ // Combine the current 64 bits with bits from the next 64
+ bitBuf = (digits[indexU64] >> indexBit) | (digits[1+indexU64] << (64 - indexBit))
+ }
+
+ // Add carry into the current window
+ window := carry + (bitBuf & windowMask)
+
+ if window&1 == 0 {
+ // If the window value is even, preserve the carry and continue.
+ // Why is the carry preserved?
+ // If carry == 0 and window & 1 == 0,
+ // then the next carry should be 0
+ // If carry == 1 and window & 1 == 0,
+ // then bit_buf & 1 == 1 so the next carry should be 1
+ pos += 1
+ continue
+ }
+
+ if window < width/2 {
+ carry = 0
+ naf[pos] = int8(window)
+ } else {
+ carry = 1
+ naf[pos] = int8(window) - int8(width)
+ }
+
+ pos += w
+ }
+ return naf
+}
+
+func (s *Scalar) signedRadix16() [64]int8 {
+ b := s.Bytes()
+ if b[31] > 127 {
+ panic("scalar has high bit set illegally")
+ }
+
+ var digits [64]int8
+
+ // Compute unsigned radix-16 digits:
+ for i := 0; i < 32; i++ {
+ digits[2*i] = int8(b[i] & 15)
+ digits[2*i+1] = int8((b[i] >> 4) & 15)
+ }
+
+ // Recenter coefficients:
+ for i := 0; i < 63; i++ {
+ carry := (digits[i] + 8) >> 4
+ digits[i] -= carry << 4
+ digits[i+1] += carry
+ }
+
+ return digits
+}
diff --git a/dependencies/pkg/mod/filippo.io/edwards25519@v1.1.0/scalar_alias_test.go b/dependencies/pkg/mod/filippo.io/edwards25519@v1.1.0/scalar_alias_test.go
new file mode 100644
index 0000000..1bca1b0
--- /dev/null
+++ b/dependencies/pkg/mod/filippo.io/edwards25519@v1.1.0/scalar_alias_test.go
@@ -0,0 +1,111 @@
+// Copyright (c) 2019 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package edwards25519
+
+import (
+ "testing"
+ "testing/quick"
+)
+
+func TestScalarAliasing(t *testing.T) {
+ checkAliasingOneArg := func(f func(v, x *Scalar) *Scalar, v, x Scalar) bool {
+ x1, v1 := x, x
+
+ // Calculate a reference f(x) without aliasing.
+ if out := f(&v, &x); out != &v || !isReduced(out.Bytes()) {
+ return false
+ }
+
+ // Test aliasing the argument and the receiver.
+ if out := f(&v1, &v1); out != &v1 || v1 != v || !isReduced(out.Bytes()) {
+ return false
+ }
+
+ // Ensure the arguments was not modified.
+ return x == x1
+ }
+
+ checkAliasingTwoArgs := func(f func(v, x, y *Scalar) *Scalar, v, x, y Scalar) bool {
+ x1, y1, v1 := x, y, Scalar{}
+
+ // Calculate a reference f(x, y) without aliasing.
+ if out := f(&v, &x, &y); out != &v || !isReduced(out.Bytes()) {
+ return false
+ }
+
+ // Test aliasing the first argument and the receiver.
+ v1 = x
+ if out := f(&v1, &v1, &y); out != &v1 || v1 != v || !isReduced(out.Bytes()) {
+ return false
+ }
+ // Test aliasing the second argument and the receiver.
+ v1 = y
+ if out := f(&v1, &x, &v1); out != &v1 || v1 != v || !isReduced(out.Bytes()) {
+ return false
+ }
+
+ // Calculate a reference f(x, x) without aliasing.
+ if out := f(&v, &x, &x); out != &v || !isReduced(out.Bytes()) {
+ return false
+ }
+
+ // Test aliasing the first argument and the receiver.
+ v1 = x
+ if out := f(&v1, &v1, &x); out != &v1 || v1 != v || !isReduced(out.Bytes()) {
+ return false
+ }
+ // Test aliasing the second argument and the receiver.
+ v1 = x
+ if out := f(&v1, &x, &v1); out != &v1 || v1 != v || !isReduced(out.Bytes()) {
+ return false
+ }
+ // Test aliasing both arguments and the receiver.
+ v1 = x
+ if out := f(&v1, &v1, &v1); out != &v1 || v1 != v || !isReduced(out.Bytes()) {
+ return false
+ }
+
+ // Ensure the arguments were not modified.
+ return x == x1 && y == y1
+ }
+
+ for name, f := range map[string]interface{}{
+ "Negate": func(v, x Scalar) bool {
+ return checkAliasingOneArg((*Scalar).Negate, v, x)
+ },
+ "Invert": func(v, x Scalar) bool {
+ return checkAliasingOneArg((*Scalar).Invert, v, x)
+ },
+ "Multiply": func(v, x, y Scalar) bool {
+ return checkAliasingTwoArgs((*Scalar).Multiply, v, x, y)
+ },
+ "Add": func(v, x, y Scalar) bool {
+ return checkAliasingTwoArgs((*Scalar).Add, v, x, y)
+ },
+ "Subtract": func(v, x, y Scalar) bool {
+ return checkAliasingTwoArgs((*Scalar).Subtract, v, x, y)
+ },
+ "MultiplyAdd1": func(v, x, y, fixed Scalar) bool {
+ return checkAliasingTwoArgs(func(v, x, y *Scalar) *Scalar {
+ return v.MultiplyAdd(&fixed, x, y)
+ }, v, x, y)
+ },
+ "MultiplyAdd2": func(v, x, y, fixed Scalar) bool {
+ return checkAliasingTwoArgs(func(v, x, y *Scalar) *Scalar {
+ return v.MultiplyAdd(x, &fixed, y)
+ }, v, x, y)
+ },
+ "MultiplyAdd3": func(v, x, y, fixed Scalar) bool {
+ return checkAliasingTwoArgs(func(v, x, y *Scalar) *Scalar {
+ return v.MultiplyAdd(x, y, &fixed)
+ }, v, x, y)
+ },
+ } {
+ err := quick.Check(f, quickCheckConfig(32))
+ if err != nil {
+ t.Errorf("%v: %v", name, err)
+ }
+ }
+}
diff --git a/dependencies/pkg/mod/filippo.io/edwards25519@v1.1.0/scalar_fiat.go b/dependencies/pkg/mod/filippo.io/edwards25519@v1.1.0/scalar_fiat.go
new file mode 100644
index 0000000..2e5782b
--- /dev/null
+++ b/dependencies/pkg/mod/filippo.io/edwards25519@v1.1.0/scalar_fiat.go
@@ -0,0 +1,1147 @@
+// Code generated by Fiat Cryptography. DO NOT EDIT.
+//
+// Autogenerated: word_by_word_montgomery --lang Go --cmovznz-by-mul --relax-primitive-carry-to-bitwidth 32,64 --public-function-case camelCase --public-type-case camelCase --private-function-case camelCase --private-type-case camelCase --doc-text-before-function-name '' --doc-newline-before-package-declaration --doc-prepend-header 'Code generated by Fiat Cryptography. DO NOT EDIT.' --package-name edwards25519 Scalar 64 '2^252 + 27742317777372353535851937790883648493' mul add sub opp nonzero from_montgomery to_montgomery to_bytes from_bytes
+//
+// curve description: Scalar
+//
+// machine_wordsize = 64 (from "64")
+//
+// requested operations: mul, add, sub, opp, nonzero, from_montgomery, to_montgomery, to_bytes, from_bytes
+//
+// m = 0x1000000000000000000000000000000014def9dea2f79cd65812631a5cf5d3ed (from "2^252 + 27742317777372353535851937790883648493")
+//
+//
+//
+// NOTE: In addition to the bounds specified above each function, all
+//
+// functions synthesized for this Montgomery arithmetic require the
+//
+// input to be strictly less than the prime modulus (m), and also
+//
+// require the input to be in the unique saturated representation.
+//
+// All functions also ensure that these two properties are true of
+//
+// return values.
+//
+//
+//
+// Computed values:
+//
+// eval z = z[0] + (z[1] << 64) + (z[2] << 128) + (z[3] << 192)
+//
+// bytes_eval z = z[0] + (z[1] << 8) + (z[2] << 16) + (z[3] << 24) + (z[4] << 32) + (z[5] << 40) + (z[6] << 48) + (z[7] << 56) + (z[8] << 64) + (z[9] << 72) + (z[10] << 80) + (z[11] << 88) + (z[12] << 96) + (z[13] << 104) + (z[14] << 112) + (z[15] << 120) + (z[16] << 128) + (z[17] << 136) + (z[18] << 144) + (z[19] << 152) + (z[20] << 160) + (z[21] << 168) + (z[22] << 176) + (z[23] << 184) + (z[24] << 192) + (z[25] << 200) + (z[26] << 208) + (z[27] << 216) + (z[28] << 224) + (z[29] << 232) + (z[30] << 240) + (z[31] << 248)
+//
+// twos_complement_eval z = let x1 := z[0] + (z[1] << 64) + (z[2] << 128) + (z[3] << 192) in
+//
+// if x1 & (2^256-1) < 2^255 then x1 & (2^256-1) else (x1 & (2^256-1)) - 2^256
+
+package edwards25519
+
+import "math/bits"
+
+type fiatScalarUint1 uint64 // We use uint64 instead of a more narrow type for performance reasons; see https://github.com/mit-plv/fiat-crypto/pull/1006#issuecomment-892625927
+type fiatScalarInt1 int64 // We use uint64 instead of a more narrow type for performance reasons; see https://github.com/mit-plv/fiat-crypto/pull/1006#issuecomment-892625927
+
+// The type fiatScalarMontgomeryDomainFieldElement is a field element in the Montgomery domain.
+//
+// Bounds: [[0x0 ~> 0xffffffffffffffff], [0x0 ~> 0xffffffffffffffff], [0x0 ~> 0xffffffffffffffff], [0x0 ~> 0xffffffffffffffff]]
+type fiatScalarMontgomeryDomainFieldElement [4]uint64
+
+// The type fiatScalarNonMontgomeryDomainFieldElement is a field element NOT in the Montgomery domain.
+//
+// Bounds: [[0x0 ~> 0xffffffffffffffff], [0x0 ~> 0xffffffffffffffff], [0x0 ~> 0xffffffffffffffff], [0x0 ~> 0xffffffffffffffff]]
+type fiatScalarNonMontgomeryDomainFieldElement [4]uint64
+
+// fiatScalarCmovznzU64 is a single-word conditional move.
+//
+// Postconditions:
+//
+// out1 = (if arg1 = 0 then arg2 else arg3)
+//
+// Input Bounds:
+//
+// arg1: [0x0 ~> 0x1]
+// arg2: [0x0 ~> 0xffffffffffffffff]
+// arg3: [0x0 ~> 0xffffffffffffffff]
+//
+// Output Bounds:
+//
+// out1: [0x0 ~> 0xffffffffffffffff]
+func fiatScalarCmovznzU64(out1 *uint64, arg1 fiatScalarUint1, arg2 uint64, arg3 uint64) {
+ x1 := (uint64(arg1) * 0xffffffffffffffff)
+ x2 := ((x1 & arg3) | ((^x1) & arg2))
+ *out1 = x2
+}
+
+// fiatScalarMul multiplies two field elements in the Montgomery domain.
+//
+// Preconditions:
+//
+// 0 ≤ eval arg1 < m
+// 0 ≤ eval arg2 < m
+//
+// Postconditions:
+//
+// eval (from_montgomery out1) mod m = (eval (from_montgomery arg1) * eval (from_montgomery arg2)) mod m
+// 0 ≤ eval out1 < m
+func fiatScalarMul(out1 *fiatScalarMontgomeryDomainFieldElement, arg1 *fiatScalarMontgomeryDomainFieldElement, arg2 *fiatScalarMontgomeryDomainFieldElement) {
+ x1 := arg1[1]
+ x2 := arg1[2]
+ x3 := arg1[3]
+ x4 := arg1[0]
+ var x5 uint64
+ var x6 uint64
+ x6, x5 = bits.Mul64(x4, arg2[3])
+ var x7 uint64
+ var x8 uint64
+ x8, x7 = bits.Mul64(x4, arg2[2])
+ var x9 uint64
+ var x10 uint64
+ x10, x9 = bits.Mul64(x4, arg2[1])
+ var x11 uint64
+ var x12 uint64
+ x12, x11 = bits.Mul64(x4, arg2[0])
+ var x13 uint64
+ var x14 uint64
+ x13, x14 = bits.Add64(x12, x9, uint64(0x0))
+ var x15 uint64
+ var x16 uint64
+ x15, x16 = bits.Add64(x10, x7, uint64(fiatScalarUint1(x14)))
+ var x17 uint64
+ var x18 uint64
+ x17, x18 = bits.Add64(x8, x5, uint64(fiatScalarUint1(x16)))
+ x19 := (uint64(fiatScalarUint1(x18)) + x6)
+ var x20 uint64
+ _, x20 = bits.Mul64(x11, 0xd2b51da312547e1b)
+ var x22 uint64
+ var x23 uint64
+ x23, x22 = bits.Mul64(x20, 0x1000000000000000)
+ var x24 uint64
+ var x25 uint64
+ x25, x24 = bits.Mul64(x20, 0x14def9dea2f79cd6)
+ var x26 uint64
+ var x27 uint64
+ x27, x26 = bits.Mul64(x20, 0x5812631a5cf5d3ed)
+ var x28 uint64
+ var x29 uint64
+ x28, x29 = bits.Add64(x27, x24, uint64(0x0))
+ x30 := (uint64(fiatScalarUint1(x29)) + x25)
+ var x32 uint64
+ _, x32 = bits.Add64(x11, x26, uint64(0x0))
+ var x33 uint64
+ var x34 uint64
+ x33, x34 = bits.Add64(x13, x28, uint64(fiatScalarUint1(x32)))
+ var x35 uint64
+ var x36 uint64
+ x35, x36 = bits.Add64(x15, x30, uint64(fiatScalarUint1(x34)))
+ var x37 uint64
+ var x38 uint64
+ x37, x38 = bits.Add64(x17, x22, uint64(fiatScalarUint1(x36)))
+ var x39 uint64
+ var x40 uint64
+ x39, x40 = bits.Add64(x19, x23, uint64(fiatScalarUint1(x38)))
+ var x41 uint64
+ var x42 uint64
+ x42, x41 = bits.Mul64(x1, arg2[3])
+ var x43 uint64
+ var x44 uint64
+ x44, x43 = bits.Mul64(x1, arg2[2])
+ var x45 uint64
+ var x46 uint64
+ x46, x45 = bits.Mul64(x1, arg2[1])
+ var x47 uint64
+ var x48 uint64
+ x48, x47 = bits.Mul64(x1, arg2[0])
+ var x49 uint64
+ var x50 uint64
+ x49, x50 = bits.Add64(x48, x45, uint64(0x0))
+ var x51 uint64
+ var x52 uint64
+ x51, x52 = bits.Add64(x46, x43, uint64(fiatScalarUint1(x50)))
+ var x53 uint64
+ var x54 uint64
+ x53, x54 = bits.Add64(x44, x41, uint64(fiatScalarUint1(x52)))
+ x55 := (uint64(fiatScalarUint1(x54)) + x42)
+ var x56 uint64
+ var x57 uint64
+ x56, x57 = bits.Add64(x33, x47, uint64(0x0))
+ var x58 uint64
+ var x59 uint64
+ x58, x59 = bits.Add64(x35, x49, uint64(fiatScalarUint1(x57)))
+ var x60 uint64
+ var x61 uint64
+ x60, x61 = bits.Add64(x37, x51, uint64(fiatScalarUint1(x59)))
+ var x62 uint64
+ var x63 uint64
+ x62, x63 = bits.Add64(x39, x53, uint64(fiatScalarUint1(x61)))
+ var x64 uint64
+ var x65 uint64
+ x64, x65 = bits.Add64(uint64(fiatScalarUint1(x40)), x55, uint64(fiatScalarUint1(x63)))
+ var x66 uint64
+ _, x66 = bits.Mul64(x56, 0xd2b51da312547e1b)
+ var x68 uint64
+ var x69 uint64
+ x69, x68 = bits.Mul64(x66, 0x1000000000000000)
+ var x70 uint64
+ var x71 uint64
+ x71, x70 = bits.Mul64(x66, 0x14def9dea2f79cd6)
+ var x72 uint64
+ var x73 uint64
+ x73, x72 = bits.Mul64(x66, 0x5812631a5cf5d3ed)
+ var x74 uint64
+ var x75 uint64
+ x74, x75 = bits.Add64(x73, x70, uint64(0x0))
+ x76 := (uint64(fiatScalarUint1(x75)) + x71)
+ var x78 uint64
+ _, x78 = bits.Add64(x56, x72, uint64(0x0))
+ var x79 uint64
+ var x80 uint64
+ x79, x80 = bits.Add64(x58, x74, uint64(fiatScalarUint1(x78)))
+ var x81 uint64
+ var x82 uint64
+ x81, x82 = bits.Add64(x60, x76, uint64(fiatScalarUint1(x80)))
+ var x83 uint64
+ var x84 uint64
+ x83, x84 = bits.Add64(x62, x68, uint64(fiatScalarUint1(x82)))
+ var x85 uint64
+ var x86 uint64
+ x85, x86 = bits.Add64(x64, x69, uint64(fiatScalarUint1(x84)))
+ x87 := (uint64(fiatScalarUint1(x86)) + uint64(fiatScalarUint1(x65)))
+ var x88 uint64
+ var x89 uint64
+ x89, x88 = bits.Mul64(x2, arg2[3])
+ var x90 uint64
+ var x91 uint64
+ x91, x90 = bits.Mul64(x2, arg2[2])
+ var x92 uint64
+ var x93 uint64
+ x93, x92 = bits.Mul64(x2, arg2[1])
+ var x94 uint64
+ var x95 uint64
+ x95, x94 = bits.Mul64(x2, arg2[0])
+ var x96 uint64
+ var x97 uint64
+ x96, x97 = bits.Add64(x95, x92, uint64(0x0))
+ var x98 uint64
+ var x99 uint64
+ x98, x99 = bits.Add64(x93, x90, uint64(fiatScalarUint1(x97)))
+ var x100 uint64
+ var x101 uint64
+ x100, x101 = bits.Add64(x91, x88, uint64(fiatScalarUint1(x99)))
+ x102 := (uint64(fiatScalarUint1(x101)) + x89)
+ var x103 uint64
+ var x104 uint64
+ x103, x104 = bits.Add64(x79, x94, uint64(0x0))
+ var x105 uint64
+ var x106 uint64
+ x105, x106 = bits.Add64(x81, x96, uint64(fiatScalarUint1(x104)))
+ var x107 uint64
+ var x108 uint64
+ x107, x108 = bits.Add64(x83, x98, uint64(fiatScalarUint1(x106)))
+ var x109 uint64
+ var x110 uint64
+ x109, x110 = bits.Add64(x85, x100, uint64(fiatScalarUint1(x108)))
+ var x111 uint64
+ var x112 uint64
+ x111, x112 = bits.Add64(x87, x102, uint64(fiatScalarUint1(x110)))
+ var x113 uint64
+ _, x113 = bits.Mul64(x103, 0xd2b51da312547e1b)
+ var x115 uint64
+ var x116 uint64
+ x116, x115 = bits.Mul64(x113, 0x1000000000000000)
+ var x117 uint64
+ var x118 uint64
+ x118, x117 = bits.Mul64(x113, 0x14def9dea2f79cd6)
+ var x119 uint64
+ var x120 uint64
+ x120, x119 = bits.Mul64(x113, 0x5812631a5cf5d3ed)
+ var x121 uint64
+ var x122 uint64
+ x121, x122 = bits.Add64(x120, x117, uint64(0x0))
+ x123 := (uint64(fiatScalarUint1(x122)) + x118)
+ var x125 uint64
+ _, x125 = bits.Add64(x103, x119, uint64(0x0))
+ var x126 uint64
+ var x127 uint64
+ x126, x127 = bits.Add64(x105, x121, uint64(fiatScalarUint1(x125)))
+ var x128 uint64
+ var x129 uint64
+ x128, x129 = bits.Add64(x107, x123, uint64(fiatScalarUint1(x127)))
+ var x130 uint64
+ var x131 uint64
+ x130, x131 = bits.Add64(x109, x115, uint64(fiatScalarUint1(x129)))
+ var x132 uint64
+ var x133 uint64
+ x132, x133 = bits.Add64(x111, x116, uint64(fiatScalarUint1(x131)))
+ x134 := (uint64(fiatScalarUint1(x133)) + uint64(fiatScalarUint1(x112)))
+ var x135 uint64
+ var x136 uint64
+ x136, x135 = bits.Mul64(x3, arg2[3])
+ var x137 uint64
+ var x138 uint64
+ x138, x137 = bits.Mul64(x3, arg2[2])
+ var x139 uint64
+ var x140 uint64
+ x140, x139 = bits.Mul64(x3, arg2[1])
+ var x141 uint64
+ var x142 uint64
+ x142, x141 = bits.Mul64(x3, arg2[0])
+ var x143 uint64
+ var x144 uint64
+ x143, x144 = bits.Add64(x142, x139, uint64(0x0))
+ var x145 uint64
+ var x146 uint64
+ x145, x146 = bits.Add64(x140, x137, uint64(fiatScalarUint1(x144)))
+ var x147 uint64
+ var x148 uint64
+ x147, x148 = bits.Add64(x138, x135, uint64(fiatScalarUint1(x146)))
+ x149 := (uint64(fiatScalarUint1(x148)) + x136)
+ var x150 uint64
+ var x151 uint64
+ x150, x151 = bits.Add64(x126, x141, uint64(0x0))
+ var x152 uint64
+ var x153 uint64
+ x152, x153 = bits.Add64(x128, x143, uint64(fiatScalarUint1(x151)))
+ var x154 uint64
+ var x155 uint64
+ x154, x155 = bits.Add64(x130, x145, uint64(fiatScalarUint1(x153)))
+ var x156 uint64
+ var x157 uint64
+ x156, x157 = bits.Add64(x132, x147, uint64(fiatScalarUint1(x155)))
+ var x158 uint64
+ var x159 uint64
+ x158, x159 = bits.Add64(x134, x149, uint64(fiatScalarUint1(x157)))
+ var x160 uint64
+ _, x160 = bits.Mul64(x150, 0xd2b51da312547e1b)
+ var x162 uint64
+ var x163 uint64
+ x163, x162 = bits.Mul64(x160, 0x1000000000000000)
+ var x164 uint64
+ var x165 uint64
+ x165, x164 = bits.Mul64(x160, 0x14def9dea2f79cd6)
+ var x166 uint64
+ var x167 uint64
+ x167, x166 = bits.Mul64(x160, 0x5812631a5cf5d3ed)
+ var x168 uint64
+ var x169 uint64
+ x168, x169 = bits.Add64(x167, x164, uint64(0x0))
+ x170 := (uint64(fiatScalarUint1(x169)) + x165)
+ var x172 uint64
+ _, x172 = bits.Add64(x150, x166, uint64(0x0))
+ var x173 uint64
+ var x174 uint64
+ x173, x174 = bits.Add64(x152, x168, uint64(fiatScalarUint1(x172)))
+ var x175 uint64
+ var x176 uint64
+ x175, x176 = bits.Add64(x154, x170, uint64(fiatScalarUint1(x174)))
+ var x177 uint64
+ var x178 uint64
+ x177, x178 = bits.Add64(x156, x162, uint64(fiatScalarUint1(x176)))
+ var x179 uint64
+ var x180 uint64
+ x179, x180 = bits.Add64(x158, x163, uint64(fiatScalarUint1(x178)))
+ x181 := (uint64(fiatScalarUint1(x180)) + uint64(fiatScalarUint1(x159)))
+ var x182 uint64
+ var x183 uint64
+ x182, x183 = bits.Sub64(x173, 0x5812631a5cf5d3ed, uint64(0x0))
+ var x184 uint64
+ var x185 uint64
+ x184, x185 = bits.Sub64(x175, 0x14def9dea2f79cd6, uint64(fiatScalarUint1(x183)))
+ var x186 uint64
+ var x187 uint64
+ x186, x187 = bits.Sub64(x177, uint64(0x0), uint64(fiatScalarUint1(x185)))
+ var x188 uint64
+ var x189 uint64
+ x188, x189 = bits.Sub64(x179, 0x1000000000000000, uint64(fiatScalarUint1(x187)))
+ var x191 uint64
+ _, x191 = bits.Sub64(x181, uint64(0x0), uint64(fiatScalarUint1(x189)))
+ var x192 uint64
+ fiatScalarCmovznzU64(&x192, fiatScalarUint1(x191), x182, x173)
+ var x193 uint64
+ fiatScalarCmovznzU64(&x193, fiatScalarUint1(x191), x184, x175)
+ var x194 uint64
+ fiatScalarCmovznzU64(&x194, fiatScalarUint1(x191), x186, x177)
+ var x195 uint64
+ fiatScalarCmovznzU64(&x195, fiatScalarUint1(x191), x188, x179)
+ out1[0] = x192
+ out1[1] = x193
+ out1[2] = x194
+ out1[3] = x195
+}
+
+// fiatScalarAdd adds two field elements in the Montgomery domain.
+//
+// Preconditions:
+//
+// 0 ≤ eval arg1 < m
+// 0 ≤ eval arg2 < m
+//
+// Postconditions:
+//
+// eval (from_montgomery out1) mod m = (eval (from_montgomery arg1) + eval (from_montgomery arg2)) mod m
+// 0 ≤ eval out1 < m
+func fiatScalarAdd(out1 *fiatScalarMontgomeryDomainFieldElement, arg1 *fiatScalarMontgomeryDomainFieldElement, arg2 *fiatScalarMontgomeryDomainFieldElement) {
+ var x1 uint64
+ var x2 uint64
+ x1, x2 = bits.Add64(arg1[0], arg2[0], uint64(0x0))
+ var x3 uint64
+ var x4 uint64
+ x3, x4 = bits.Add64(arg1[1], arg2[1], uint64(fiatScalarUint1(x2)))
+ var x5 uint64
+ var x6 uint64
+ x5, x6 = bits.Add64(arg1[2], arg2[2], uint64(fiatScalarUint1(x4)))
+ var x7 uint64
+ var x8 uint64
+ x7, x8 = bits.Add64(arg1[3], arg2[3], uint64(fiatScalarUint1(x6)))
+ var x9 uint64
+ var x10 uint64
+ x9, x10 = bits.Sub64(x1, 0x5812631a5cf5d3ed, uint64(0x0))
+ var x11 uint64
+ var x12 uint64
+ x11, x12 = bits.Sub64(x3, 0x14def9dea2f79cd6, uint64(fiatScalarUint1(x10)))
+ var x13 uint64
+ var x14 uint64
+ x13, x14 = bits.Sub64(x5, uint64(0x0), uint64(fiatScalarUint1(x12)))
+ var x15 uint64
+ var x16 uint64
+ x15, x16 = bits.Sub64(x7, 0x1000000000000000, uint64(fiatScalarUint1(x14)))
+ var x18 uint64
+ _, x18 = bits.Sub64(uint64(fiatScalarUint1(x8)), uint64(0x0), uint64(fiatScalarUint1(x16)))
+ var x19 uint64
+ fiatScalarCmovznzU64(&x19, fiatScalarUint1(x18), x9, x1)
+ var x20 uint64
+ fiatScalarCmovznzU64(&x20, fiatScalarUint1(x18), x11, x3)
+ var x21 uint64
+ fiatScalarCmovznzU64(&x21, fiatScalarUint1(x18), x13, x5)
+ var x22 uint64
+ fiatScalarCmovznzU64(&x22, fiatScalarUint1(x18), x15, x7)
+ out1[0] = x19
+ out1[1] = x20
+ out1[2] = x21
+ out1[3] = x22
+}
+
+// fiatScalarSub subtracts two field elements in the Montgomery domain.
+//
+// Preconditions:
+//
+// 0 ≤ eval arg1 < m
+// 0 ≤ eval arg2 < m
+//
+// Postconditions:
+//
+// eval (from_montgomery out1) mod m = (eval (from_montgomery arg1) - eval (from_montgomery arg2)) mod m
+// 0 ≤ eval out1 < m
+func fiatScalarSub(out1 *fiatScalarMontgomeryDomainFieldElement, arg1 *fiatScalarMontgomeryDomainFieldElement, arg2 *fiatScalarMontgomeryDomainFieldElement) {
+ var x1 uint64
+ var x2 uint64
+ x1, x2 = bits.Sub64(arg1[0], arg2[0], uint64(0x0))
+ var x3 uint64
+ var x4 uint64
+ x3, x4 = bits.Sub64(arg1[1], arg2[1], uint64(fiatScalarUint1(x2)))
+ var x5 uint64
+ var x6 uint64
+ x5, x6 = bits.Sub64(arg1[2], arg2[2], uint64(fiatScalarUint1(x4)))
+ var x7 uint64
+ var x8 uint64
+ x7, x8 = bits.Sub64(arg1[3], arg2[3], uint64(fiatScalarUint1(x6)))
+ var x9 uint64
+ fiatScalarCmovznzU64(&x9, fiatScalarUint1(x8), uint64(0x0), 0xffffffffffffffff)
+ var x10 uint64
+ var x11 uint64
+ x10, x11 = bits.Add64(x1, (x9 & 0x5812631a5cf5d3ed), uint64(0x0))
+ var x12 uint64
+ var x13 uint64
+ x12, x13 = bits.Add64(x3, (x9 & 0x14def9dea2f79cd6), uint64(fiatScalarUint1(x11)))
+ var x14 uint64
+ var x15 uint64
+ x14, x15 = bits.Add64(x5, uint64(0x0), uint64(fiatScalarUint1(x13)))
+ var x16 uint64
+ x16, _ = bits.Add64(x7, (x9 & 0x1000000000000000), uint64(fiatScalarUint1(x15)))
+ out1[0] = x10
+ out1[1] = x12
+ out1[2] = x14
+ out1[3] = x16
+}
+
+// fiatScalarOpp negates a field element in the Montgomery domain.
+//
+// Preconditions:
+//
+// 0 ≤ eval arg1 < m
+//
+// Postconditions:
+//
+// eval (from_montgomery out1) mod m = -eval (from_montgomery arg1) mod m
+// 0 ≤ eval out1 < m
+func fiatScalarOpp(out1 *fiatScalarMontgomeryDomainFieldElement, arg1 *fiatScalarMontgomeryDomainFieldElement) {
+ var x1 uint64
+ var x2 uint64
+ x1, x2 = bits.Sub64(uint64(0x0), arg1[0], uint64(0x0))
+ var x3 uint64
+ var x4 uint64
+ x3, x4 = bits.Sub64(uint64(0x0), arg1[1], uint64(fiatScalarUint1(x2)))
+ var x5 uint64
+ var x6 uint64
+ x5, x6 = bits.Sub64(uint64(0x0), arg1[2], uint64(fiatScalarUint1(x4)))
+ var x7 uint64
+ var x8 uint64
+ x7, x8 = bits.Sub64(uint64(0x0), arg1[3], uint64(fiatScalarUint1(x6)))
+ var x9 uint64
+ fiatScalarCmovznzU64(&x9, fiatScalarUint1(x8), uint64(0x0), 0xffffffffffffffff)
+ var x10 uint64
+ var x11 uint64
+ x10, x11 = bits.Add64(x1, (x9 & 0x5812631a5cf5d3ed), uint64(0x0))
+ var x12 uint64
+ var x13 uint64
+ x12, x13 = bits.Add64(x3, (x9 & 0x14def9dea2f79cd6), uint64(fiatScalarUint1(x11)))
+ var x14 uint64
+ var x15 uint64
+ x14, x15 = bits.Add64(x5, uint64(0x0), uint64(fiatScalarUint1(x13)))
+ var x16 uint64
+ x16, _ = bits.Add64(x7, (x9 & 0x1000000000000000), uint64(fiatScalarUint1(x15)))
+ out1[0] = x10
+ out1[1] = x12
+ out1[2] = x14
+ out1[3] = x16
+}
+
+// fiatScalarNonzero outputs a single non-zero word if the input is non-zero and zero otherwise.
+//
+// Preconditions:
+//
+// 0 ≤ eval arg1 < m
+//
+// Postconditions:
+//
+// out1 = 0 ↔ eval (from_montgomery arg1) mod m = 0
+//
+// Input Bounds:
+//
+// arg1: [[0x0 ~> 0xffffffffffffffff], [0x0 ~> 0xffffffffffffffff], [0x0 ~> 0xffffffffffffffff], [0x0 ~> 0xffffffffffffffff]]
+//
+// Output Bounds:
+//
+// out1: [0x0 ~> 0xffffffffffffffff]
+func fiatScalarNonzero(out1 *uint64, arg1 *[4]uint64) {
+ x1 := (arg1[0] | (arg1[1] | (arg1[2] | arg1[3])))
+ *out1 = x1
+}
+
+// fiatScalarFromMontgomery translates a field element out of the Montgomery domain.
+//
+// Preconditions:
+//
+// 0 ≤ eval arg1 < m
+//
+// Postconditions:
+//
+// eval out1 mod m = (eval arg1 * ((2^64)⁻¹ mod m)^4) mod m
+// 0 ≤ eval out1 < m
+func fiatScalarFromMontgomery(out1 *fiatScalarNonMontgomeryDomainFieldElement, arg1 *fiatScalarMontgomeryDomainFieldElement) {
+ x1 := arg1[0]
+ var x2 uint64
+ _, x2 = bits.Mul64(x1, 0xd2b51da312547e1b)
+ var x4 uint64
+ var x5 uint64
+ x5, x4 = bits.Mul64(x2, 0x1000000000000000)
+ var x6 uint64
+ var x7 uint64
+ x7, x6 = bits.Mul64(x2, 0x14def9dea2f79cd6)
+ var x8 uint64
+ var x9 uint64
+ x9, x8 = bits.Mul64(x2, 0x5812631a5cf5d3ed)
+ var x10 uint64
+ var x11 uint64
+ x10, x11 = bits.Add64(x9, x6, uint64(0x0))
+ var x13 uint64
+ _, x13 = bits.Add64(x1, x8, uint64(0x0))
+ var x14 uint64
+ var x15 uint64
+ x14, x15 = bits.Add64(uint64(0x0), x10, uint64(fiatScalarUint1(x13)))
+ var x16 uint64
+ var x17 uint64
+ x16, x17 = bits.Add64(x14, arg1[1], uint64(0x0))
+ var x18 uint64
+ _, x18 = bits.Mul64(x16, 0xd2b51da312547e1b)
+ var x20 uint64
+ var x21 uint64
+ x21, x20 = bits.Mul64(x18, 0x1000000000000000)
+ var x22 uint64
+ var x23 uint64
+ x23, x22 = bits.Mul64(x18, 0x14def9dea2f79cd6)
+ var x24 uint64
+ var x25 uint64
+ x25, x24 = bits.Mul64(x18, 0x5812631a5cf5d3ed)
+ var x26 uint64
+ var x27 uint64
+ x26, x27 = bits.Add64(x25, x22, uint64(0x0))
+ var x29 uint64
+ _, x29 = bits.Add64(x16, x24, uint64(0x0))
+ var x30 uint64
+ var x31 uint64
+ x30, x31 = bits.Add64((uint64(fiatScalarUint1(x17)) + (uint64(fiatScalarUint1(x15)) + (uint64(fiatScalarUint1(x11)) + x7))), x26, uint64(fiatScalarUint1(x29)))
+ var x32 uint64
+ var x33 uint64
+ x32, x33 = bits.Add64(x4, (uint64(fiatScalarUint1(x27)) + x23), uint64(fiatScalarUint1(x31)))
+ var x34 uint64
+ var x35 uint64
+ x34, x35 = bits.Add64(x5, x20, uint64(fiatScalarUint1(x33)))
+ var x36 uint64
+ var x37 uint64
+ x36, x37 = bits.Add64(x30, arg1[2], uint64(0x0))
+ var x38 uint64
+ var x39 uint64
+ x38, x39 = bits.Add64(x32, uint64(0x0), uint64(fiatScalarUint1(x37)))
+ var x40 uint64
+ var x41 uint64
+ x40, x41 = bits.Add64(x34, uint64(0x0), uint64(fiatScalarUint1(x39)))
+ var x42 uint64
+ _, x42 = bits.Mul64(x36, 0xd2b51da312547e1b)
+ var x44 uint64
+ var x45 uint64
+ x45, x44 = bits.Mul64(x42, 0x1000000000000000)
+ var x46 uint64
+ var x47 uint64
+ x47, x46 = bits.Mul64(x42, 0x14def9dea2f79cd6)
+ var x48 uint64
+ var x49 uint64
+ x49, x48 = bits.Mul64(x42, 0x5812631a5cf5d3ed)
+ var x50 uint64
+ var x51 uint64
+ x50, x51 = bits.Add64(x49, x46, uint64(0x0))
+ var x53 uint64
+ _, x53 = bits.Add64(x36, x48, uint64(0x0))
+ var x54 uint64
+ var x55 uint64
+ x54, x55 = bits.Add64(x38, x50, uint64(fiatScalarUint1(x53)))
+ var x56 uint64
+ var x57 uint64
+ x56, x57 = bits.Add64(x40, (uint64(fiatScalarUint1(x51)) + x47), uint64(fiatScalarUint1(x55)))
+ var x58 uint64
+ var x59 uint64
+ x58, x59 = bits.Add64((uint64(fiatScalarUint1(x41)) + (uint64(fiatScalarUint1(x35)) + x21)), x44, uint64(fiatScalarUint1(x57)))
+ var x60 uint64
+ var x61 uint64
+ x60, x61 = bits.Add64(x54, arg1[3], uint64(0x0))
+ var x62 uint64
+ var x63 uint64
+ x62, x63 = bits.Add64(x56, uint64(0x0), uint64(fiatScalarUint1(x61)))
+ var x64 uint64
+ var x65 uint64
+ x64, x65 = bits.Add64(x58, uint64(0x0), uint64(fiatScalarUint1(x63)))
+ var x66 uint64
+ _, x66 = bits.Mul64(x60, 0xd2b51da312547e1b)
+ var x68 uint64
+ var x69 uint64
+ x69, x68 = bits.Mul64(x66, 0x1000000000000000)
+ var x70 uint64
+ var x71 uint64
+ x71, x70 = bits.Mul64(x66, 0x14def9dea2f79cd6)
+ var x72 uint64
+ var x73 uint64
+ x73, x72 = bits.Mul64(x66, 0x5812631a5cf5d3ed)
+ var x74 uint64
+ var x75 uint64
+ x74, x75 = bits.Add64(x73, x70, uint64(0x0))
+ var x77 uint64
+ _, x77 = bits.Add64(x60, x72, uint64(0x0))
+ var x78 uint64
+ var x79 uint64
+ x78, x79 = bits.Add64(x62, x74, uint64(fiatScalarUint1(x77)))
+ var x80 uint64
+ var x81 uint64
+ x80, x81 = bits.Add64(x64, (uint64(fiatScalarUint1(x75)) + x71), uint64(fiatScalarUint1(x79)))
+ var x82 uint64
+ var x83 uint64
+ x82, x83 = bits.Add64((uint64(fiatScalarUint1(x65)) + (uint64(fiatScalarUint1(x59)) + x45)), x68, uint64(fiatScalarUint1(x81)))
+ x84 := (uint64(fiatScalarUint1(x83)) + x69)
+ var x85 uint64
+ var x86 uint64
+ x85, x86 = bits.Sub64(x78, 0x5812631a5cf5d3ed, uint64(0x0))
+ var x87 uint64
+ var x88 uint64
+ x87, x88 = bits.Sub64(x80, 0x14def9dea2f79cd6, uint64(fiatScalarUint1(x86)))
+ var x89 uint64
+ var x90 uint64
+ x89, x90 = bits.Sub64(x82, uint64(0x0), uint64(fiatScalarUint1(x88)))
+ var x91 uint64
+ var x92 uint64
+ x91, x92 = bits.Sub64(x84, 0x1000000000000000, uint64(fiatScalarUint1(x90)))
+ var x94 uint64
+ _, x94 = bits.Sub64(uint64(0x0), uint64(0x0), uint64(fiatScalarUint1(x92)))
+ var x95 uint64
+ fiatScalarCmovznzU64(&x95, fiatScalarUint1(x94), x85, x78)
+ var x96 uint64
+ fiatScalarCmovznzU64(&x96, fiatScalarUint1(x94), x87, x80)
+ var x97 uint64
+ fiatScalarCmovznzU64(&x97, fiatScalarUint1(x94), x89, x82)
+ var x98 uint64
+ fiatScalarCmovznzU64(&x98, fiatScalarUint1(x94), x91, x84)
+ out1[0] = x95
+ out1[1] = x96
+ out1[2] = x97
+ out1[3] = x98
+}
+
+// fiatScalarToMontgomery translates a field element into the Montgomery domain.
+//
+// Preconditions:
+//
+// 0 ≤ eval arg1 < m
+//
+// Postconditions:
+//
+// eval (from_montgomery out1) mod m = eval arg1 mod m
+// 0 ≤ eval out1 < m
+func fiatScalarToMontgomery(out1 *fiatScalarMontgomeryDomainFieldElement, arg1 *fiatScalarNonMontgomeryDomainFieldElement) {
+ x1 := arg1[1]
+ x2 := arg1[2]
+ x3 := arg1[3]
+ x4 := arg1[0]
+ var x5 uint64
+ var x6 uint64
+ x6, x5 = bits.Mul64(x4, 0x399411b7c309a3d)
+ var x7 uint64
+ var x8 uint64
+ x8, x7 = bits.Mul64(x4, 0xceec73d217f5be65)
+ var x9 uint64
+ var x10 uint64
+ x10, x9 = bits.Mul64(x4, 0xd00e1ba768859347)
+ var x11 uint64
+ var x12 uint64
+ x12, x11 = bits.Mul64(x4, 0xa40611e3449c0f01)
+ var x13 uint64
+ var x14 uint64
+ x13, x14 = bits.Add64(x12, x9, uint64(0x0))
+ var x15 uint64
+ var x16 uint64
+ x15, x16 = bits.Add64(x10, x7, uint64(fiatScalarUint1(x14)))
+ var x17 uint64
+ var x18 uint64
+ x17, x18 = bits.Add64(x8, x5, uint64(fiatScalarUint1(x16)))
+ var x19 uint64
+ _, x19 = bits.Mul64(x11, 0xd2b51da312547e1b)
+ var x21 uint64
+ var x22 uint64
+ x22, x21 = bits.Mul64(x19, 0x1000000000000000)
+ var x23 uint64
+ var x24 uint64
+ x24, x23 = bits.Mul64(x19, 0x14def9dea2f79cd6)
+ var x25 uint64
+ var x26 uint64
+ x26, x25 = bits.Mul64(x19, 0x5812631a5cf5d3ed)
+ var x27 uint64
+ var x28 uint64
+ x27, x28 = bits.Add64(x26, x23, uint64(0x0))
+ var x30 uint64
+ _, x30 = bits.Add64(x11, x25, uint64(0x0))
+ var x31 uint64
+ var x32 uint64
+ x31, x32 = bits.Add64(x13, x27, uint64(fiatScalarUint1(x30)))
+ var x33 uint64
+ var x34 uint64
+ x33, x34 = bits.Add64(x15, (uint64(fiatScalarUint1(x28)) + x24), uint64(fiatScalarUint1(x32)))
+ var x35 uint64
+ var x36 uint64
+ x35, x36 = bits.Add64(x17, x21, uint64(fiatScalarUint1(x34)))
+ var x37 uint64
+ var x38 uint64
+ x38, x37 = bits.Mul64(x1, 0x399411b7c309a3d)
+ var x39 uint64
+ var x40 uint64
+ x40, x39 = bits.Mul64(x1, 0xceec73d217f5be65)
+ var x41 uint64
+ var x42 uint64
+ x42, x41 = bits.Mul64(x1, 0xd00e1ba768859347)
+ var x43 uint64
+ var x44 uint64
+ x44, x43 = bits.Mul64(x1, 0xa40611e3449c0f01)
+ var x45 uint64
+ var x46 uint64
+ x45, x46 = bits.Add64(x44, x41, uint64(0x0))
+ var x47 uint64
+ var x48 uint64
+ x47, x48 = bits.Add64(x42, x39, uint64(fiatScalarUint1(x46)))
+ var x49 uint64
+ var x50 uint64
+ x49, x50 = bits.Add64(x40, x37, uint64(fiatScalarUint1(x48)))
+ var x51 uint64
+ var x52 uint64
+ x51, x52 = bits.Add64(x31, x43, uint64(0x0))
+ var x53 uint64
+ var x54 uint64
+ x53, x54 = bits.Add64(x33, x45, uint64(fiatScalarUint1(x52)))
+ var x55 uint64
+ var x56 uint64
+ x55, x56 = bits.Add64(x35, x47, uint64(fiatScalarUint1(x54)))
+ var x57 uint64
+ var x58 uint64
+ x57, x58 = bits.Add64(((uint64(fiatScalarUint1(x36)) + (uint64(fiatScalarUint1(x18)) + x6)) + x22), x49, uint64(fiatScalarUint1(x56)))
+ var x59 uint64
+ _, x59 = bits.Mul64(x51, 0xd2b51da312547e1b)
+ var x61 uint64
+ var x62 uint64
+ x62, x61 = bits.Mul64(x59, 0x1000000000000000)
+ var x63 uint64
+ var x64 uint64
+ x64, x63 = bits.Mul64(x59, 0x14def9dea2f79cd6)
+ var x65 uint64
+ var x66 uint64
+ x66, x65 = bits.Mul64(x59, 0x5812631a5cf5d3ed)
+ var x67 uint64
+ var x68 uint64
+ x67, x68 = bits.Add64(x66, x63, uint64(0x0))
+ var x70 uint64
+ _, x70 = bits.Add64(x51, x65, uint64(0x0))
+ var x71 uint64
+ var x72 uint64
+ x71, x72 = bits.Add64(x53, x67, uint64(fiatScalarUint1(x70)))
+ var x73 uint64
+ var x74 uint64
+ x73, x74 = bits.Add64(x55, (uint64(fiatScalarUint1(x68)) + x64), uint64(fiatScalarUint1(x72)))
+ var x75 uint64
+ var x76 uint64
+ x75, x76 = bits.Add64(x57, x61, uint64(fiatScalarUint1(x74)))
+ var x77 uint64
+ var x78 uint64
+ x78, x77 = bits.Mul64(x2, 0x399411b7c309a3d)
+ var x79 uint64
+ var x80 uint64
+ x80, x79 = bits.Mul64(x2, 0xceec73d217f5be65)
+ var x81 uint64
+ var x82 uint64
+ x82, x81 = bits.Mul64(x2, 0xd00e1ba768859347)
+ var x83 uint64
+ var x84 uint64
+ x84, x83 = bits.Mul64(x2, 0xa40611e3449c0f01)
+ var x85 uint64
+ var x86 uint64
+ x85, x86 = bits.Add64(x84, x81, uint64(0x0))
+ var x87 uint64
+ var x88 uint64
+ x87, x88 = bits.Add64(x82, x79, uint64(fiatScalarUint1(x86)))
+ var x89 uint64
+ var x90 uint64
+ x89, x90 = bits.Add64(x80, x77, uint64(fiatScalarUint1(x88)))
+ var x91 uint64
+ var x92 uint64
+ x91, x92 = bits.Add64(x71, x83, uint64(0x0))
+ var x93 uint64
+ var x94 uint64
+ x93, x94 = bits.Add64(x73, x85, uint64(fiatScalarUint1(x92)))
+ var x95 uint64
+ var x96 uint64
+ x95, x96 = bits.Add64(x75, x87, uint64(fiatScalarUint1(x94)))
+ var x97 uint64
+ var x98 uint64
+ x97, x98 = bits.Add64(((uint64(fiatScalarUint1(x76)) + (uint64(fiatScalarUint1(x58)) + (uint64(fiatScalarUint1(x50)) + x38))) + x62), x89, uint64(fiatScalarUint1(x96)))
+ var x99 uint64
+ _, x99 = bits.Mul64(x91, 0xd2b51da312547e1b)
+ var x101 uint64
+ var x102 uint64
+ x102, x101 = bits.Mul64(x99, 0x1000000000000000)
+ var x103 uint64
+ var x104 uint64
+ x104, x103 = bits.Mul64(x99, 0x14def9dea2f79cd6)
+ var x105 uint64
+ var x106 uint64
+ x106, x105 = bits.Mul64(x99, 0x5812631a5cf5d3ed)
+ var x107 uint64
+ var x108 uint64
+ x107, x108 = bits.Add64(x106, x103, uint64(0x0))
+ var x110 uint64
+ _, x110 = bits.Add64(x91, x105, uint64(0x0))
+ var x111 uint64
+ var x112 uint64
+ x111, x112 = bits.Add64(x93, x107, uint64(fiatScalarUint1(x110)))
+ var x113 uint64
+ var x114 uint64
+ x113, x114 = bits.Add64(x95, (uint64(fiatScalarUint1(x108)) + x104), uint64(fiatScalarUint1(x112)))
+ var x115 uint64
+ var x116 uint64
+ x115, x116 = bits.Add64(x97, x101, uint64(fiatScalarUint1(x114)))
+ var x117 uint64
+ var x118 uint64
+ x118, x117 = bits.Mul64(x3, 0x399411b7c309a3d)
+ var x119 uint64
+ var x120 uint64
+ x120, x119 = bits.Mul64(x3, 0xceec73d217f5be65)
+ var x121 uint64
+ var x122 uint64
+ x122, x121 = bits.Mul64(x3, 0xd00e1ba768859347)
+ var x123 uint64
+ var x124 uint64
+ x124, x123 = bits.Mul64(x3, 0xa40611e3449c0f01)
+ var x125 uint64
+ var x126 uint64
+ x125, x126 = bits.Add64(x124, x121, uint64(0x0))
+ var x127 uint64
+ var x128 uint64
+ x127, x128 = bits.Add64(x122, x119, uint64(fiatScalarUint1(x126)))
+ var x129 uint64
+ var x130 uint64
+ x129, x130 = bits.Add64(x120, x117, uint64(fiatScalarUint1(x128)))
+ var x131 uint64
+ var x132 uint64
+ x131, x132 = bits.Add64(x111, x123, uint64(0x0))
+ var x133 uint64
+ var x134 uint64
+ x133, x134 = bits.Add64(x113, x125, uint64(fiatScalarUint1(x132)))
+ var x135 uint64
+ var x136 uint64
+ x135, x136 = bits.Add64(x115, x127, uint64(fiatScalarUint1(x134)))
+ var x137 uint64
+ var x138 uint64
+ x137, x138 = bits.Add64(((uint64(fiatScalarUint1(x116)) + (uint64(fiatScalarUint1(x98)) + (uint64(fiatScalarUint1(x90)) + x78))) + x102), x129, uint64(fiatScalarUint1(x136)))
+ var x139 uint64
+ _, x139 = bits.Mul64(x131, 0xd2b51da312547e1b)
+ var x141 uint64
+ var x142 uint64
+ x142, x141 = bits.Mul64(x139, 0x1000000000000000)
+ var x143 uint64
+ var x144 uint64
+ x144, x143 = bits.Mul64(x139, 0x14def9dea2f79cd6)
+ var x145 uint64
+ var x146 uint64
+ x146, x145 = bits.Mul64(x139, 0x5812631a5cf5d3ed)
+ var x147 uint64
+ var x148 uint64
+ x147, x148 = bits.Add64(x146, x143, uint64(0x0))
+ var x150 uint64
+ _, x150 = bits.Add64(x131, x145, uint64(0x0))
+ var x151 uint64
+ var x152 uint64
+ x151, x152 = bits.Add64(x133, x147, uint64(fiatScalarUint1(x150)))
+ var x153 uint64
+ var x154 uint64
+ x153, x154 = bits.Add64(x135, (uint64(fiatScalarUint1(x148)) + x144), uint64(fiatScalarUint1(x152)))
+ var x155 uint64
+ var x156 uint64
+ x155, x156 = bits.Add64(x137, x141, uint64(fiatScalarUint1(x154)))
+ x157 := ((uint64(fiatScalarUint1(x156)) + (uint64(fiatScalarUint1(x138)) + (uint64(fiatScalarUint1(x130)) + x118))) + x142)
+ var x158 uint64
+ var x159 uint64
+ x158, x159 = bits.Sub64(x151, 0x5812631a5cf5d3ed, uint64(0x0))
+ var x160 uint64
+ var x161 uint64
+ x160, x161 = bits.Sub64(x153, 0x14def9dea2f79cd6, uint64(fiatScalarUint1(x159)))
+ var x162 uint64
+ var x163 uint64
+ x162, x163 = bits.Sub64(x155, uint64(0x0), uint64(fiatScalarUint1(x161)))
+ var x164 uint64
+ var x165 uint64
+ x164, x165 = bits.Sub64(x157, 0x1000000000000000, uint64(fiatScalarUint1(x163)))
+ var x167 uint64
+ _, x167 = bits.Sub64(uint64(0x0), uint64(0x0), uint64(fiatScalarUint1(x165)))
+ var x168 uint64
+ fiatScalarCmovznzU64(&x168, fiatScalarUint1(x167), x158, x151)
+ var x169 uint64
+ fiatScalarCmovznzU64(&x169, fiatScalarUint1(x167), x160, x153)
+ var x170 uint64
+ fiatScalarCmovznzU64(&x170, fiatScalarUint1(x167), x162, x155)
+ var x171 uint64
+ fiatScalarCmovznzU64(&x171, fiatScalarUint1(x167), x164, x157)
+ out1[0] = x168
+ out1[1] = x169
+ out1[2] = x170
+ out1[3] = x171
+}
+
+// fiatScalarToBytes serializes a field element NOT in the Montgomery domain to bytes in little-endian order.
+//
+// Preconditions:
+//
+// 0 ≤ eval arg1 < m
+//
+// Postconditions:
+//
+// out1 = map (λ x, ⌊((eval arg1 mod m) mod 2^(8 * (x + 1))) / 2^(8 * x)⌋) [0..31]
+//
+// Input Bounds:
+//
+// arg1: [[0x0 ~> 0xffffffffffffffff], [0x0 ~> 0xffffffffffffffff], [0x0 ~> 0xffffffffffffffff], [0x0 ~> 0x1fffffffffffffff]]
+//
+// Output Bounds:
+//
+// out1: [[0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0x1f]]
+func fiatScalarToBytes(out1 *[32]uint8, arg1 *[4]uint64) {
+ x1 := arg1[3]
+ x2 := arg1[2]
+ x3 := arg1[1]
+ x4 := arg1[0]
+ x5 := (uint8(x4) & 0xff)
+ x6 := (x4 >> 8)
+ x7 := (uint8(x6) & 0xff)
+ x8 := (x6 >> 8)
+ x9 := (uint8(x8) & 0xff)
+ x10 := (x8 >> 8)
+ x11 := (uint8(x10) & 0xff)
+ x12 := (x10 >> 8)
+ x13 := (uint8(x12) & 0xff)
+ x14 := (x12 >> 8)
+ x15 := (uint8(x14) & 0xff)
+ x16 := (x14 >> 8)
+ x17 := (uint8(x16) & 0xff)
+ x18 := uint8((x16 >> 8))
+ x19 := (uint8(x3) & 0xff)
+ x20 := (x3 >> 8)
+ x21 := (uint8(x20) & 0xff)
+ x22 := (x20 >> 8)
+ x23 := (uint8(x22) & 0xff)
+ x24 := (x22 >> 8)
+ x25 := (uint8(x24) & 0xff)
+ x26 := (x24 >> 8)
+ x27 := (uint8(x26) & 0xff)
+ x28 := (x26 >> 8)
+ x29 := (uint8(x28) & 0xff)
+ x30 := (x28 >> 8)
+ x31 := (uint8(x30) & 0xff)
+ x32 := uint8((x30 >> 8))
+ x33 := (uint8(x2) & 0xff)
+ x34 := (x2 >> 8)
+ x35 := (uint8(x34) & 0xff)
+ x36 := (x34 >> 8)
+ x37 := (uint8(x36) & 0xff)
+ x38 := (x36 >> 8)
+ x39 := (uint8(x38) & 0xff)
+ x40 := (x38 >> 8)
+ x41 := (uint8(x40) & 0xff)
+ x42 := (x40 >> 8)
+ x43 := (uint8(x42) & 0xff)
+ x44 := (x42 >> 8)
+ x45 := (uint8(x44) & 0xff)
+ x46 := uint8((x44 >> 8))
+ x47 := (uint8(x1) & 0xff)
+ x48 := (x1 >> 8)
+ x49 := (uint8(x48) & 0xff)
+ x50 := (x48 >> 8)
+ x51 := (uint8(x50) & 0xff)
+ x52 := (x50 >> 8)
+ x53 := (uint8(x52) & 0xff)
+ x54 := (x52 >> 8)
+ x55 := (uint8(x54) & 0xff)
+ x56 := (x54 >> 8)
+ x57 := (uint8(x56) & 0xff)
+ x58 := (x56 >> 8)
+ x59 := (uint8(x58) & 0xff)
+ x60 := uint8((x58 >> 8))
+ out1[0] = x5
+ out1[1] = x7
+ out1[2] = x9
+ out1[3] = x11
+ out1[4] = x13
+ out1[5] = x15
+ out1[6] = x17
+ out1[7] = x18
+ out1[8] = x19
+ out1[9] = x21
+ out1[10] = x23
+ out1[11] = x25
+ out1[12] = x27
+ out1[13] = x29
+ out1[14] = x31
+ out1[15] = x32
+ out1[16] = x33
+ out1[17] = x35
+ out1[18] = x37
+ out1[19] = x39
+ out1[20] = x41
+ out1[21] = x43
+ out1[22] = x45
+ out1[23] = x46
+ out1[24] = x47
+ out1[25] = x49
+ out1[26] = x51
+ out1[27] = x53
+ out1[28] = x55
+ out1[29] = x57
+ out1[30] = x59
+ out1[31] = x60
+}
+
+// fiatScalarFromBytes deserializes a field element NOT in the Montgomery domain from bytes in little-endian order.
+//
+// Preconditions:
+//
+// 0 ≤ bytes_eval arg1 < m
+//
+// Postconditions:
+//
+// eval out1 mod m = bytes_eval arg1 mod m
+// 0 ≤ eval out1 < m
+//
+// Input Bounds:
+//
+// arg1: [[0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0x1f]]
+//
+// Output Bounds:
+//
+// out1: [[0x0 ~> 0xffffffffffffffff], [0x0 ~> 0xffffffffffffffff], [0x0 ~> 0xffffffffffffffff], [0x0 ~> 0x1fffffffffffffff]]
+func fiatScalarFromBytes(out1 *[4]uint64, arg1 *[32]uint8) {
+ x1 := (uint64(arg1[31]) << 56)
+ x2 := (uint64(arg1[30]) << 48)
+ x3 := (uint64(arg1[29]) << 40)
+ x4 := (uint64(arg1[28]) << 32)
+ x5 := (uint64(arg1[27]) << 24)
+ x6 := (uint64(arg1[26]) << 16)
+ x7 := (uint64(arg1[25]) << 8)
+ x8 := arg1[24]
+ x9 := (uint64(arg1[23]) << 56)
+ x10 := (uint64(arg1[22]) << 48)
+ x11 := (uint64(arg1[21]) << 40)
+ x12 := (uint64(arg1[20]) << 32)
+ x13 := (uint64(arg1[19]) << 24)
+ x14 := (uint64(arg1[18]) << 16)
+ x15 := (uint64(arg1[17]) << 8)
+ x16 := arg1[16]
+ x17 := (uint64(arg1[15]) << 56)
+ x18 := (uint64(arg1[14]) << 48)
+ x19 := (uint64(arg1[13]) << 40)
+ x20 := (uint64(arg1[12]) << 32)
+ x21 := (uint64(arg1[11]) << 24)
+ x22 := (uint64(arg1[10]) << 16)
+ x23 := (uint64(arg1[9]) << 8)
+ x24 := arg1[8]
+ x25 := (uint64(arg1[7]) << 56)
+ x26 := (uint64(arg1[6]) << 48)
+ x27 := (uint64(arg1[5]) << 40)
+ x28 := (uint64(arg1[4]) << 32)
+ x29 := (uint64(arg1[3]) << 24)
+ x30 := (uint64(arg1[2]) << 16)
+ x31 := (uint64(arg1[1]) << 8)
+ x32 := arg1[0]
+ x33 := (x31 + uint64(x32))
+ x34 := (x30 + x33)
+ x35 := (x29 + x34)
+ x36 := (x28 + x35)
+ x37 := (x27 + x36)
+ x38 := (x26 + x37)
+ x39 := (x25 + x38)
+ x40 := (x23 + uint64(x24))
+ x41 := (x22 + x40)
+ x42 := (x21 + x41)
+ x43 := (x20 + x42)
+ x44 := (x19 + x43)
+ x45 := (x18 + x44)
+ x46 := (x17 + x45)
+ x47 := (x15 + uint64(x16))
+ x48 := (x14 + x47)
+ x49 := (x13 + x48)
+ x50 := (x12 + x49)
+ x51 := (x11 + x50)
+ x52 := (x10 + x51)
+ x53 := (x9 + x52)
+ x54 := (x7 + uint64(x8))
+ x55 := (x6 + x54)
+ x56 := (x5 + x55)
+ x57 := (x4 + x56)
+ x58 := (x3 + x57)
+ x59 := (x2 + x58)
+ x60 := (x1 + x59)
+ out1[0] = x39
+ out1[1] = x46
+ out1[2] = x53
+ out1[3] = x60
+}
diff --git a/dependencies/pkg/mod/filippo.io/edwards25519@v1.1.0/scalar_test.go b/dependencies/pkg/mod/filippo.io/edwards25519@v1.1.0/scalar_test.go
new file mode 100644
index 0000000..05551ef
--- /dev/null
+++ b/dependencies/pkg/mod/filippo.io/edwards25519@v1.1.0/scalar_test.go
@@ -0,0 +1,255 @@
+// Copyright (c) 2019 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package edwards25519
+
+import (
+ "bytes"
+ "encoding/hex"
+ "math/big"
+ mathrand "math/rand"
+ "reflect"
+ "testing"
+ "testing/quick"
+)
+
+// quickCheckConfig returns a quick.Config that scales the max count by the
+// given factor if the -short flag is not set.
+func quickCheckConfig(slowScale int) *quick.Config {
+ cfg := new(quick.Config)
+ if !testing.Short() {
+ cfg.MaxCountScale = float64(slowScale)
+ }
+ return cfg
+}
+
+var scOneBytes = [32]byte{1}
+var scOne, _ = new(Scalar).SetCanonicalBytes(scOneBytes[:])
+var scMinusOne, _ = new(Scalar).SetCanonicalBytes(scalarMinusOneBytes[:])
+
+// Generate returns a valid (reduced modulo l) Scalar with a distribution
+// weighted towards high, low, and edge values.
+func (Scalar) Generate(rand *mathrand.Rand, size int) reflect.Value {
+ var s [32]byte
+ diceRoll := rand.Intn(100)
+ switch {
+ case diceRoll == 0:
+ case diceRoll == 1:
+ s = scOneBytes
+ case diceRoll == 2:
+ s = scalarMinusOneBytes
+ case diceRoll < 5:
+ // Generate a low scalar in [0, 2^125).
+ rand.Read(s[:16])
+ s[15] &= (1 << 5) - 1
+ case diceRoll < 10:
+ // Generate a high scalar in [2^252, 2^252 + 2^124).
+ s[31] = 1 << 4
+ rand.Read(s[:16])
+ s[15] &= (1 << 4) - 1
+ default:
+ // Generate a valid scalar in [0, l) by returning [0, 2^252) which has a
+ // negligibly different distribution (the former has a 2^-127.6 chance
+ // of being out of the latter range).
+ rand.Read(s[:])
+ s[31] &= (1 << 4) - 1
+ }
+
+ val := Scalar{}
+ fiatScalarFromBytes((*[4]uint64)(&val.s), &s)
+ fiatScalarToMontgomery(&val.s, (*fiatScalarNonMontgomeryDomainFieldElement)(&val.s))
+
+ return reflect.ValueOf(val)
+}
+
+func TestScalarGenerate(t *testing.T) {
+ f := func(sc Scalar) bool {
+ return isReduced(sc.Bytes())
+ }
+ if err := quick.Check(f, quickCheckConfig(1024)); err != nil {
+ t.Errorf("generated unreduced scalar: %v", err)
+ }
+}
+
+func TestScalarSetCanonicalBytes(t *testing.T) {
+ f1 := func(in [32]byte, sc Scalar) bool {
+ // Mask out top 4 bits to guarantee value falls in [0, l).
+ in[len(in)-1] &= (1 << 4) - 1
+ if _, err := sc.SetCanonicalBytes(in[:]); err != nil {
+ return false
+ }
+ repr := sc.Bytes()
+ return bytes.Equal(in[:], repr) && isReduced(repr)
+ }
+ if err := quick.Check(f1, quickCheckConfig(1024)); err != nil {
+ t.Errorf("failed bytes->scalar->bytes round-trip: %v", err)
+ }
+
+ f2 := func(sc1, sc2 Scalar) bool {
+ if _, err := sc2.SetCanonicalBytes(sc1.Bytes()); err != nil {
+ return false
+ }
+ return sc1 == sc2
+ }
+ if err := quick.Check(f2, quickCheckConfig(1024)); err != nil {
+ t.Errorf("failed scalar->bytes->scalar round-trip: %v", err)
+ }
+
+ b := scalarMinusOneBytes
+ b[31] += 1
+ s := scOne
+ if out, err := s.SetCanonicalBytes(b[:]); err == nil {
+ t.Errorf("SetCanonicalBytes worked on a non-canonical value")
+ } else if s != scOne {
+ t.Errorf("SetCanonicalBytes modified its receiver")
+ } else if out != nil {
+ t.Errorf("SetCanonicalBytes did not return nil with an error")
+ }
+}
+
+func TestScalarSetUniformBytes(t *testing.T) {
+ mod, _ := new(big.Int).SetString("27742317777372353535851937790883648493", 10)
+ mod.Add(mod, new(big.Int).Lsh(big.NewInt(1), 252))
+ f := func(in [64]byte, sc Scalar) bool {
+ sc.SetUniformBytes(in[:])
+ repr := sc.Bytes()
+ if !isReduced(repr) {
+ return false
+ }
+ scBig := bigIntFromLittleEndianBytes(repr[:])
+ inBig := bigIntFromLittleEndianBytes(in[:])
+ return inBig.Mod(inBig, mod).Cmp(scBig) == 0
+ }
+ if err := quick.Check(f, quickCheckConfig(1024)); err != nil {
+ t.Error(err)
+ }
+}
+
+func TestScalarSetBytesWithClamping(t *testing.T) {
+ // Generated with libsodium.js 1.0.18 crypto_scalarmult_ed25519_base.
+
+ random := "633d368491364dc9cd4c1bf891b1d59460face1644813240a313e61f2c88216e"
+ s, _ := new(Scalar).SetBytesWithClamping(decodeHex(random))
+ p := new(Point).ScalarBaseMult(s)
+ want := "1d87a9026fd0126a5736fe1628c95dd419172b5b618457e041c9c861b2494a94"
+ if got := hex.EncodeToString(p.Bytes()); got != want {
+ t.Errorf("random: got %q, want %q", got, want)
+ }
+
+ zero := "0000000000000000000000000000000000000000000000000000000000000000"
+ s, _ = new(Scalar).SetBytesWithClamping(decodeHex(zero))
+ p = new(Point).ScalarBaseMult(s)
+ want = "693e47972caf527c7883ad1b39822f026f47db2ab0e1919955b8993aa04411d1"
+ if got := hex.EncodeToString(p.Bytes()); got != want {
+ t.Errorf("zero: got %q, want %q", got, want)
+ }
+
+ one := "ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff"
+ s, _ = new(Scalar).SetBytesWithClamping(decodeHex(one))
+ p = new(Point).ScalarBaseMult(s)
+ want = "12e9a68b73fd5aacdbcaf3e88c46fea6ebedb1aa84eed1842f07f8edab65e3a7"
+ if got := hex.EncodeToString(p.Bytes()); got != want {
+ t.Errorf("one: got %q, want %q", got, want)
+ }
+}
+
+func bigIntFromLittleEndianBytes(b []byte) *big.Int {
+ bb := make([]byte, len(b))
+ for i := range b {
+ bb[i] = b[len(b)-i-1]
+ }
+ return new(big.Int).SetBytes(bb)
+}
+
+func TestScalarMultiplyDistributesOverAdd(t *testing.T) {
+ multiplyDistributesOverAdd := func(x, y, z Scalar) bool {
+ // Compute t1 = (x+y)*z
+ var t1 Scalar
+ t1.Add(&x, &y)
+ t1.Multiply(&t1, &z)
+
+ // Compute t2 = x*z + y*z
+ var t2 Scalar
+ var t3 Scalar
+ t2.Multiply(&x, &z)
+ t3.Multiply(&y, &z)
+ t2.Add(&t2, &t3)
+
+ reprT1, reprT2 := t1.Bytes(), t2.Bytes()
+
+ return t1 == t2 && isReduced(reprT1) && isReduced(reprT2)
+ }
+
+ if err := quick.Check(multiplyDistributesOverAdd, quickCheckConfig(1024)); err != nil {
+ t.Error(err)
+ }
+}
+
+func TestScalarAddLikeSubNeg(t *testing.T) {
+ addLikeSubNeg := func(x, y Scalar) bool {
+ // Compute t1 = x - y
+ var t1 Scalar
+ t1.Subtract(&x, &y)
+
+ // Compute t2 = -y + x
+ var t2 Scalar
+ t2.Negate(&y)
+ t2.Add(&t2, &x)
+
+ return t1 == t2 && isReduced(t1.Bytes())
+ }
+
+ if err := quick.Check(addLikeSubNeg, quickCheckConfig(1024)); err != nil {
+ t.Error(err)
+ }
+}
+
+func TestScalarNonAdjacentForm(t *testing.T) {
+ s, _ := (&Scalar{}).SetCanonicalBytes([]byte{
+ 0x1a, 0x0e, 0x97, 0x8a, 0x90, 0xf6, 0x62, 0x2d,
+ 0x37, 0x47, 0x02, 0x3f, 0x8a, 0xd8, 0x26, 0x4d,
+ 0xa7, 0x58, 0xaa, 0x1b, 0x88, 0xe0, 0x40, 0xd1,
+ 0x58, 0x9e, 0x7b, 0x7f, 0x23, 0x76, 0xef, 0x09,
+ })
+
+ expectedNaf := [256]int8{
+ 0, 13, 0, 0, 0, 0, 0, 0, 0, 7, 0, 0, 0, 0, 0, 0, -9, 0, 0, 0, 0, -11, 0, 0, 0, 0, 3, 0, 0, 0, 0, 1,
+ 0, 0, 0, 0, 9, 0, 0, 0, 0, -5, 0, 0, 0, 0, 0, 0, 3, 0, 0, 0, 0, 11, 0, 0, 0, 0, 11, 0, 0, 0, 0, 0,
+ -9, 0, 0, 0, 0, 0, -3, 0, 0, 0, 0, 9, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, -1, 0, 0, 0, 0, 0, 9, 0,
+ 0, 0, 0, -15, 0, 0, 0, 0, -7, 0, 0, 0, 0, -9, 0, 0, 0, 0, 0, 5, 0, 0, 0, 0, 13, 0, 0, 0, 0, 0, -3, 0,
+ 0, 0, 0, -11, 0, 0, 0, 0, -7, 0, 0, 0, 0, -13, 0, 0, 0, 0, 11, 0, 0, 0, 0, -9, 0, 0, 0, 0, 0, 1, 0, 0,
+ 0, 0, 0, -15, 0, 0, 0, 0, 1, 0, 0, 0, 0, 7, 0, 0, 0, 0, 0, 0, 0, 0, 5, 0, 0, 0, 0, 0, 13, 0, 0, 0,
+ 0, 0, 0, 11, 0, 0, 0, 0, 0, 15, 0, 0, 0, 0, 0, -9, 0, 0, 0, 0, 0, 0, 0, -1, 0, 0, 0, 0, 0, 0, 0, 7,
+ 0, 0, 0, 0, 0, -15, 0, 0, 0, 0, 0, 15, 0, 0, 0, 0, 15, 0, 0, 0, 0, 15, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0,
+ }
+
+ sNaf := s.nonAdjacentForm(5)
+
+ for i := 0; i < 256; i++ {
+ if expectedNaf[i] != sNaf[i] {
+ t.Errorf("Wrong digit at position %d, got %d, expected %d", i, sNaf[i], expectedNaf[i])
+ }
+ }
+}
+
+type notZeroScalar Scalar
+
+func (notZeroScalar) Generate(rand *mathrand.Rand, size int) reflect.Value {
+ var s Scalar
+ var isNonZero uint64
+ for isNonZero == 0 {
+ s = Scalar{}.Generate(rand, size).Interface().(Scalar)
+ fiatScalarNonzero(&isNonZero, (*[4]uint64)(&s.s))
+ }
+ return reflect.ValueOf(notZeroScalar(s))
+}
+
+func TestScalarEqual(t *testing.T) {
+ if scOne.Equal(scMinusOne) == 1 {
+ t.Errorf("scOne.Equal(&scMinusOne) is true")
+ }
+ if scMinusOne.Equal(scMinusOne) == 0 {
+ t.Errorf("scMinusOne.Equal(&scMinusOne) is false")
+ }
+}
diff --git a/dependencies/pkg/mod/filippo.io/edwards25519@v1.1.0/scalarmult.go b/dependencies/pkg/mod/filippo.io/edwards25519@v1.1.0/scalarmult.go
new file mode 100644
index 0000000..f7ca3ce
--- /dev/null
+++ b/dependencies/pkg/mod/filippo.io/edwards25519@v1.1.0/scalarmult.go
@@ -0,0 +1,214 @@
+// Copyright (c) 2019 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package edwards25519
+
+import "sync"
+
+// basepointTable is a set of 32 affineLookupTables, where table i is generated
+// from 256i * basepoint. It is precomputed the first time it's used.
+func basepointTable() *[32]affineLookupTable {
+ basepointTablePrecomp.initOnce.Do(func() {
+ p := NewGeneratorPoint()
+ for i := 0; i < 32; i++ {
+ basepointTablePrecomp.table[i].FromP3(p)
+ for j := 0; j < 8; j++ {
+ p.Add(p, p)
+ }
+ }
+ })
+ return &basepointTablePrecomp.table
+}
+
+var basepointTablePrecomp struct {
+ table [32]affineLookupTable
+ initOnce sync.Once
+}
+
+// ScalarBaseMult sets v = x * B, where B is the canonical generator, and
+// returns v.
+//
+// The scalar multiplication is done in constant time.
+func (v *Point) ScalarBaseMult(x *Scalar) *Point {
+ basepointTable := basepointTable()
+
+ // Write x = sum(x_i * 16^i) so x*B = sum( B*x_i*16^i )
+ // as described in the Ed25519 paper
+ //
+ // Group even and odd coefficients
+ // x*B = x_0*16^0*B + x_2*16^2*B + ... + x_62*16^62*B
+ // + x_1*16^1*B + x_3*16^3*B + ... + x_63*16^63*B
+ // x*B = x_0*16^0*B + x_2*16^2*B + ... + x_62*16^62*B
+ // + 16*( x_1*16^0*B + x_3*16^2*B + ... + x_63*16^62*B)
+ //
+ // We use a lookup table for each i to get x_i*16^(2*i)*B
+ // and do four doublings to multiply by 16.
+ digits := x.signedRadix16()
+
+ multiple := &affineCached{}
+ tmp1 := &projP1xP1{}
+ tmp2 := &projP2{}
+
+ // Accumulate the odd components first
+ v.Set(NewIdentityPoint())
+ for i := 1; i < 64; i += 2 {
+ basepointTable[i/2].SelectInto(multiple, digits[i])
+ tmp1.AddAffine(v, multiple)
+ v.fromP1xP1(tmp1)
+ }
+
+ // Multiply by 16
+ tmp2.FromP3(v) // tmp2 = v in P2 coords
+ tmp1.Double(tmp2) // tmp1 = 2*v in P1xP1 coords
+ tmp2.FromP1xP1(tmp1) // tmp2 = 2*v in P2 coords
+ tmp1.Double(tmp2) // tmp1 = 4*v in P1xP1 coords
+ tmp2.FromP1xP1(tmp1) // tmp2 = 4*v in P2 coords
+ tmp1.Double(tmp2) // tmp1 = 8*v in P1xP1 coords
+ tmp2.FromP1xP1(tmp1) // tmp2 = 8*v in P2 coords
+ tmp1.Double(tmp2) // tmp1 = 16*v in P1xP1 coords
+ v.fromP1xP1(tmp1) // now v = 16*(odd components)
+
+ // Accumulate the even components
+ for i := 0; i < 64; i += 2 {
+ basepointTable[i/2].SelectInto(multiple, digits[i])
+ tmp1.AddAffine(v, multiple)
+ v.fromP1xP1(tmp1)
+ }
+
+ return v
+}
+
+// ScalarMult sets v = x * q, and returns v.
+//
+// The scalar multiplication is done in constant time.
+func (v *Point) ScalarMult(x *Scalar, q *Point) *Point {
+ checkInitialized(q)
+
+ var table projLookupTable
+ table.FromP3(q)
+
+ // Write x = sum(x_i * 16^i)
+ // so x*Q = sum( Q*x_i*16^i )
+ // = Q*x_0 + 16*(Q*x_1 + 16*( ... + Q*x_63) ... )
+ // <------compute inside out---------
+ //
+ // We use the lookup table to get the x_i*Q values
+ // and do four doublings to compute 16*Q
+ digits := x.signedRadix16()
+
+ // Unwrap first loop iteration to save computing 16*identity
+ multiple := &projCached{}
+ tmp1 := &projP1xP1{}
+ tmp2 := &projP2{}
+ table.SelectInto(multiple, digits[63])
+
+ v.Set(NewIdentityPoint())
+ tmp1.Add(v, multiple) // tmp1 = x_63*Q in P1xP1 coords
+ for i := 62; i >= 0; i-- {
+ tmp2.FromP1xP1(tmp1) // tmp2 = (prev) in P2 coords
+ tmp1.Double(tmp2) // tmp1 = 2*(prev) in P1xP1 coords
+ tmp2.FromP1xP1(tmp1) // tmp2 = 2*(prev) in P2 coords
+ tmp1.Double(tmp2) // tmp1 = 4*(prev) in P1xP1 coords
+ tmp2.FromP1xP1(tmp1) // tmp2 = 4*(prev) in P2 coords
+ tmp1.Double(tmp2) // tmp1 = 8*(prev) in P1xP1 coords
+ tmp2.FromP1xP1(tmp1) // tmp2 = 8*(prev) in P2 coords
+ tmp1.Double(tmp2) // tmp1 = 16*(prev) in P1xP1 coords
+ v.fromP1xP1(tmp1) // v = 16*(prev) in P3 coords
+ table.SelectInto(multiple, digits[i])
+ tmp1.Add(v, multiple) // tmp1 = x_i*Q + 16*(prev) in P1xP1 coords
+ }
+ v.fromP1xP1(tmp1)
+ return v
+}
+
+// basepointNafTable is the nafLookupTable8 for the basepoint.
+// It is precomputed the first time it's used.
+func basepointNafTable() *nafLookupTable8 {
+ basepointNafTablePrecomp.initOnce.Do(func() {
+ basepointNafTablePrecomp.table.FromP3(NewGeneratorPoint())
+ })
+ return &basepointNafTablePrecomp.table
+}
+
+var basepointNafTablePrecomp struct {
+ table nafLookupTable8
+ initOnce sync.Once
+}
+
+// VarTimeDoubleScalarBaseMult sets v = a * A + b * B, where B is the canonical
+// generator, and returns v.
+//
+// Execution time depends on the inputs.
+func (v *Point) VarTimeDoubleScalarBaseMult(a *Scalar, A *Point, b *Scalar) *Point {
+ checkInitialized(A)
+
+ // Similarly to the single variable-base approach, we compute
+ // digits and use them with a lookup table. However, because
+ // we are allowed to do variable-time operations, we don't
+ // need constant-time lookups or constant-time digit
+ // computations.
+ //
+ // So we use a non-adjacent form of some width w instead of
+ // radix 16. This is like a binary representation (one digit
+ // for each binary place) but we allow the digits to grow in
+ // magnitude up to 2^{w-1} so that the nonzero digits are as
+ // sparse as possible. Intuitively, this "condenses" the
+ // "mass" of the scalar onto sparse coefficients (meaning
+ // fewer additions).
+
+ basepointNafTable := basepointNafTable()
+ var aTable nafLookupTable5
+ aTable.FromP3(A)
+ // Because the basepoint is fixed, we can use a wider NAF
+ // corresponding to a bigger table.
+ aNaf := a.nonAdjacentForm(5)
+ bNaf := b.nonAdjacentForm(8)
+
+ // Find the first nonzero coefficient.
+ i := 255
+ for j := i; j >= 0; j-- {
+ if aNaf[j] != 0 || bNaf[j] != 0 {
+ break
+ }
+ }
+
+ multA := &projCached{}
+ multB := &affineCached{}
+ tmp1 := &projP1xP1{}
+ tmp2 := &projP2{}
+ tmp2.Zero()
+
+ // Move from high to low bits, doubling the accumulator
+ // at each iteration and checking whether there is a nonzero
+ // coefficient to look up a multiple of.
+ for ; i >= 0; i-- {
+ tmp1.Double(tmp2)
+
+ // Only update v if we have a nonzero coeff to add in.
+ if aNaf[i] > 0 {
+ v.fromP1xP1(tmp1)
+ aTable.SelectInto(multA, aNaf[i])
+ tmp1.Add(v, multA)
+ } else if aNaf[i] < 0 {
+ v.fromP1xP1(tmp1)
+ aTable.SelectInto(multA, -aNaf[i])
+ tmp1.Sub(v, multA)
+ }
+
+ if bNaf[i] > 0 {
+ v.fromP1xP1(tmp1)
+ basepointNafTable.SelectInto(multB, bNaf[i])
+ tmp1.AddAffine(v, multB)
+ } else if bNaf[i] < 0 {
+ v.fromP1xP1(tmp1)
+ basepointNafTable.SelectInto(multB, -bNaf[i])
+ tmp1.SubAffine(v, multB)
+ }
+
+ tmp2.FromP1xP1(tmp1)
+ }
+
+ v.fromP2(tmp2)
+ return v
+}
diff --git a/dependencies/pkg/mod/filippo.io/edwards25519@v1.1.0/scalarmult_test.go b/dependencies/pkg/mod/filippo.io/edwards25519@v1.1.0/scalarmult_test.go
new file mode 100644
index 0000000..4a00c79
--- /dev/null
+++ b/dependencies/pkg/mod/filippo.io/edwards25519@v1.1.0/scalarmult_test.go
@@ -0,0 +1,205 @@
+// Copyright (c) 2019 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package edwards25519
+
+import (
+ "testing"
+ "testing/quick"
+)
+
+var (
+ // a random scalar generated using dalek.
+ dalekScalar, _ = (&Scalar{}).SetCanonicalBytes([]byte{219, 106, 114, 9, 174, 249, 155, 89, 69, 203, 201, 93, 92, 116, 234, 187, 78, 115, 103, 172, 182, 98, 62, 103, 187, 136, 13, 100, 248, 110, 12, 4})
+ // the above, times the edwards25519 basepoint.
+ dalekScalarBasepoint, _ = new(Point).SetBytes([]byte{0xf4, 0xef, 0x7c, 0xa, 0x34, 0x55, 0x7b, 0x9f, 0x72, 0x3b, 0xb6, 0x1e, 0xf9, 0x46, 0x9, 0x91, 0x1c, 0xb9, 0xc0, 0x6c, 0x17, 0x28, 0x2d, 0x8b, 0x43, 0x2b, 0x5, 0x18, 0x6a, 0x54, 0x3e, 0x48})
+)
+
+func TestScalarMultSmallScalars(t *testing.T) {
+ var z Scalar
+ var p Point
+ p.ScalarMult(&z, B)
+ if I.Equal(&p) != 1 {
+ t.Error("0*B != 0")
+ }
+ checkOnCurve(t, &p)
+
+ scEight, _ := (&Scalar{}).SetCanonicalBytes([]byte{1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0})
+ p.ScalarMult(scEight, B)
+ if B.Equal(&p) != 1 {
+ t.Error("1*B != 1")
+ }
+ checkOnCurve(t, &p)
+}
+
+func TestScalarMultVsDalek(t *testing.T) {
+ var p Point
+ p.ScalarMult(dalekScalar, B)
+ if dalekScalarBasepoint.Equal(&p) != 1 {
+ t.Error("Scalar mul does not match dalek")
+ }
+ checkOnCurve(t, &p)
+}
+
+func TestBaseMultVsDalek(t *testing.T) {
+ var p Point
+ p.ScalarBaseMult(dalekScalar)
+ if dalekScalarBasepoint.Equal(&p) != 1 {
+ t.Error("Scalar mul does not match dalek")
+ }
+ checkOnCurve(t, &p)
+}
+
+func TestVarTimeDoubleBaseMultVsDalek(t *testing.T) {
+ var p Point
+ var z Scalar
+ p.VarTimeDoubleScalarBaseMult(dalekScalar, B, &z)
+ if dalekScalarBasepoint.Equal(&p) != 1 {
+ t.Error("VarTimeDoubleScalarBaseMult fails with b=0")
+ }
+ checkOnCurve(t, &p)
+ p.VarTimeDoubleScalarBaseMult(&z, B, dalekScalar)
+ if dalekScalarBasepoint.Equal(&p) != 1 {
+ t.Error("VarTimeDoubleScalarBaseMult fails with a=0")
+ }
+ checkOnCurve(t, &p)
+}
+
+func TestScalarMultDistributesOverAdd(t *testing.T) {
+ scalarMultDistributesOverAdd := func(x, y Scalar) bool {
+ var z Scalar
+ z.Add(&x, &y)
+ var p, q, r, check Point
+ p.ScalarMult(&x, B)
+ q.ScalarMult(&y, B)
+ r.ScalarMult(&z, B)
+ check.Add(&p, &q)
+ checkOnCurve(t, &p, &q, &r, &check)
+ return check.Equal(&r) == 1
+ }
+
+ if err := quick.Check(scalarMultDistributesOverAdd, quickCheckConfig(32)); err != nil {
+ t.Error(err)
+ }
+}
+
+func TestScalarMultNonIdentityPoint(t *testing.T) {
+ // Check whether p.ScalarMult and q.ScalaBaseMult give the same,
+ // when p and q are originally set to the base point.
+
+ scalarMultNonIdentityPoint := func(x Scalar) bool {
+ var p, q Point
+ p.Set(B)
+ q.Set(B)
+
+ p.ScalarMult(&x, B)
+ q.ScalarBaseMult(&x)
+
+ checkOnCurve(t, &p, &q)
+
+ return p.Equal(&q) == 1
+ }
+
+ if err := quick.Check(scalarMultNonIdentityPoint, quickCheckConfig(32)); err != nil {
+ t.Error(err)
+ }
+}
+
+func TestBasepointTableGeneration(t *testing.T) {
+ // The basepoint table is 32 affineLookupTables,
+ // corresponding to (16^2i)*B for table i.
+ basepointTable := basepointTable()
+
+ tmp1 := &projP1xP1{}
+ tmp2 := &projP2{}
+ tmp3 := &Point{}
+ tmp3.Set(B)
+ table := make([]affineLookupTable, 32)
+ for i := 0; i < 32; i++ {
+ // Build the table
+ table[i].FromP3(tmp3)
+ // Assert equality with the hardcoded one
+ if table[i] != basepointTable[i] {
+ t.Errorf("Basepoint table %d does not match", i)
+ }
+
+ // Set p = (16^2)*p = 256*p = 2^8*p
+ tmp2.FromP3(tmp3)
+ for j := 0; j < 7; j++ {
+ tmp1.Double(tmp2)
+ tmp2.FromP1xP1(tmp1)
+ }
+ tmp1.Double(tmp2)
+ tmp3.fromP1xP1(tmp1)
+ checkOnCurve(t, tmp3)
+ }
+}
+
+func TestScalarMultMatchesBaseMult(t *testing.T) {
+ scalarMultMatchesBaseMult := func(x Scalar) bool {
+ var p, q Point
+ p.ScalarMult(&x, B)
+ q.ScalarBaseMult(&x)
+ checkOnCurve(t, &p, &q)
+ return p.Equal(&q) == 1
+ }
+
+ if err := quick.Check(scalarMultMatchesBaseMult, quickCheckConfig(32)); err != nil {
+ t.Error(err)
+ }
+}
+
+func TestBasepointNafTableGeneration(t *testing.T) {
+ var table nafLookupTable8
+ table.FromP3(B)
+
+ if table != *basepointNafTable() {
+ t.Error("BasepointNafTable does not match")
+ }
+}
+
+func TestVarTimeDoubleBaseMultMatchesBaseMult(t *testing.T) {
+ varTimeDoubleBaseMultMatchesBaseMult := func(x, y Scalar) bool {
+ var p, q1, q2, check Point
+
+ p.VarTimeDoubleScalarBaseMult(&x, B, &y)
+
+ q1.ScalarBaseMult(&x)
+ q2.ScalarBaseMult(&y)
+ check.Add(&q1, &q2)
+
+ checkOnCurve(t, &p, &check, &q1, &q2)
+ return p.Equal(&check) == 1
+ }
+
+ if err := quick.Check(varTimeDoubleBaseMultMatchesBaseMult, quickCheckConfig(32)); err != nil {
+ t.Error(err)
+ }
+}
+
+// Benchmarks.
+
+func BenchmarkScalarBaseMult(b *testing.B) {
+ var p Point
+
+ for i := 0; i < b.N; i++ {
+ p.ScalarBaseMult(dalekScalar)
+ }
+}
+
+func BenchmarkScalarMult(b *testing.B) {
+ var p Point
+
+ for i := 0; i < b.N; i++ {
+ p.ScalarMult(dalekScalar, B)
+ }
+}
+
+func BenchmarkVarTimeDoubleScalarBaseMult(b *testing.B) {
+ var p Point
+
+ for i := 0; i < b.N; i++ {
+ p.VarTimeDoubleScalarBaseMult(dalekScalar, B, dalekScalar)
+ }
+}
diff --git a/dependencies/pkg/mod/filippo.io/edwards25519@v1.1.0/tables.go b/dependencies/pkg/mod/filippo.io/edwards25519@v1.1.0/tables.go
new file mode 100644
index 0000000..83234bb
--- /dev/null
+++ b/dependencies/pkg/mod/filippo.io/edwards25519@v1.1.0/tables.go
@@ -0,0 +1,129 @@
+// Copyright (c) 2019 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package edwards25519
+
+import (
+ "crypto/subtle"
+)
+
+// A dynamic lookup table for variable-base, constant-time scalar muls.
+type projLookupTable struct {
+ points [8]projCached
+}
+
+// A precomputed lookup table for fixed-base, constant-time scalar muls.
+type affineLookupTable struct {
+ points [8]affineCached
+}
+
+// A dynamic lookup table for variable-base, variable-time scalar muls.
+type nafLookupTable5 struct {
+ points [8]projCached
+}
+
+// A precomputed lookup table for fixed-base, variable-time scalar muls.
+type nafLookupTable8 struct {
+ points [64]affineCached
+}
+
+// Constructors.
+
+// Builds a lookup table at runtime. Fast.
+func (v *projLookupTable) FromP3(q *Point) {
+ // Goal: v.points[i] = (i+1)*Q, i.e., Q, 2Q, ..., 8Q
+ // This allows lookup of -8Q, ..., -Q, 0, Q, ..., 8Q
+ v.points[0].FromP3(q)
+ tmpP3 := Point{}
+ tmpP1xP1 := projP1xP1{}
+ for i := 0; i < 7; i++ {
+ // Compute (i+1)*Q as Q + i*Q and convert to a projCached
+ // This is needlessly complicated because the API has explicit
+ // receivers instead of creating stack objects and relying on RVO
+ v.points[i+1].FromP3(tmpP3.fromP1xP1(tmpP1xP1.Add(q, &v.points[i])))
+ }
+}
+
+// This is not optimised for speed; fixed-base tables should be precomputed.
+func (v *affineLookupTable) FromP3(q *Point) {
+ // Goal: v.points[i] = (i+1)*Q, i.e., Q, 2Q, ..., 8Q
+ // This allows lookup of -8Q, ..., -Q, 0, Q, ..., 8Q
+ v.points[0].FromP3(q)
+ tmpP3 := Point{}
+ tmpP1xP1 := projP1xP1{}
+ for i := 0; i < 7; i++ {
+ // Compute (i+1)*Q as Q + i*Q and convert to affineCached
+ v.points[i+1].FromP3(tmpP3.fromP1xP1(tmpP1xP1.AddAffine(q, &v.points[i])))
+ }
+}
+
+// Builds a lookup table at runtime. Fast.
+func (v *nafLookupTable5) FromP3(q *Point) {
+ // Goal: v.points[i] = (2*i+1)*Q, i.e., Q, 3Q, 5Q, ..., 15Q
+ // This allows lookup of -15Q, ..., -3Q, -Q, 0, Q, 3Q, ..., 15Q
+ v.points[0].FromP3(q)
+ q2 := Point{}
+ q2.Add(q, q)
+ tmpP3 := Point{}
+ tmpP1xP1 := projP1xP1{}
+ for i := 0; i < 7; i++ {
+ v.points[i+1].FromP3(tmpP3.fromP1xP1(tmpP1xP1.Add(&q2, &v.points[i])))
+ }
+}
+
+// This is not optimised for speed; fixed-base tables should be precomputed.
+func (v *nafLookupTable8) FromP3(q *Point) {
+ v.points[0].FromP3(q)
+ q2 := Point{}
+ q2.Add(q, q)
+ tmpP3 := Point{}
+ tmpP1xP1 := projP1xP1{}
+ for i := 0; i < 63; i++ {
+ v.points[i+1].FromP3(tmpP3.fromP1xP1(tmpP1xP1.AddAffine(&q2, &v.points[i])))
+ }
+}
+
+// Selectors.
+
+// Set dest to x*Q, where -8 <= x <= 8, in constant time.
+func (v *projLookupTable) SelectInto(dest *projCached, x int8) {
+ // Compute xabs = |x|
+ xmask := x >> 7
+ xabs := uint8((x + xmask) ^ xmask)
+
+ dest.Zero()
+ for j := 1; j <= 8; j++ {
+ // Set dest = j*Q if |x| = j
+ cond := subtle.ConstantTimeByteEq(xabs, uint8(j))
+ dest.Select(&v.points[j-1], dest, cond)
+ }
+ // Now dest = |x|*Q, conditionally negate to get x*Q
+ dest.CondNeg(int(xmask & 1))
+}
+
+// Set dest to x*Q, where -8 <= x <= 8, in constant time.
+func (v *affineLookupTable) SelectInto(dest *affineCached, x int8) {
+ // Compute xabs = |x|
+ xmask := x >> 7
+ xabs := uint8((x + xmask) ^ xmask)
+
+ dest.Zero()
+ for j := 1; j <= 8; j++ {
+ // Set dest = j*Q if |x| = j
+ cond := subtle.ConstantTimeByteEq(xabs, uint8(j))
+ dest.Select(&v.points[j-1], dest, cond)
+ }
+ // Now dest = |x|*Q, conditionally negate to get x*Q
+ dest.CondNeg(int(xmask & 1))
+}
+
+// Given odd x with 0 < x < 2^4, return x*Q (in variable time).
+func (v *nafLookupTable5) SelectInto(dest *projCached, x int8) {
+ *dest = v.points[x/2]
+}
+
+// Given odd x with 0 < x < 2^7, return x*Q (in variable time).
+func (v *nafLookupTable8) SelectInto(dest *affineCached, x int8) {
+ *dest = v.points[x/2]
+}
diff --git a/dependencies/pkg/mod/filippo.io/edwards25519@v1.1.0/tables_test.go b/dependencies/pkg/mod/filippo.io/edwards25519@v1.1.0/tables_test.go
new file mode 100644
index 0000000..b5d161a
--- /dev/null
+++ b/dependencies/pkg/mod/filippo.io/edwards25519@v1.1.0/tables_test.go
@@ -0,0 +1,119 @@
+// Copyright (c) 2019 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package edwards25519
+
+import (
+ "testing"
+)
+
+func TestProjLookupTable(t *testing.T) {
+ var table projLookupTable
+ table.FromP3(B)
+
+ var tmp1, tmp2, tmp3 projCached
+ table.SelectInto(&tmp1, 6)
+ table.SelectInto(&tmp2, -2)
+ table.SelectInto(&tmp3, -4)
+ // Expect T1 + T2 + T3 = identity
+
+ var accP1xP1 projP1xP1
+ accP3 := NewIdentityPoint()
+
+ accP1xP1.Add(accP3, &tmp1)
+ accP3.fromP1xP1(&accP1xP1)
+ accP1xP1.Add(accP3, &tmp2)
+ accP3.fromP1xP1(&accP1xP1)
+ accP1xP1.Add(accP3, &tmp3)
+ accP3.fromP1xP1(&accP1xP1)
+
+ if accP3.Equal(I) != 1 {
+ t.Errorf("Consistency check on ProjLookupTable.SelectInto failed! %x %x %x", tmp1, tmp2, tmp3)
+ }
+}
+
+func TestAffineLookupTable(t *testing.T) {
+ var table affineLookupTable
+ table.FromP3(B)
+
+ var tmp1, tmp2, tmp3 affineCached
+ table.SelectInto(&tmp1, 3)
+ table.SelectInto(&tmp2, -7)
+ table.SelectInto(&tmp3, 4)
+ // Expect T1 + T2 + T3 = identity
+
+ var accP1xP1 projP1xP1
+ accP3 := NewIdentityPoint()
+
+ accP1xP1.AddAffine(accP3, &tmp1)
+ accP3.fromP1xP1(&accP1xP1)
+ accP1xP1.AddAffine(accP3, &tmp2)
+ accP3.fromP1xP1(&accP1xP1)
+ accP1xP1.AddAffine(accP3, &tmp3)
+ accP3.fromP1xP1(&accP1xP1)
+
+ if accP3.Equal(I) != 1 {
+ t.Errorf("Consistency check on ProjLookupTable.SelectInto failed! %x %x %x", tmp1, tmp2, tmp3)
+ }
+}
+
+func TestNafLookupTable5(t *testing.T) {
+ var table nafLookupTable5
+ table.FromP3(B)
+
+ var tmp1, tmp2, tmp3, tmp4 projCached
+ table.SelectInto(&tmp1, 9)
+ table.SelectInto(&tmp2, 11)
+ table.SelectInto(&tmp3, 7)
+ table.SelectInto(&tmp4, 13)
+ // Expect T1 + T2 = T3 + T4
+
+ var accP1xP1 projP1xP1
+ lhs := NewIdentityPoint()
+ rhs := NewIdentityPoint()
+
+ accP1xP1.Add(lhs, &tmp1)
+ lhs.fromP1xP1(&accP1xP1)
+ accP1xP1.Add(lhs, &tmp2)
+ lhs.fromP1xP1(&accP1xP1)
+
+ accP1xP1.Add(rhs, &tmp3)
+ rhs.fromP1xP1(&accP1xP1)
+ accP1xP1.Add(rhs, &tmp4)
+ rhs.fromP1xP1(&accP1xP1)
+
+ if lhs.Equal(rhs) != 1 {
+ t.Errorf("Consistency check on nafLookupTable5 failed")
+ }
+}
+
+func TestNafLookupTable8(t *testing.T) {
+ var table nafLookupTable8
+ table.FromP3(B)
+
+ var tmp1, tmp2, tmp3, tmp4 affineCached
+ table.SelectInto(&tmp1, 49)
+ table.SelectInto(&tmp2, 11)
+ table.SelectInto(&tmp3, 35)
+ table.SelectInto(&tmp4, 25)
+ // Expect T1 + T2 = T3 + T4
+
+ var accP1xP1 projP1xP1
+ lhs := NewIdentityPoint()
+ rhs := NewIdentityPoint()
+
+ accP1xP1.AddAffine(lhs, &tmp1)
+ lhs.fromP1xP1(&accP1xP1)
+ accP1xP1.AddAffine(lhs, &tmp2)
+ lhs.fromP1xP1(&accP1xP1)
+
+ accP1xP1.AddAffine(rhs, &tmp3)
+ rhs.fromP1xP1(&accP1xP1)
+ accP1xP1.AddAffine(rhs, &tmp4)
+ rhs.fromP1xP1(&accP1xP1)
+
+ if lhs.Equal(rhs) != 1 {
+ t.Errorf("Consistency check on nafLookupTable8 failed")
+ }
+}
diff --git a/dependencies/pkg/mod/github.com/cespare/xxhash/v2@v2.1.2/.github/workflows/test.yml b/dependencies/pkg/mod/github.com/cespare/xxhash/v2@v2.1.2/.github/workflows/test.yml
deleted file mode 100644
index 606cf83..0000000
--- a/dependencies/pkg/mod/github.com/cespare/xxhash/v2@v2.1.2/.github/workflows/test.yml
+++ /dev/null
@@ -1,32 +0,0 @@
-name: Test
-
-on:
- push:
- branches: [main]
- pull_request:
-
-jobs:
- test:
- strategy:
- matrix:
- go-version: [1.16.x, 1.17.x]
- os: [ubuntu-latest, macos-latest, windows-latest]
- runs-on: ${{ matrix.os }}
-
- steps:
- - name: Install go
- uses: actions/setup-go@v2
- with:
- go-version: ${{ matrix.go-version }}
-
- - name: Check out code
- uses: actions/checkout@v2
-
- - name: Test
- run: go test -count 1 -bench . -benchtime 1x ./...
-
- - name: Test with -tags purego
- run: go test -count 1 -bench . -benchtime 1x -tags purego ./...
-
-# TODO: Test on other architectures. Unfortunately only amd64 is supported
-# by GH Actions. We could use QEMU in the meantime.
diff --git a/dependencies/pkg/mod/github.com/cespare/xxhash/v2@v2.1.2/xxhash_amd64.s b/dependencies/pkg/mod/github.com/cespare/xxhash/v2@v2.1.2/xxhash_amd64.s
deleted file mode 100644
index be8db5b..0000000
--- a/dependencies/pkg/mod/github.com/cespare/xxhash/v2@v2.1.2/xxhash_amd64.s
+++ /dev/null
@@ -1,215 +0,0 @@
-// +build !appengine
-// +build gc
-// +build !purego
-
-#include "textflag.h"
-
-// Register allocation:
-// AX h
-// SI pointer to advance through b
-// DX n
-// BX loop end
-// R8 v1, k1
-// R9 v2
-// R10 v3
-// R11 v4
-// R12 tmp
-// R13 prime1v
-// R14 prime2v
-// DI prime4v
-
-// round reads from and advances the buffer pointer in SI.
-// It assumes that R13 has prime1v and R14 has prime2v.
-#define round(r) \
- MOVQ (SI), R12 \
- ADDQ $8, SI \
- IMULQ R14, R12 \
- ADDQ R12, r \
- ROLQ $31, r \
- IMULQ R13, r
-
-// mergeRound applies a merge round on the two registers acc and val.
-// It assumes that R13 has prime1v, R14 has prime2v, and DI has prime4v.
-#define mergeRound(acc, val) \
- IMULQ R14, val \
- ROLQ $31, val \
- IMULQ R13, val \
- XORQ val, acc \
- IMULQ R13, acc \
- ADDQ DI, acc
-
-// func Sum64(b []byte) uint64
-TEXT ·Sum64(SB), NOSPLIT, $0-32
- // Load fixed primes.
- MOVQ ·prime1v(SB), R13
- MOVQ ·prime2v(SB), R14
- MOVQ ·prime4v(SB), DI
-
- // Load slice.
- MOVQ b_base+0(FP), SI
- MOVQ b_len+8(FP), DX
- LEAQ (SI)(DX*1), BX
-
- // The first loop limit will be len(b)-32.
- SUBQ $32, BX
-
- // Check whether we have at least one block.
- CMPQ DX, $32
- JLT noBlocks
-
- // Set up initial state (v1, v2, v3, v4).
- MOVQ R13, R8
- ADDQ R14, R8
- MOVQ R14, R9
- XORQ R10, R10
- XORQ R11, R11
- SUBQ R13, R11
-
- // Loop until SI > BX.
-blockLoop:
- round(R8)
- round(R9)
- round(R10)
- round(R11)
-
- CMPQ SI, BX
- JLE blockLoop
-
- MOVQ R8, AX
- ROLQ $1, AX
- MOVQ R9, R12
- ROLQ $7, R12
- ADDQ R12, AX
- MOVQ R10, R12
- ROLQ $12, R12
- ADDQ R12, AX
- MOVQ R11, R12
- ROLQ $18, R12
- ADDQ R12, AX
-
- mergeRound(AX, R8)
- mergeRound(AX, R9)
- mergeRound(AX, R10)
- mergeRound(AX, R11)
-
- JMP afterBlocks
-
-noBlocks:
- MOVQ ·prime5v(SB), AX
-
-afterBlocks:
- ADDQ DX, AX
-
- // Right now BX has len(b)-32, and we want to loop until SI > len(b)-8.
- ADDQ $24, BX
-
- CMPQ SI, BX
- JG fourByte
-
-wordLoop:
- // Calculate k1.
- MOVQ (SI), R8
- ADDQ $8, SI
- IMULQ R14, R8
- ROLQ $31, R8
- IMULQ R13, R8
-
- XORQ R8, AX
- ROLQ $27, AX
- IMULQ R13, AX
- ADDQ DI, AX
-
- CMPQ SI, BX
- JLE wordLoop
-
-fourByte:
- ADDQ $4, BX
- CMPQ SI, BX
- JG singles
-
- MOVL (SI), R8
- ADDQ $4, SI
- IMULQ R13, R8
- XORQ R8, AX
-
- ROLQ $23, AX
- IMULQ R14, AX
- ADDQ ·prime3v(SB), AX
-
-singles:
- ADDQ $4, BX
- CMPQ SI, BX
- JGE finalize
-
-singlesLoop:
- MOVBQZX (SI), R12
- ADDQ $1, SI
- IMULQ ·prime5v(SB), R12
- XORQ R12, AX
-
- ROLQ $11, AX
- IMULQ R13, AX
-
- CMPQ SI, BX
- JL singlesLoop
-
-finalize:
- MOVQ AX, R12
- SHRQ $33, R12
- XORQ R12, AX
- IMULQ R14, AX
- MOVQ AX, R12
- SHRQ $29, R12
- XORQ R12, AX
- IMULQ ·prime3v(SB), AX
- MOVQ AX, R12
- SHRQ $32, R12
- XORQ R12, AX
-
- MOVQ AX, ret+24(FP)
- RET
-
-// writeBlocks uses the same registers as above except that it uses AX to store
-// the d pointer.
-
-// func writeBlocks(d *Digest, b []byte) int
-TEXT ·writeBlocks(SB), NOSPLIT, $0-40
- // Load fixed primes needed for round.
- MOVQ ·prime1v(SB), R13
- MOVQ ·prime2v(SB), R14
-
- // Load slice.
- MOVQ b_base+8(FP), SI
- MOVQ b_len+16(FP), DX
- LEAQ (SI)(DX*1), BX
- SUBQ $32, BX
-
- // Load vN from d.
- MOVQ d+0(FP), AX
- MOVQ 0(AX), R8 // v1
- MOVQ 8(AX), R9 // v2
- MOVQ 16(AX), R10 // v3
- MOVQ 24(AX), R11 // v4
-
- // We don't need to check the loop condition here; this function is
- // always called with at least one block of data to process.
-blockLoop:
- round(R8)
- round(R9)
- round(R10)
- round(R11)
-
- CMPQ SI, BX
- JLE blockLoop
-
- // Copy vN back to d.
- MOVQ R8, 0(AX)
- MOVQ R9, 8(AX)
- MOVQ R10, 16(AX)
- MOVQ R11, 24(AX)
-
- // The number of bytes written is SI minus the old base pointer.
- SUBQ b_base+8(FP), SI
- MOVQ SI, ret+32(FP)
-
- RET
diff --git a/dependencies/pkg/mod/github.com/cespare/xxhash/v2@v2.2.0/.github/workflows/test.yml b/dependencies/pkg/mod/github.com/cespare/xxhash/v2@v2.2.0/.github/workflows/test.yml
new file mode 100644
index 0000000..542b91f
--- /dev/null
+++ b/dependencies/pkg/mod/github.com/cespare/xxhash/v2@v2.2.0/.github/workflows/test.yml
@@ -0,0 +1,56 @@
+name: Test
+
+on:
+ push:
+ branches: [main]
+ pull_request:
+
+jobs:
+ test:
+ strategy:
+ matrix:
+ go-version: [1.16.x, 1.17.x]
+ os: [ubuntu-latest, macos-latest, windows-latest]
+ runs-on: ${{ matrix.os }}
+
+ steps:
+ - name: Install go
+ uses: WillAbides/setup-go-faster@v1.5.0
+ with:
+ go-version: ${{ matrix.go-version }}
+
+ - name: Check out code
+ uses: actions/checkout@v2
+
+ - name: Test
+ run: go test -count 1 -bench . -benchtime 1x ./...
+
+ - name: Test with -tags purego
+ run: go test -count 1 -bench . -benchtime 1x -tags purego ./...
+
+ test-qemu:
+ needs: test
+ strategy:
+ matrix:
+ go-version: [1.16.x, 1.17.x]
+ arch: [386, arm, arm64]
+ runs-on: ubuntu-latest
+
+ steps:
+ - name: Install go
+ uses: WillAbides/setup-go-faster@v1.5.0
+ with:
+ go-version: ${{ matrix.go-version }}
+
+ - name: Install QEMU
+ uses: docker/setup-qemu-action@v1
+
+ - name: Check out code
+ uses: actions/checkout@v2
+
+ - name: Run test via qemu/binfmt
+ # TODO: Run the dynamic linking tests as well. That is a little more
+ # involved.
+ run: go test -v -count 1 -bench . -benchtime 1x
+ env:
+ GOARCH: ${{ matrix.arch }}
diff --git a/dependencies/pkg/mod/github.com/cespare/xxhash/v2@v2.1.2/LICENSE.txt b/dependencies/pkg/mod/github.com/cespare/xxhash/v2@v2.2.0/LICENSE.txt
index 24b5306..24b5306 100644
--- a/dependencies/pkg/mod/github.com/cespare/xxhash/v2@v2.1.2/LICENSE.txt
+++ b/dependencies/pkg/mod/github.com/cespare/xxhash/v2@v2.2.0/LICENSE.txt
diff --git a/dependencies/pkg/mod/github.com/cespare/xxhash/v2@v2.1.2/README.md b/dependencies/pkg/mod/github.com/cespare/xxhash/v2@v2.2.0/README.md
index 792b4a6..8bf0e5b 100644
--- a/dependencies/pkg/mod/github.com/cespare/xxhash/v2@v2.1.2/README.md
+++ b/dependencies/pkg/mod/github.com/cespare/xxhash/v2@v2.2.0/README.md
@@ -3,8 +3,7 @@
[![Go Reference](https://pkg.go.dev/badge/github.com/cespare/xxhash/v2.svg)](https://pkg.go.dev/github.com/cespare/xxhash/v2)
[![Test](https://github.com/cespare/xxhash/actions/workflows/test.yml/badge.svg)](https://github.com/cespare/xxhash/actions/workflows/test.yml)
-xxhash is a Go implementation of the 64-bit
-[xxHash](http://cyan4973.github.io/xxHash/) algorithm, XXH64. This is a
+xxhash is a Go implementation of the 64-bit [xxHash] algorithm, XXH64. This is a
high-quality hashing algorithm that is much faster than anything in the Go
standard library.
@@ -25,8 +24,11 @@ func (*Digest) WriteString(string) (int, error)
func (*Digest) Sum64() uint64
```
-This implementation provides a fast pure-Go implementation and an even faster
-assembly implementation for amd64.
+The package is written with optimized pure Go and also contains even faster
+assembly implementations for amd64 and arm64. If desired, the `purego` build tag
+opts into using the Go code even on those architectures.
+
+[xxHash]: http://cyan4973.github.io/xxHash/
## Compatibility
@@ -45,19 +47,20 @@ I recommend using the latest release of Go.
Here are some quick benchmarks comparing the pure-Go and assembly
implementations of Sum64.
-| input size | purego | asm |
-| --- | --- | --- |
-| 5 B | 979.66 MB/s | 1291.17 MB/s |
-| 100 B | 7475.26 MB/s | 7973.40 MB/s |
-| 4 KB | 17573.46 MB/s | 17602.65 MB/s |
-| 10 MB | 17131.46 MB/s | 17142.16 MB/s |
+| input size | purego | asm |
+| ---------- | --------- | --------- |
+| 4 B | 1.3 GB/s | 1.2 GB/s |
+| 16 B | 2.9 GB/s | 3.5 GB/s |
+| 100 B | 6.9 GB/s | 8.1 GB/s |
+| 4 KB | 11.7 GB/s | 16.7 GB/s |
+| 10 MB | 12.0 GB/s | 17.3 GB/s |
-These numbers were generated on Ubuntu 18.04 with an Intel i7-8700K CPU using
-the following commands under Go 1.11.2:
+These numbers were generated on Ubuntu 20.04 with an Intel Xeon Platinum 8252C
+CPU using the following commands under Go 1.19.2:
```
-$ go test -tags purego -benchtime 10s -bench '/xxhash,direct,bytes'
-$ go test -benchtime 10s -bench '/xxhash,direct,bytes'
+benchstat <(go test -tags purego -benchtime 500ms -count 15 -bench 'Sum64$')
+benchstat <(go test -benchtime 500ms -count 15 -bench 'Sum64$')
```
## Projects using this package
diff --git a/dependencies/pkg/mod/github.com/cespare/xxhash/v2@v2.1.2/bench_test.go b/dependencies/pkg/mod/github.com/cespare/xxhash/v2@v2.2.0/bench_test.go
index 4dfeb91..1633823 100644
--- a/dependencies/pkg/mod/github.com/cespare/xxhash/v2@v2.1.2/bench_test.go
+++ b/dependencies/pkg/mod/github.com/cespare/xxhash/v2@v2.2.0/bench_test.go
@@ -10,6 +10,7 @@ var benchmarks = []struct {
n int64
}{
{"4B", 4},
+ {"16B", 16},
{"100B", 100},
{"4KB", 4e3},
{"10MB", 10e6},
diff --git a/dependencies/pkg/mod/github.com/cespare/xxhash/v2@v2.1.2/dynamic/.gitignore b/dependencies/pkg/mod/github.com/cespare/xxhash/v2@v2.2.0/dynamic/.gitignore
index 8a84f19..8a84f19 100644
--- a/dependencies/pkg/mod/github.com/cespare/xxhash/v2@v2.1.2/dynamic/.gitignore
+++ b/dependencies/pkg/mod/github.com/cespare/xxhash/v2@v2.2.0/dynamic/.gitignore
diff --git a/dependencies/pkg/mod/github.com/cespare/xxhash/v2@v2.1.2/dynamic/dynamic_test.go b/dependencies/pkg/mod/github.com/cespare/xxhash/v2@v2.2.0/dynamic/dynamic_test.go
index c86bc93..4766b58 100644
--- a/dependencies/pkg/mod/github.com/cespare/xxhash/v2@v2.1.2/dynamic/dynamic_test.go
+++ b/dependencies/pkg/mod/github.com/cespare/xxhash/v2@v2.2.0/dynamic/dynamic_test.go
@@ -1,3 +1,4 @@
+//go:build linux || darwin
// +build linux darwin
package main
@@ -5,6 +6,7 @@ package main
import (
"bytes"
"log"
+ "os"
"os/exec"
"plugin"
"testing"
@@ -25,7 +27,7 @@ func TestMain(m *testing.M) {
if err := cmd.Run(); err != nil {
log.Fatalf("Error building plugin: %s\nOutput:\n%s", err, out.String())
}
- m.Run()
+ os.Exit(m.Run())
}
func TestDynamic(t *testing.T) {
diff --git a/dependencies/pkg/mod/github.com/cespare/xxhash/v2@v2.1.2/dynamic/plugin.go b/dependencies/pkg/mod/github.com/cespare/xxhash/v2@v2.2.0/dynamic/plugin.go
index 319ed71..fcf3e73 100644
--- a/dependencies/pkg/mod/github.com/cespare/xxhash/v2@v2.1.2/dynamic/plugin.go
+++ b/dependencies/pkg/mod/github.com/cespare/xxhash/v2@v2.2.0/dynamic/plugin.go
@@ -1,3 +1,4 @@
+//go:build ignore
// +build ignore
package main
@@ -12,7 +13,7 @@ import (
const (
in = "Call me Ishmael. Some years ago--never mind how long precisely-"
- want = 0x02a2e85470d6fd96
+ want = uint64(0x02a2e85470d6fd96)
)
func TestSum(t *testing.T) {
diff --git a/dependencies/pkg/mod/github.com/cespare/xxhash/v2@v2.1.2/go.mod b/dependencies/pkg/mod/github.com/cespare/xxhash/v2@v2.2.0/go.mod
index 49f6760..49f6760 100644
--- a/dependencies/pkg/mod/github.com/cespare/xxhash/v2@v2.1.2/go.mod
+++ b/dependencies/pkg/mod/github.com/cespare/xxhash/v2@v2.2.0/go.mod
diff --git a/dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.17/go.sum b/dependencies/pkg/mod/github.com/cespare/xxhash/v2@v2.2.0/go.sum
index e69de29..e69de29 100644
--- a/dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.17/go.sum
+++ b/dependencies/pkg/mod/github.com/cespare/xxhash/v2@v2.2.0/go.sum
diff --git a/dependencies/pkg/mod/github.com/cespare/xxhash/v2@v2.2.0/testall.sh b/dependencies/pkg/mod/github.com/cespare/xxhash/v2@v2.2.0/testall.sh
new file mode 100644
index 0000000..94b9c44
--- /dev/null
+++ b/dependencies/pkg/mod/github.com/cespare/xxhash/v2@v2.2.0/testall.sh
@@ -0,0 +1,10 @@
+#!/bin/bash
+set -eu -o pipefail
+
+# Small convenience script for running the tests with various combinations of
+# arch/tags. This assumes we're running on amd64 and have qemu available.
+
+go test ./...
+go test -tags purego ./...
+GOARCH=arm64 go test
+GOARCH=arm64 go test -tags purego
diff --git a/dependencies/pkg/mod/github.com/cespare/xxhash/v2@v2.1.2/xxhash.go b/dependencies/pkg/mod/github.com/cespare/xxhash/v2@v2.2.0/xxhash.go
index 15c835d..a9e0d45 100644
--- a/dependencies/pkg/mod/github.com/cespare/xxhash/v2@v2.1.2/xxhash.go
+++ b/dependencies/pkg/mod/github.com/cespare/xxhash/v2@v2.2.0/xxhash.go
@@ -16,19 +16,11 @@ const (
prime5 uint64 = 2870177450012600261
)
-// NOTE(caleb): I'm using both consts and vars of the primes. Using consts where
-// possible in the Go code is worth a small (but measurable) performance boost
-// by avoiding some MOVQs. Vars are needed for the asm and also are useful for
-// convenience in the Go code in a few places where we need to intentionally
-// avoid constant arithmetic (e.g., v1 := prime1 + prime2 fails because the
-// result overflows a uint64).
-var (
- prime1v = prime1
- prime2v = prime2
- prime3v = prime3
- prime4v = prime4
- prime5v = prime5
-)
+// Store the primes in an array as well.
+//
+// The consts are used when possible in Go code to avoid MOVs but we need a
+// contiguous array of the assembly code.
+var primes = [...]uint64{prime1, prime2, prime3, prime4, prime5}
// Digest implements hash.Hash64.
type Digest struct {
@@ -50,10 +42,10 @@ func New() *Digest {
// Reset clears the Digest's state so that it can be reused.
func (d *Digest) Reset() {
- d.v1 = prime1v + prime2
+ d.v1 = primes[0] + prime2
d.v2 = prime2
d.v3 = 0
- d.v4 = -prime1v
+ d.v4 = -primes[0]
d.total = 0
d.n = 0
}
@@ -69,21 +61,23 @@ func (d *Digest) Write(b []byte) (n int, err error) {
n = len(b)
d.total += uint64(n)
+ memleft := d.mem[d.n&(len(d.mem)-1):]
+
if d.n+n < 32 {
// This new data doesn't even fill the current block.
- copy(d.mem[d.n:], b)
+ copy(memleft, b)
d.n += n
return
}
if d.n > 0 {
// Finish off the partial block.
- copy(d.mem[d.n:], b)
+ c := copy(memleft, b)
d.v1 = round(d.v1, u64(d.mem[0:8]))
d.v2 = round(d.v2, u64(d.mem[8:16]))
d.v3 = round(d.v3, u64(d.mem[16:24]))
d.v4 = round(d.v4, u64(d.mem[24:32]))
- b = b[32-d.n:]
+ b = b[c:]
d.n = 0
}
@@ -133,21 +127,20 @@ func (d *Digest) Sum64() uint64 {
h += d.total
- i, end := 0, d.n
- for ; i+8 <= end; i += 8 {
- k1 := round(0, u64(d.mem[i:i+8]))
+ b := d.mem[:d.n&(len(d.mem)-1)]
+ for ; len(b) >= 8; b = b[8:] {
+ k1 := round(0, u64(b[:8]))
h ^= k1
h = rol27(h)*prime1 + prime4
}
- if i+4 <= end {
- h ^= uint64(u32(d.mem[i:i+4])) * prime1
+ if len(b) >= 4 {
+ h ^= uint64(u32(b[:4])) * prime1
h = rol23(h)*prime2 + prime3
- i += 4
+ b = b[4:]
}
- for i < end {
- h ^= uint64(d.mem[i]) * prime5
+ for ; len(b) > 0; b = b[1:] {
+ h ^= uint64(b[0]) * prime5
h = rol11(h) * prime1
- i++
}
h ^= h >> 33
diff --git a/dependencies/pkg/mod/github.com/cespare/xxhash/v2@v2.2.0/xxhash_amd64.s b/dependencies/pkg/mod/github.com/cespare/xxhash/v2@v2.2.0/xxhash_amd64.s
new file mode 100644
index 0000000..3e8b132
--- /dev/null
+++ b/dependencies/pkg/mod/github.com/cespare/xxhash/v2@v2.2.0/xxhash_amd64.s
@@ -0,0 +1,209 @@
+//go:build !appengine && gc && !purego
+// +build !appengine
+// +build gc
+// +build !purego
+
+#include "textflag.h"
+
+// Registers:
+#define h AX
+#define d AX
+#define p SI // pointer to advance through b
+#define n DX
+#define end BX // loop end
+#define v1 R8
+#define v2 R9
+#define v3 R10
+#define v4 R11
+#define x R12
+#define prime1 R13
+#define prime2 R14
+#define prime4 DI
+
+#define round(acc, x) \
+ IMULQ prime2, x \
+ ADDQ x, acc \
+ ROLQ $31, acc \
+ IMULQ prime1, acc
+
+// round0 performs the operation x = round(0, x).
+#define round0(x) \
+ IMULQ prime2, x \
+ ROLQ $31, x \
+ IMULQ prime1, x
+
+// mergeRound applies a merge round on the two registers acc and x.
+// It assumes that prime1, prime2, and prime4 have been loaded.
+#define mergeRound(acc, x) \
+ round0(x) \
+ XORQ x, acc \
+ IMULQ prime1, acc \
+ ADDQ prime4, acc
+
+// blockLoop processes as many 32-byte blocks as possible,
+// updating v1, v2, v3, and v4. It assumes that there is at least one block
+// to process.
+#define blockLoop() \
+loop: \
+ MOVQ +0(p), x \
+ round(v1, x) \
+ MOVQ +8(p), x \
+ round(v2, x) \
+ MOVQ +16(p), x \
+ round(v3, x) \
+ MOVQ +24(p), x \
+ round(v4, x) \
+ ADDQ $32, p \
+ CMPQ p, end \
+ JLE loop
+
+// func Sum64(b []byte) uint64
+TEXT ·Sum64(SB), NOSPLIT|NOFRAME, $0-32
+ // Load fixed primes.
+ MOVQ ·primes+0(SB), prime1
+ MOVQ ·primes+8(SB), prime2
+ MOVQ ·primes+24(SB), prime4
+
+ // Load slice.
+ MOVQ b_base+0(FP), p
+ MOVQ b_len+8(FP), n
+ LEAQ (p)(n*1), end
+
+ // The first loop limit will be len(b)-32.
+ SUBQ $32, end
+
+ // Check whether we have at least one block.
+ CMPQ n, $32
+ JLT noBlocks
+
+ // Set up initial state (v1, v2, v3, v4).
+ MOVQ prime1, v1
+ ADDQ prime2, v1
+ MOVQ prime2, v2
+ XORQ v3, v3
+ XORQ v4, v4
+ SUBQ prime1, v4
+
+ blockLoop()
+
+ MOVQ v1, h
+ ROLQ $1, h
+ MOVQ v2, x
+ ROLQ $7, x
+ ADDQ x, h
+ MOVQ v3, x
+ ROLQ $12, x
+ ADDQ x, h
+ MOVQ v4, x
+ ROLQ $18, x
+ ADDQ x, h
+
+ mergeRound(h, v1)
+ mergeRound(h, v2)
+ mergeRound(h, v3)
+ mergeRound(h, v4)
+
+ JMP afterBlocks
+
+noBlocks:
+ MOVQ ·primes+32(SB), h
+
+afterBlocks:
+ ADDQ n, h
+
+ ADDQ $24, end
+ CMPQ p, end
+ JG try4
+
+loop8:
+ MOVQ (p), x
+ ADDQ $8, p
+ round0(x)
+ XORQ x, h
+ ROLQ $27, h
+ IMULQ prime1, h
+ ADDQ prime4, h
+
+ CMPQ p, end
+ JLE loop8
+
+try4:
+ ADDQ $4, end
+ CMPQ p, end
+ JG try1
+
+ MOVL (p), x
+ ADDQ $4, p
+ IMULQ prime1, x
+ XORQ x, h
+
+ ROLQ $23, h
+ IMULQ prime2, h
+ ADDQ ·primes+16(SB), h
+
+try1:
+ ADDQ $4, end
+ CMPQ p, end
+ JGE finalize
+
+loop1:
+ MOVBQZX (p), x
+ ADDQ $1, p
+ IMULQ ·primes+32(SB), x
+ XORQ x, h
+ ROLQ $11, h
+ IMULQ prime1, h
+
+ CMPQ p, end
+ JL loop1
+
+finalize:
+ MOVQ h, x
+ SHRQ $33, x
+ XORQ x, h
+ IMULQ prime2, h
+ MOVQ h, x
+ SHRQ $29, x
+ XORQ x, h
+ IMULQ ·primes+16(SB), h
+ MOVQ h, x
+ SHRQ $32, x
+ XORQ x, h
+
+ MOVQ h, ret+24(FP)
+ RET
+
+// func writeBlocks(d *Digest, b []byte) int
+TEXT ·writeBlocks(SB), NOSPLIT|NOFRAME, $0-40
+ // Load fixed primes needed for round.
+ MOVQ ·primes+0(SB), prime1
+ MOVQ ·primes+8(SB), prime2
+
+ // Load slice.
+ MOVQ b_base+8(FP), p
+ MOVQ b_len+16(FP), n
+ LEAQ (p)(n*1), end
+ SUBQ $32, end
+
+ // Load vN from d.
+ MOVQ s+0(FP), d
+ MOVQ 0(d), v1
+ MOVQ 8(d), v2
+ MOVQ 16(d), v3
+ MOVQ 24(d), v4
+
+ // We don't need to check the loop condition here; this function is
+ // always called with at least one block of data to process.
+ blockLoop()
+
+ // Copy vN back to d.
+ MOVQ v1, 0(d)
+ MOVQ v2, 8(d)
+ MOVQ v3, 16(d)
+ MOVQ v4, 24(d)
+
+ // The number of bytes written is p minus the old base pointer.
+ SUBQ b_base+8(FP), p
+ MOVQ p, ret+32(FP)
+
+ RET
diff --git a/dependencies/pkg/mod/github.com/cespare/xxhash/v2@v2.2.0/xxhash_arm64.s b/dependencies/pkg/mod/github.com/cespare/xxhash/v2@v2.2.0/xxhash_arm64.s
new file mode 100644
index 0000000..7e3145a
--- /dev/null
+++ b/dependencies/pkg/mod/github.com/cespare/xxhash/v2@v2.2.0/xxhash_arm64.s
@@ -0,0 +1,183 @@
+//go:build !appengine && gc && !purego
+// +build !appengine
+// +build gc
+// +build !purego
+
+#include "textflag.h"
+
+// Registers:
+#define digest R1
+#define h R2 // return value
+#define p R3 // input pointer
+#define n R4 // input length
+#define nblocks R5 // n / 32
+#define prime1 R7
+#define prime2 R8
+#define prime3 R9
+#define prime4 R10
+#define prime5 R11
+#define v1 R12
+#define v2 R13
+#define v3 R14
+#define v4 R15
+#define x1 R20
+#define x2 R21
+#define x3 R22
+#define x4 R23
+
+#define round(acc, x) \
+ MADD prime2, acc, x, acc \
+ ROR $64-31, acc \
+ MUL prime1, acc
+
+// round0 performs the operation x = round(0, x).
+#define round0(x) \
+ MUL prime2, x \
+ ROR $64-31, x \
+ MUL prime1, x
+
+#define mergeRound(acc, x) \
+ round0(x) \
+ EOR x, acc \
+ MADD acc, prime4, prime1, acc
+
+// blockLoop processes as many 32-byte blocks as possible,
+// updating v1, v2, v3, and v4. It assumes that n >= 32.
+#define blockLoop() \
+ LSR $5, n, nblocks \
+ PCALIGN $16 \
+ loop: \
+ LDP.P 16(p), (x1, x2) \
+ LDP.P 16(p), (x3, x4) \
+ round(v1, x1) \
+ round(v2, x2) \
+ round(v3, x3) \
+ round(v4, x4) \
+ SUB $1, nblocks \
+ CBNZ nblocks, loop
+
+// func Sum64(b []byte) uint64
+TEXT ·Sum64(SB), NOSPLIT|NOFRAME, $0-32
+ LDP b_base+0(FP), (p, n)
+
+ LDP ·primes+0(SB), (prime1, prime2)
+ LDP ·primes+16(SB), (prime3, prime4)
+ MOVD ·primes+32(SB), prime5
+
+ CMP $32, n
+ CSEL LT, prime5, ZR, h // if n < 32 { h = prime5 } else { h = 0 }
+ BLT afterLoop
+
+ ADD prime1, prime2, v1
+ MOVD prime2, v2
+ MOVD $0, v3
+ NEG prime1, v4
+
+ blockLoop()
+
+ ROR $64-1, v1, x1
+ ROR $64-7, v2, x2
+ ADD x1, x2
+ ROR $64-12, v3, x3
+ ROR $64-18, v4, x4
+ ADD x3, x4
+ ADD x2, x4, h
+
+ mergeRound(h, v1)
+ mergeRound(h, v2)
+ mergeRound(h, v3)
+ mergeRound(h, v4)
+
+afterLoop:
+ ADD n, h
+
+ TBZ $4, n, try8
+ LDP.P 16(p), (x1, x2)
+
+ round0(x1)
+
+ // NOTE: here and below, sequencing the EOR after the ROR (using a
+ // rotated register) is worth a small but measurable speedup for small
+ // inputs.
+ ROR $64-27, h
+ EOR x1 @> 64-27, h, h
+ MADD h, prime4, prime1, h
+
+ round0(x2)
+ ROR $64-27, h
+ EOR x2 @> 64-27, h, h
+ MADD h, prime4, prime1, h
+
+try8:
+ TBZ $3, n, try4
+ MOVD.P 8(p), x1
+
+ round0(x1)
+ ROR $64-27, h
+ EOR x1 @> 64-27, h, h
+ MADD h, prime4, prime1, h
+
+try4:
+ TBZ $2, n, try2
+ MOVWU.P 4(p), x2
+
+ MUL prime1, x2
+ ROR $64-23, h
+ EOR x2 @> 64-23, h, h
+ MADD h, prime3, prime2, h
+
+try2:
+ TBZ $1, n, try1
+ MOVHU.P 2(p), x3
+ AND $255, x3, x1
+ LSR $8, x3, x2
+
+ MUL prime5, x1
+ ROR $64-11, h
+ EOR x1 @> 64-11, h, h
+ MUL prime1, h
+
+ MUL prime5, x2
+ ROR $64-11, h
+ EOR x2 @> 64-11, h, h
+ MUL prime1, h
+
+try1:
+ TBZ $0, n, finalize
+ MOVBU (p), x4
+
+ MUL prime5, x4
+ ROR $64-11, h
+ EOR x4 @> 64-11, h, h
+ MUL prime1, h
+
+finalize:
+ EOR h >> 33, h
+ MUL prime2, h
+ EOR h >> 29, h
+ MUL prime3, h
+ EOR h >> 32, h
+
+ MOVD h, ret+24(FP)
+ RET
+
+// func writeBlocks(d *Digest, b []byte) int
+TEXT ·writeBlocks(SB), NOSPLIT|NOFRAME, $0-40
+ LDP ·primes+0(SB), (prime1, prime2)
+
+ // Load state. Assume v[1-4] are stored contiguously.
+ MOVD d+0(FP), digest
+ LDP 0(digest), (v1, v2)
+ LDP 16(digest), (v3, v4)
+
+ LDP b_base+8(FP), (p, n)
+
+ blockLoop()
+
+ // Store updated state.
+ STP (v1, v2), 0(digest)
+ STP (v3, v4), 16(digest)
+
+ BIC $31, n
+ MOVD n, ret+32(FP)
+ RET
diff --git a/dependencies/pkg/mod/github.com/cespare/xxhash/v2@v2.1.2/xxhash_amd64.go b/dependencies/pkg/mod/github.com/cespare/xxhash/v2@v2.2.0/xxhash_asm.go
index ad14b80..9216e0a 100644
--- a/dependencies/pkg/mod/github.com/cespare/xxhash/v2@v2.1.2/xxhash_amd64.go
+++ b/dependencies/pkg/mod/github.com/cespare/xxhash/v2@v2.2.0/xxhash_asm.go
@@ -1,3 +1,5 @@
+//go:build (amd64 || arm64) && !appengine && gc && !purego
+// +build amd64 arm64
// +build !appengine
// +build gc
// +build !purego
diff --git a/dependencies/pkg/mod/github.com/cespare/xxhash/v2@v2.1.2/xxhash_other.go b/dependencies/pkg/mod/github.com/cespare/xxhash/v2@v2.2.0/xxhash_other.go
index 4a5a821..26df13b 100644
--- a/dependencies/pkg/mod/github.com/cespare/xxhash/v2@v2.1.2/xxhash_other.go
+++ b/dependencies/pkg/mod/github.com/cespare/xxhash/v2@v2.2.0/xxhash_other.go
@@ -1,4 +1,5 @@
-// +build !amd64 appengine !gc purego
+//go:build (!amd64 && !arm64) || appengine || !gc || purego
+// +build !amd64,!arm64 appengine !gc purego
package xxhash
@@ -14,10 +15,10 @@ func Sum64(b []byte) uint64 {
var h uint64
if n >= 32 {
- v1 := prime1v + prime2
+ v1 := primes[0] + prime2
v2 := prime2
v3 := uint64(0)
- v4 := -prime1v
+ v4 := -primes[0]
for len(b) >= 32 {
v1 = round(v1, u64(b[0:8:len(b)]))
v2 = round(v2, u64(b[8:16:len(b)]))
@@ -36,19 +37,18 @@ func Sum64(b []byte) uint64 {
h += uint64(n)
- i, end := 0, len(b)
- for ; i+8 <= end; i += 8 {
- k1 := round(0, u64(b[i:i+8:len(b)]))
+ for ; len(b) >= 8; b = b[8:] {
+ k1 := round(0, u64(b[:8]))
h ^= k1
h = rol27(h)*prime1 + prime4
}
- if i+4 <= end {
- h ^= uint64(u32(b[i:i+4:len(b)])) * prime1
+ if len(b) >= 4 {
+ h ^= uint64(u32(b[:4])) * prime1
h = rol23(h)*prime2 + prime3
- i += 4
+ b = b[4:]
}
- for ; i < end; i++ {
- h ^= uint64(b[i]) * prime5
+ for ; len(b) > 0; b = b[1:] {
+ h ^= uint64(b[0]) * prime5
h = rol11(h) * prime1
}
diff --git a/dependencies/pkg/mod/github.com/cespare/xxhash/v2@v2.1.2/xxhash_safe.go b/dependencies/pkg/mod/github.com/cespare/xxhash/v2@v2.2.0/xxhash_safe.go
index fc9bea7..e86f1b5 100644
--- a/dependencies/pkg/mod/github.com/cespare/xxhash/v2@v2.1.2/xxhash_safe.go
+++ b/dependencies/pkg/mod/github.com/cespare/xxhash/v2@v2.2.0/xxhash_safe.go
@@ -1,3 +1,4 @@
+//go:build appengine
// +build appengine
// This file contains the safe implementations of otherwise unsafe-using code.
diff --git a/dependencies/pkg/mod/github.com/cespare/xxhash/v2@v2.1.2/xxhash_test.go b/dependencies/pkg/mod/github.com/cespare/xxhash/v2@v2.2.0/xxhash_test.go
index 6330f19..6330f19 100644
--- a/dependencies/pkg/mod/github.com/cespare/xxhash/v2@v2.1.2/xxhash_test.go
+++ b/dependencies/pkg/mod/github.com/cespare/xxhash/v2@v2.2.0/xxhash_test.go
diff --git a/dependencies/pkg/mod/github.com/cespare/xxhash/v2@v2.1.2/xxhash_unsafe.go b/dependencies/pkg/mod/github.com/cespare/xxhash/v2@v2.2.0/xxhash_unsafe.go
index 376e0ca..1c1638f 100644
--- a/dependencies/pkg/mod/github.com/cespare/xxhash/v2@v2.1.2/xxhash_unsafe.go
+++ b/dependencies/pkg/mod/github.com/cespare/xxhash/v2@v2.2.0/xxhash_unsafe.go
@@ -1,3 +1,4 @@
+//go:build !appengine
// +build !appengine
// This file encapsulates usage of unsafe.
@@ -11,7 +12,7 @@ import (
// In the future it's possible that compiler optimizations will make these
// XxxString functions unnecessary by realizing that calls such as
-// Sum64([]byte(s)) don't need to copy s. See https://golang.org/issue/2205.
+// Sum64([]byte(s)) don't need to copy s. See https://go.dev/issue/2205.
// If that happens, even if we keep these functions they can be replaced with
// the trivial safe code.
diff --git a/dependencies/pkg/mod/github.com/cespare/xxhash/v2@v2.1.2/xxhash_unsafe_test.go b/dependencies/pkg/mod/github.com/cespare/xxhash/v2@v2.2.0/xxhash_unsafe_test.go
index 6598267..6d6f93c 100644
--- a/dependencies/pkg/mod/github.com/cespare/xxhash/v2@v2.1.2/xxhash_unsafe_test.go
+++ b/dependencies/pkg/mod/github.com/cespare/xxhash/v2@v2.2.0/xxhash_unsafe_test.go
@@ -1,3 +1,4 @@
+//go:build !appengine
// +build !appengine
package xxhash
@@ -25,7 +26,7 @@ func TestStringAllocs(t *testing.T) {
})
}
-// This test is inspired by the Go runtime tests in https://golang.org/cl/57410.
+// This test is inspired by the Go runtime tests in https://go.dev/cl/57410.
// It asserts that certain important functions may be inlined.
func TestInlining(t *testing.T) {
funcs := map[string]struct{}{
@@ -33,8 +34,6 @@ func TestInlining(t *testing.T) {
"(*Digest).WriteString": {},
}
- // TODO: it would be better to use the go binary that is running
- // 'go test' (if we are running under 'go test').
cmd := exec.Command("go", "test", "-gcflags=-m", "-run", "xxxx")
out, err := cmd.CombinedOutput()
if err != nil {
diff --git a/dependencies/pkg/mod/github.com/cespare/xxhash/v2@v2.1.2/xxhsum/.gitignore b/dependencies/pkg/mod/github.com/cespare/xxhash/v2@v2.2.0/xxhsum/.gitignore
index 2c88f1d..2c88f1d 100644
--- a/dependencies/pkg/mod/github.com/cespare/xxhash/v2@v2.1.2/xxhsum/.gitignore
+++ b/dependencies/pkg/mod/github.com/cespare/xxhash/v2@v2.2.0/xxhsum/.gitignore
diff --git a/dependencies/pkg/mod/github.com/cespare/xxhash/v2@v2.1.2/xxhsum/xxhsum.go b/dependencies/pkg/mod/github.com/cespare/xxhash/v2@v2.2.0/xxhsum/xxhsum.go
index 9b1d035..9b1d035 100644
--- a/dependencies/pkg/mod/github.com/cespare/xxhash/v2@v2.1.2/xxhsum/xxhsum.go
+++ b/dependencies/pkg/mod/github.com/cespare/xxhash/v2@v2.2.0/xxhsum/xxhsum.go
diff --git a/dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/.github/workflows/commitlint.yml b/dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/.github/workflows/commitlint.yml
deleted file mode 100644
index 5fcfeae..0000000
--- a/dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/.github/workflows/commitlint.yml
+++ /dev/null
@@ -1,11 +0,0 @@
-name: Lint Commit Messages
-on: [pull_request]
-
-jobs:
- commitlint:
- runs-on: ubuntu-latest
- steps:
- - uses: actions/checkout@v3
- with:
- fetch-depth: 0
- - uses: wagoid/commitlint-github-action@v4
diff --git a/dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/.github/workflows/golangci-lint.yml b/dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/.github/workflows/golangci-lint.yml
deleted file mode 100644
index 28c16c5..0000000
--- a/dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/.github/workflows/golangci-lint.yml
+++ /dev/null
@@ -1,19 +0,0 @@
-name: golangci-lint
-
-on:
- push:
- tags:
- - v*
- branches:
- - master
- - main
- pull_request:
-
-jobs:
- golangci:
- name: lint
- runs-on: ubuntu-latest
- steps:
- - uses: actions/checkout@v3
- - name: golangci-lint
- uses: golangci/golangci-lint-action@v2
diff --git a/dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/.github/workflows/release.yml b/dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/.github/workflows/release.yml
deleted file mode 100644
index 685693a..0000000
--- a/dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/.github/workflows/release.yml
+++ /dev/null
@@ -1,17 +0,0 @@
-name: Releases
-
-on:
- push:
- tags:
- - 'v*'
-
-jobs:
- build:
- runs-on: ubuntu-latest
- steps:
- - uses: actions/checkout@v3
- - uses: ncipollo/release-action@v1
- with:
- body:
- Please refer to
- [CHANGELOG.md](https://github.com/go-redis/redis/blob/master/CHANGELOG.md) for details
diff --git a/dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/.gitignore b/dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/.gitignore
deleted file mode 100644
index b975a7b..0000000
--- a/dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/.gitignore
+++ /dev/null
@@ -1,3 +0,0 @@
-*.rdb
-testdata/*/
-.idea/
diff --git a/dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/CHANGELOG.md b/dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/CHANGELOG.md
deleted file mode 100644
index 195e519..0000000
--- a/dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/CHANGELOG.md
+++ /dev/null
@@ -1,177 +0,0 @@
-## [8.11.5](https://github.com/go-redis/redis/compare/v8.11.4...v8.11.5) (2022-03-17)
-
-
-### Bug Fixes
-
-* add missing Expire methods to Cmdable ([17e3b43](https://github.com/go-redis/redis/commit/17e3b43879d516437ada71cf9c0deac6a382ed9a))
-* add whitespace for avoid unlikely colisions ([7f7c181](https://github.com/go-redis/redis/commit/7f7c1817617cfec909efb13d14ad22ef05a6ad4c))
-* example/otel compile error ([#2028](https://github.com/go-redis/redis/issues/2028)) ([187c07c](https://github.com/go-redis/redis/commit/187c07c41bf68dc3ab280bc3a925e960bbef6475))
-* **extra/redisotel:** set span.kind attribute to client ([065b200](https://github.com/go-redis/redis/commit/065b200070b41e6e949710b4f9e01b50ccc60ab2))
-* format ([96f53a0](https://github.com/go-redis/redis/commit/96f53a0159a28affa94beec1543a62234e7f8b32))
-* invalid type assert in stringArg ([de6c131](https://github.com/go-redis/redis/commit/de6c131865b8263400c8491777b295035f2408e4))
-* rename Golang to Go ([#2030](https://github.com/go-redis/redis/issues/2030)) ([b82a2d9](https://github.com/go-redis/redis/commit/b82a2d9d4d2de7b7cbe8fcd4895be62dbcacacbc))
-* set timeout for WAIT command. Fixes [#1963](https://github.com/go-redis/redis/issues/1963) ([333fee1](https://github.com/go-redis/redis/commit/333fee1a8fd98a2fbff1ab187c1b03246a7eb01f))
-* update some argument counts in pre-allocs ([f6974eb](https://github.com/go-redis/redis/commit/f6974ebb5c40a8adf90d2cacab6dc297f4eba4c2))
-
-
-### Features
-
-* Add redis v7's NX, XX, GT, LT expire variants ([e19bbb2](https://github.com/go-redis/redis/commit/e19bbb26e2e395c6e077b48d80d79e99f729a8b8))
-* add support for acl sentinel auth in universal client ([ab0ccc4](https://github.com/go-redis/redis/commit/ab0ccc47413f9b2a6eabc852fed5005a3ee1af6e))
-* add support for COPY command ([#2016](https://github.com/go-redis/redis/issues/2016)) ([730afbc](https://github.com/go-redis/redis/commit/730afbcffb93760e8a36cc06cfe55ab102b693a7))
-* add support for passing extra attributes added to spans ([39faaa1](https://github.com/go-redis/redis/commit/39faaa171523834ba527c9789710c4fde87f5a2e))
-* add support for time.Duration write and scan ([2f1b74e](https://github.com/go-redis/redis/commit/2f1b74e20cdd7719b2aecf0768d3e3ae7c3e781b))
-* **redisotel:** ability to override TracerProvider ([#1998](https://github.com/go-redis/redis/issues/1998)) ([bf8d4aa](https://github.com/go-redis/redis/commit/bf8d4aa60c00366cda2e98c3ddddc8cf68507417))
-* set net.peer.name and net.peer.port in otel example ([69bf454](https://github.com/go-redis/redis/commit/69bf454f706204211cd34835f76b2e8192d3766d))
-
-
-
-## [8.11.4](https://github.com/go-redis/redis/compare/v8.11.3...v8.11.4) (2021-10-04)
-
-
-### Features
-
-* add acl auth support for sentinels ([f66582f](https://github.com/go-redis/redis/commit/f66582f44f3dc3a4705a5260f982043fde4aa634))
-* add Cmd.{String,Int,Float,Bool}Slice helpers and an example ([5d3d293](https://github.com/go-redis/redis/commit/5d3d293cc9c60b90871e2420602001463708ce24))
-* add SetVal method for each command ([168981d](https://github.com/go-redis/redis/commit/168981da2d84ee9e07d15d3e74d738c162e264c4))
-
-
-
-## v8.11
-
-- Remove OpenTelemetry metrics.
-- Supports more redis commands and options.
-
-## v8.10
-
-- Removed extra OpenTelemetry spans from go-redis core. Now go-redis instrumentation only adds a
- single span with a Redis command (instead of 4 spans). There are multiple reasons behind this
- decision:
-
- - Traces become smaller and less noisy.
- - It may be costly to process those 3 extra spans for each query.
- - go-redis no longer depends on OpenTelemetry.
-
- Eventually we hope to replace the information that we no longer collect with OpenTelemetry
- Metrics.
-
-## v8.9
-
-- Changed `PubSub.Channel` to only rely on `Ping` result. You can now use `WithChannelSize`,
- `WithChannelHealthCheckInterval`, and `WithChannelSendTimeout` to override default settings.
-
-## v8.8
-
-- To make updating easier, extra modules now have the same version as go-redis does. That means that
- you need to update your imports:
-
-```
-github.com/go-redis/redis/extra/redisotel -> github.com/go-redis/redis/extra/redisotel/v8
-github.com/go-redis/redis/extra/rediscensus -> github.com/go-redis/redis/extra/rediscensus/v8
-```
-
-## v8.5
-
-- [knadh](https://github.com/knadh) contributed long-awaited ability to scan Redis Hash into a
- struct:
-
-```go
-err := rdb.HGetAll(ctx, "hash").Scan(&data)
-
-err := rdb.MGet(ctx, "key1", "key2").Scan(&data)
-```
-
-- Please check [redismock](https://github.com/go-redis/redismock) by
- [monkey92t](https://github.com/monkey92t) if you are looking for mocking Redis Client.
-
-## v8
-
-- All commands require `context.Context` as a first argument, e.g. `rdb.Ping(ctx)`. If you are not
- using `context.Context` yet, the simplest option is to define global package variable
- `var ctx = context.TODO()` and use it when `ctx` is required.
-
-- Full support for `context.Context` canceling.
-
-- Added `redis.NewFailoverClusterClient` that supports routing read-only commands to a slave node.
-
-- Added `redisext.OpenTemetryHook` that adds
- [Redis OpenTelemetry instrumentation](https://redis.uptrace.dev/tracing/).
-
-- Redis slow log support.
-
-- Ring uses Rendezvous Hashing by default which provides better distribution. You need to move
- existing keys to a new location or keys will be inaccessible / lost. To use old hashing scheme:
-
-```go
-import "github.com/golang/groupcache/consistenthash"
-
-ring := redis.NewRing(&redis.RingOptions{
- NewConsistentHash: func() {
- return consistenthash.New(100, crc32.ChecksumIEEE)
- },
-})
-```
-
-- `ClusterOptions.MaxRedirects` default value is changed from 8 to 3.
-- `Options.MaxRetries` default value is changed from 0 to 3.
-
-- `Cluster.ForEachNode` is renamed to `ForEachShard` for consistency with `Ring`.
-
-## v7.3
-
-- New option `Options.Username` which causes client to use `AuthACL`. Be aware if your connection
- URL contains username.
-
-## v7.2
-
-- Existing `HMSet` is renamed to `HSet` and old deprecated `HMSet` is restored for Redis 3 users.
-
-## v7.1
-
-- Existing `Cmd.String` is renamed to `Cmd.Text`. New `Cmd.String` implements `fmt.Stringer`
- interface.
-
-## v7
-
-- _Important_. Tx.Pipeline now returns a non-transactional pipeline. Use Tx.TxPipeline for a
- transactional pipeline.
-- WrapProcess is replaced with more convenient AddHook that has access to context.Context.
-- WithContext now can not be used to create a shallow copy of the client.
-- New methods ProcessContext, DoContext, and ExecContext.
-- Client respects Context.Deadline when setting net.Conn deadline.
-- Client listens on Context.Done while waiting for a connection from the pool and returns an error
- when context context is cancelled.
-- Add PubSub.ChannelWithSubscriptions that sends `*Subscription` in addition to `*Message` to allow
- detecting reconnections.
-- `time.Time` is now marshalled in RFC3339 format. `rdb.Get("foo").Time()` helper is added to parse
- the time.
-- `SetLimiter` is removed and added `Options.Limiter` instead.
-- `HMSet` is deprecated as of Redis v4.
-
-## v6.15
-
-- Cluster and Ring pipelines process commands for each node in its own goroutine.
-
-## 6.14
-
-- Added Options.MinIdleConns.
-- Added Options.MaxConnAge.
-- PoolStats.FreeConns is renamed to PoolStats.IdleConns.
-- Add Client.Do to simplify creating custom commands.
-- Add Cmd.String, Cmd.Int, Cmd.Int64, Cmd.Uint64, Cmd.Float64, and Cmd.Bool helpers.
-- Lower memory usage.
-
-## v6.13
-
-- Ring got new options called `HashReplicas` and `Hash`. It is recommended to set
- `HashReplicas = 1000` for better keys distribution between shards.
-- Cluster client was optimized to use much less memory when reloading cluster state.
-- PubSub.ReceiveMessage is re-worked to not use ReceiveTimeout so it does not lose data when timeout
- occurres. In most cases it is recommended to use PubSub.Channel instead.
-- Dialer.KeepAlive is set to 5 minutes by default.
-
-## v6.12
-
-- ClusterClient got new option called `ClusterSlots` which allows to build cluster of normal Redis
- Servers that don't have cluster mode enabled. See
- https://godoc.org/github.com/go-redis/redis#example-NewClusterClient--ManualSetup
diff --git a/dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/Makefile b/dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/Makefile
deleted file mode 100644
index a4cfe05..0000000
--- a/dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/Makefile
+++ /dev/null
@@ -1,35 +0,0 @@
-PACKAGE_DIRS := $(shell find . -mindepth 2 -type f -name 'go.mod' -exec dirname {} \; | sort)
-
-test: testdeps
- go test ./...
- go test ./... -short -race
- go test ./... -run=NONE -bench=. -benchmem
- env GOOS=linux GOARCH=386 go test ./...
- go vet
-
-testdeps: testdata/redis/src/redis-server
-
-bench: testdeps
- go test ./... -test.run=NONE -test.bench=. -test.benchmem
-
-.PHONY: all test testdeps bench
-
-testdata/redis:
- mkdir -p $@
- wget -qO- https://download.redis.io/releases/redis-6.2.5.tar.gz | tar xvz --strip-components=1 -C $@
-
-testdata/redis/src/redis-server: testdata/redis
- cd $< && make all
-
-fmt:
- gofmt -w -s ./
- goimports -w -local github.com/go-redis/redis ./
-
-go_mod_tidy:
- go get -u && go mod tidy
- set -e; for dir in $(PACKAGE_DIRS); do \
- echo "go mod tidy in $${dir}"; \
- (cd "$${dir}" && \
- go get -u && \
- go mod tidy); \
- done
diff --git a/dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/README.md b/dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/README.md
deleted file mode 100644
index f3b6a01..0000000
--- a/dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/README.md
+++ /dev/null
@@ -1,175 +0,0 @@
-# Redis client for Go
-
-![build workflow](https://github.com/go-redis/redis/actions/workflows/build.yml/badge.svg)
-[![PkgGoDev](https://pkg.go.dev/badge/github.com/go-redis/redis/v8)](https://pkg.go.dev/github.com/go-redis/redis/v8?tab=doc)
-[![Documentation](https://img.shields.io/badge/redis-documentation-informational)](https://redis.uptrace.dev/)
-
-go-redis is brought to you by :star: [**uptrace/uptrace**](https://github.com/uptrace/uptrace).
-Uptrace is an open source and blazingly fast **distributed tracing** backend powered by
-OpenTelemetry and ClickHouse. Give it a star as well!
-
-## Resources
-
-- [Discussions](https://github.com/go-redis/redis/discussions)
-- [Documentation](https://redis.uptrace.dev)
-- [Reference](https://pkg.go.dev/github.com/go-redis/redis/v8?tab=doc)
-- [Examples](https://pkg.go.dev/github.com/go-redis/redis/v8?tab=doc#pkg-examples)
-- [RealWorld example app](https://github.com/uptrace/go-treemux-realworld-example-app)
-
-Other projects you may like:
-
-- [Bun](https://bun.uptrace.dev) - fast and simple SQL client for PostgreSQL, MySQL, and SQLite.
-- [BunRouter](https://bunrouter.uptrace.dev/) - fast and flexible HTTP router for Go.
-
-## Ecosystem
-
-- [Redis Mock](https://github.com/go-redis/redismock)
-- [Distributed Locks](https://github.com/bsm/redislock)
-- [Redis Cache](https://github.com/go-redis/cache)
-- [Rate limiting](https://github.com/go-redis/redis_rate)
-
-## Features
-
-- Redis 3 commands except QUIT, MONITOR, and SYNC.
-- Automatic connection pooling with
- [circuit breaker](https://en.wikipedia.org/wiki/Circuit_breaker_design_pattern) support.
-- [Pub/Sub](https://pkg.go.dev/github.com/go-redis/redis/v8?tab=doc#PubSub).
-- [Transactions](https://pkg.go.dev/github.com/go-redis/redis/v8?tab=doc#example-Client-TxPipeline).
-- [Pipeline](https://pkg.go.dev/github.com/go-redis/redis/v8?tab=doc#example-Client.Pipeline) and
- [TxPipeline](https://pkg.go.dev/github.com/go-redis/redis/v8?tab=doc#example-Client.TxPipeline).
-- [Scripting](https://pkg.go.dev/github.com/go-redis/redis/v8?tab=doc#Script).
-- [Timeouts](https://pkg.go.dev/github.com/go-redis/redis/v8?tab=doc#Options).
-- [Redis Sentinel](https://pkg.go.dev/github.com/go-redis/redis/v8?tab=doc#NewFailoverClient).
-- [Redis Cluster](https://pkg.go.dev/github.com/go-redis/redis/v8?tab=doc#NewClusterClient).
-- [Cluster of Redis Servers](https://pkg.go.dev/github.com/go-redis/redis/v8?tab=doc#example-NewClusterClient-ManualSetup)
- without using cluster mode and Redis Sentinel.
-- [Ring](https://pkg.go.dev/github.com/go-redis/redis/v8?tab=doc#NewRing).
-- [Instrumentation](https://pkg.go.dev/github.com/go-redis/redis/v8?tab=doc#example-package-Instrumentation).
-
-## Installation
-
-go-redis supports 2 last Go versions and requires a Go version with
-[modules](https://github.com/golang/go/wiki/Modules) support. So make sure to initialize a Go
-module:
-
-```shell
-go mod init github.com/my/repo
-```
-
-And then install go-redis/v8 (note _v8_ in the import; omitting it is a popular mistake):
-
-```shell
-go get github.com/go-redis/redis/v8
-```
-
-## Quickstart
-
-```go
-import (
- "context"
- "github.com/go-redis/redis/v8"
- "fmt"
-)
-
-var ctx = context.Background()
-
-func ExampleClient() {
- rdb := redis.NewClient(&redis.Options{
- Addr: "localhost:6379",
- Password: "", // no password set
- DB: 0, // use default DB
- })
-
- err := rdb.Set(ctx, "key", "value", 0).Err()
- if err != nil {
- panic(err)
- }
-
- val, err := rdb.Get(ctx, "key").Result()
- if err != nil {
- panic(err)
- }
- fmt.Println("key", val)
-
- val2, err := rdb.Get(ctx, "key2").Result()
- if err == redis.Nil {
- fmt.Println("key2 does not exist")
- } else if err != nil {
- panic(err)
- } else {
- fmt.Println("key2", val2)
- }
- // Output: key value
- // key2 does not exist
-}
-```
-
-## Look and feel
-
-Some corner cases:
-
-```go
-// SET key value EX 10 NX
-set, err := rdb.SetNX(ctx, "key", "value", 10*time.Second).Result()
-
-// SET key value keepttl NX
-set, err := rdb.SetNX(ctx, "key", "value", redis.KeepTTL).Result()
-
-// SORT list LIMIT 0 2 ASC
-vals, err := rdb.Sort(ctx, "list", &redis.Sort{Offset: 0, Count: 2, Order: "ASC"}).Result()
-
-// ZRANGEBYSCORE zset -inf +inf WITHSCORES LIMIT 0 2
-vals, err := rdb.ZRangeByScoreWithScores(ctx, "zset", &redis.ZRangeBy{
- Min: "-inf",
- Max: "+inf",
- Offset: 0,
- Count: 2,
-}).Result()
-
-// ZINTERSTORE out 2 zset1 zset2 WEIGHTS 2 3 AGGREGATE SUM
-vals, err := rdb.ZInterStore(ctx, "out", &redis.ZStore{
- Keys: []string{"zset1", "zset2"},
- Weights: []int64{2, 3}
-}).Result()
-
-// EVAL "return {KEYS[1],ARGV[1]}" 1 "key" "hello"
-vals, err := rdb.Eval(ctx, "return {KEYS[1],ARGV[1]}", []string{"key"}, "hello").Result()
-
-// custom command
-res, err := rdb.Do(ctx, "set", "key", "value").Result()
-```
-
-## Run the test
-
-go-redis will start a redis-server and run the test cases.
-
-The paths of redis-server bin file and redis config file are defined in `main_test.go`:
-
-```
-var (
- redisServerBin, _ = filepath.Abs(filepath.Join("testdata", "redis", "src", "redis-server"))
- redisServerConf, _ = filepath.Abs(filepath.Join("testdata", "redis", "redis.conf"))
-)
-```
-
-For local testing, you can change the variables to refer to your local files, or create a soft link
-to the corresponding folder for redis-server and copy the config file to `testdata/redis/`:
-
-```
-ln -s /usr/bin/redis-server ./go-redis/testdata/redis/src
-cp ./go-redis/testdata/redis.conf ./go-redis/testdata/redis/
-```
-
-Lastly, run:
-
-```
-go test
-```
-
-## Contributors
-
-Thanks to all the people who already contributed!
-
-<a href="https://github.com/go-redis/redis/graphs/contributors">
- <img src="https://contributors-img.web.app/image?repo=go-redis/redis" />
-</a>
diff --git a/dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/command.go b/dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/command.go
deleted file mode 100644
index 4bb12a8..0000000
--- a/dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/command.go
+++ /dev/null
@@ -1,3478 +0,0 @@
-package redis
-
-import (
- "context"
- "fmt"
- "net"
- "strconv"
- "time"
-
- "github.com/go-redis/redis/v8/internal"
- "github.com/go-redis/redis/v8/internal/hscan"
- "github.com/go-redis/redis/v8/internal/proto"
- "github.com/go-redis/redis/v8/internal/util"
-)
-
-type Cmder interface {
- Name() string
- FullName() string
- Args() []interface{}
- String() string
- stringArg(int) string
- firstKeyPos() int8
- SetFirstKeyPos(int8)
-
- readTimeout() *time.Duration
- readReply(rd *proto.Reader) error
-
- SetErr(error)
- Err() error
-}
-
-func setCmdsErr(cmds []Cmder, e error) {
- for _, cmd := range cmds {
- if cmd.Err() == nil {
- cmd.SetErr(e)
- }
- }
-}
-
-func cmdsFirstErr(cmds []Cmder) error {
- for _, cmd := range cmds {
- if err := cmd.Err(); err != nil {
- return err
- }
- }
- return nil
-}
-
-func writeCmds(wr *proto.Writer, cmds []Cmder) error {
- for _, cmd := range cmds {
- if err := writeCmd(wr, cmd); err != nil {
- return err
- }
- }
- return nil
-}
-
-func writeCmd(wr *proto.Writer, cmd Cmder) error {
- return wr.WriteArgs(cmd.Args())
-}
-
-func cmdFirstKeyPos(cmd Cmder, info *CommandInfo) int {
- if pos := cmd.firstKeyPos(); pos != 0 {
- return int(pos)
- }
-
- switch cmd.Name() {
- case "eval", "evalsha":
- if cmd.stringArg(2) != "0" {
- return 3
- }
-
- return 0
- case "publish":
- return 1
- case "memory":
- // https://github.com/redis/redis/issues/7493
- if cmd.stringArg(1) == "usage" {
- return 2
- }
- }
-
- if info != nil {
- return int(info.FirstKeyPos)
- }
- return 0
-}
-
-func cmdString(cmd Cmder, val interface{}) string {
- b := make([]byte, 0, 64)
-
- for i, arg := range cmd.Args() {
- if i > 0 {
- b = append(b, ' ')
- }
- b = internal.AppendArg(b, arg)
- }
-
- if err := cmd.Err(); err != nil {
- b = append(b, ": "...)
- b = append(b, err.Error()...)
- } else if val != nil {
- b = append(b, ": "...)
- b = internal.AppendArg(b, val)
- }
-
- return internal.String(b)
-}
-
-//------------------------------------------------------------------------------
-
-type baseCmd struct {
- ctx context.Context
- args []interface{}
- err error
- keyPos int8
-
- _readTimeout *time.Duration
-}
-
-var _ Cmder = (*Cmd)(nil)
-
-func (cmd *baseCmd) Name() string {
- if len(cmd.args) == 0 {
- return ""
- }
- // Cmd name must be lower cased.
- return internal.ToLower(cmd.stringArg(0))
-}
-
-func (cmd *baseCmd) FullName() string {
- switch name := cmd.Name(); name {
- case "cluster", "command":
- if len(cmd.args) == 1 {
- return name
- }
- if s2, ok := cmd.args[1].(string); ok {
- return name + " " + s2
- }
- return name
- default:
- return name
- }
-}
-
-func (cmd *baseCmd) Args() []interface{} {
- return cmd.args
-}
-
-func (cmd *baseCmd) stringArg(pos int) string {
- if pos < 0 || pos >= len(cmd.args) {
- return ""
- }
- arg := cmd.args[pos]
- switch v := arg.(type) {
- case string:
- return v
- default:
- // TODO: consider using appendArg
- return fmt.Sprint(v)
- }
-}
-
-func (cmd *baseCmd) firstKeyPos() int8 {
- return cmd.keyPos
-}
-
-func (cmd *baseCmd) SetFirstKeyPos(keyPos int8) {
- cmd.keyPos = keyPos
-}
-
-func (cmd *baseCmd) SetErr(e error) {
- cmd.err = e
-}
-
-func (cmd *baseCmd) Err() error {
- return cmd.err
-}
-
-func (cmd *baseCmd) readTimeout() *time.Duration {
- return cmd._readTimeout
-}
-
-func (cmd *baseCmd) setReadTimeout(d time.Duration) {
- cmd._readTimeout = &d
-}
-
-//------------------------------------------------------------------------------
-
-type Cmd struct {
- baseCmd
-
- val interface{}
-}
-
-func NewCmd(ctx context.Context, args ...interface{}) *Cmd {
- return &Cmd{
- baseCmd: baseCmd{
- ctx: ctx,
- args: args,
- },
- }
-}
-
-func (cmd *Cmd) String() string {
- return cmdString(cmd, cmd.val)
-}
-
-func (cmd *Cmd) SetVal(val interface{}) {
- cmd.val = val
-}
-
-func (cmd *Cmd) Val() interface{} {
- return cmd.val
-}
-
-func (cmd *Cmd) Result() (interface{}, error) {
- return cmd.val, cmd.err
-}
-
-func (cmd *Cmd) Text() (string, error) {
- if cmd.err != nil {
- return "", cmd.err
- }
- return toString(cmd.val)
-}
-
-func toString(val interface{}) (string, error) {
- switch val := val.(type) {
- case string:
- return val, nil
- default:
- err := fmt.Errorf("redis: unexpected type=%T for String", val)
- return "", err
- }
-}
-
-func (cmd *Cmd) Int() (int, error) {
- if cmd.err != nil {
- return 0, cmd.err
- }
- switch val := cmd.val.(type) {
- case int64:
- return int(val), nil
- case string:
- return strconv.Atoi(val)
- default:
- err := fmt.Errorf("redis: unexpected type=%T for Int", val)
- return 0, err
- }
-}
-
-func (cmd *Cmd) Int64() (int64, error) {
- if cmd.err != nil {
- return 0, cmd.err
- }
- return toInt64(cmd.val)
-}
-
-func toInt64(val interface{}) (int64, error) {
- switch val := val.(type) {
- case int64:
- return val, nil
- case string:
- return strconv.ParseInt(val, 10, 64)
- default:
- err := fmt.Errorf("redis: unexpected type=%T for Int64", val)
- return 0, err
- }
-}
-
-func (cmd *Cmd) Uint64() (uint64, error) {
- if cmd.err != nil {
- return 0, cmd.err
- }
- return toUint64(cmd.val)
-}
-
-func toUint64(val interface{}) (uint64, error) {
- switch val := val.(type) {
- case int64:
- return uint64(val), nil
- case string:
- return strconv.ParseUint(val, 10, 64)
- default:
- err := fmt.Errorf("redis: unexpected type=%T for Uint64", val)
- return 0, err
- }
-}
-
-func (cmd *Cmd) Float32() (float32, error) {
- if cmd.err != nil {
- return 0, cmd.err
- }
- return toFloat32(cmd.val)
-}
-
-func toFloat32(val interface{}) (float32, error) {
- switch val := val.(type) {
- case int64:
- return float32(val), nil
- case string:
- f, err := strconv.ParseFloat(val, 32)
- if err != nil {
- return 0, err
- }
- return float32(f), nil
- default:
- err := fmt.Errorf("redis: unexpected type=%T for Float32", val)
- return 0, err
- }
-}
-
-func (cmd *Cmd) Float64() (float64, error) {
- if cmd.err != nil {
- return 0, cmd.err
- }
- return toFloat64(cmd.val)
-}
-
-func toFloat64(val interface{}) (float64, error) {
- switch val := val.(type) {
- case int64:
- return float64(val), nil
- case string:
- return strconv.ParseFloat(val, 64)
- default:
- err := fmt.Errorf("redis: unexpected type=%T for Float64", val)
- return 0, err
- }
-}
-
-func (cmd *Cmd) Bool() (bool, error) {
- if cmd.err != nil {
- return false, cmd.err
- }
- return toBool(cmd.val)
-}
-
-func toBool(val interface{}) (bool, error) {
- switch val := val.(type) {
- case int64:
- return val != 0, nil
- case string:
- return strconv.ParseBool(val)
- default:
- err := fmt.Errorf("redis: unexpected type=%T for Bool", val)
- return false, err
- }
-}
-
-func (cmd *Cmd) Slice() ([]interface{}, error) {
- if cmd.err != nil {
- return nil, cmd.err
- }
- switch val := cmd.val.(type) {
- case []interface{}:
- return val, nil
- default:
- return nil, fmt.Errorf("redis: unexpected type=%T for Slice", val)
- }
-}
-
-func (cmd *Cmd) StringSlice() ([]string, error) {
- slice, err := cmd.Slice()
- if err != nil {
- return nil, err
- }
-
- ss := make([]string, len(slice))
- for i, iface := range slice {
- val, err := toString(iface)
- if err != nil {
- return nil, err
- }
- ss[i] = val
- }
- return ss, nil
-}
-
-func (cmd *Cmd) Int64Slice() ([]int64, error) {
- slice, err := cmd.Slice()
- if err != nil {
- return nil, err
- }
-
- nums := make([]int64, len(slice))
- for i, iface := range slice {
- val, err := toInt64(iface)
- if err != nil {
- return nil, err
- }
- nums[i] = val
- }
- return nums, nil
-}
-
-func (cmd *Cmd) Uint64Slice() ([]uint64, error) {
- slice, err := cmd.Slice()
- if err != nil {
- return nil, err
- }
-
- nums := make([]uint64, len(slice))
- for i, iface := range slice {
- val, err := toUint64(iface)
- if err != nil {
- return nil, err
- }
- nums[i] = val
- }
- return nums, nil
-}
-
-func (cmd *Cmd) Float32Slice() ([]float32, error) {
- slice, err := cmd.Slice()
- if err != nil {
- return nil, err
- }
-
- floats := make([]float32, len(slice))
- for i, iface := range slice {
- val, err := toFloat32(iface)
- if err != nil {
- return nil, err
- }
- floats[i] = val
- }
- return floats, nil
-}
-
-func (cmd *Cmd) Float64Slice() ([]float64, error) {
- slice, err := cmd.Slice()
- if err != nil {
- return nil, err
- }
-
- floats := make([]float64, len(slice))
- for i, iface := range slice {
- val, err := toFloat64(iface)
- if err != nil {
- return nil, err
- }
- floats[i] = val
- }
- return floats, nil
-}
-
-func (cmd *Cmd) BoolSlice() ([]bool, error) {
- slice, err := cmd.Slice()
- if err != nil {
- return nil, err
- }
-
- bools := make([]bool, len(slice))
- for i, iface := range slice {
- val, err := toBool(iface)
- if err != nil {
- return nil, err
- }
- bools[i] = val
- }
- return bools, nil
-}
-
-func (cmd *Cmd) readReply(rd *proto.Reader) (err error) {
- cmd.val, err = rd.ReadReply(sliceParser)
- return err
-}
-
-// sliceParser implements proto.MultiBulkParse.
-func sliceParser(rd *proto.Reader, n int64) (interface{}, error) {
- vals := make([]interface{}, n)
- for i := 0; i < len(vals); i++ {
- v, err := rd.ReadReply(sliceParser)
- if err != nil {
- if err == Nil {
- vals[i] = nil
- continue
- }
- if err, ok := err.(proto.RedisError); ok {
- vals[i] = err
- continue
- }
- return nil, err
- }
- vals[i] = v
- }
- return vals, nil
-}
-
-//------------------------------------------------------------------------------
-
-type SliceCmd struct {
- baseCmd
-
- val []interface{}
-}
-
-var _ Cmder = (*SliceCmd)(nil)
-
-func NewSliceCmd(ctx context.Context, args ...interface{}) *SliceCmd {
- return &SliceCmd{
- baseCmd: baseCmd{
- ctx: ctx,
- args: args,
- },
- }
-}
-
-func (cmd *SliceCmd) SetVal(val []interface{}) {
- cmd.val = val
-}
-
-func (cmd *SliceCmd) Val() []interface{} {
- return cmd.val
-}
-
-func (cmd *SliceCmd) Result() ([]interface{}, error) {
- return cmd.val, cmd.err
-}
-
-func (cmd *SliceCmd) String() string {
- return cmdString(cmd, cmd.val)
-}
-
-// Scan scans the results from the map into a destination struct. The map keys
-// are matched in the Redis struct fields by the `redis:"field"` tag.
-func (cmd *SliceCmd) Scan(dst interface{}) error {
- if cmd.err != nil {
- return cmd.err
- }
-
- // Pass the list of keys and values.
- // Skip the first two args for: HMGET key
- var args []interface{}
- if cmd.args[0] == "hmget" {
- args = cmd.args[2:]
- } else {
- // Otherwise, it's: MGET field field ...
- args = cmd.args[1:]
- }
-
- return hscan.Scan(dst, args, cmd.val)
-}
-
-func (cmd *SliceCmd) readReply(rd *proto.Reader) error {
- v, err := rd.ReadArrayReply(sliceParser)
- if err != nil {
- return err
- }
- cmd.val = v.([]interface{})
- return nil
-}
-
-//------------------------------------------------------------------------------
-
-type StatusCmd struct {
- baseCmd
-
- val string
-}
-
-var _ Cmder = (*StatusCmd)(nil)
-
-func NewStatusCmd(ctx context.Context, args ...interface{}) *StatusCmd {
- return &StatusCmd{
- baseCmd: baseCmd{
- ctx: ctx,
- args: args,
- },
- }
-}
-
-func (cmd *StatusCmd) SetVal(val string) {
- cmd.val = val
-}
-
-func (cmd *StatusCmd) Val() string {
- return cmd.val
-}
-
-func (cmd *StatusCmd) Result() (string, error) {
- return cmd.val, cmd.err
-}
-
-func (cmd *StatusCmd) String() string {
- return cmdString(cmd, cmd.val)
-}
-
-func (cmd *StatusCmd) readReply(rd *proto.Reader) (err error) {
- cmd.val, err = rd.ReadString()
- return err
-}
-
-//------------------------------------------------------------------------------
-
-type IntCmd struct {
- baseCmd
-
- val int64
-}
-
-var _ Cmder = (*IntCmd)(nil)
-
-func NewIntCmd(ctx context.Context, args ...interface{}) *IntCmd {
- return &IntCmd{
- baseCmd: baseCmd{
- ctx: ctx,
- args: args,
- },
- }
-}
-
-func (cmd *IntCmd) SetVal(val int64) {
- cmd.val = val
-}
-
-func (cmd *IntCmd) Val() int64 {
- return cmd.val
-}
-
-func (cmd *IntCmd) Result() (int64, error) {
- return cmd.val, cmd.err
-}
-
-func (cmd *IntCmd) Uint64() (uint64, error) {
- return uint64(cmd.val), cmd.err
-}
-
-func (cmd *IntCmd) String() string {
- return cmdString(cmd, cmd.val)
-}
-
-func (cmd *IntCmd) readReply(rd *proto.Reader) (err error) {
- cmd.val, err = rd.ReadIntReply()
- return err
-}
-
-//------------------------------------------------------------------------------
-
-type IntSliceCmd struct {
- baseCmd
-
- val []int64
-}
-
-var _ Cmder = (*IntSliceCmd)(nil)
-
-func NewIntSliceCmd(ctx context.Context, args ...interface{}) *IntSliceCmd {
- return &IntSliceCmd{
- baseCmd: baseCmd{
- ctx: ctx,
- args: args,
- },
- }
-}
-
-func (cmd *IntSliceCmd) SetVal(val []int64) {
- cmd.val = val
-}
-
-func (cmd *IntSliceCmd) Val() []int64 {
- return cmd.val
-}
-
-func (cmd *IntSliceCmd) Result() ([]int64, error) {
- return cmd.val, cmd.err
-}
-
-func (cmd *IntSliceCmd) String() string {
- return cmdString(cmd, cmd.val)
-}
-
-func (cmd *IntSliceCmd) readReply(rd *proto.Reader) error {
- _, err := rd.ReadArrayReply(func(rd *proto.Reader, n int64) (interface{}, error) {
- cmd.val = make([]int64, n)
- for i := 0; i < len(cmd.val); i++ {
- num, err := rd.ReadIntReply()
- if err != nil {
- return nil, err
- }
- cmd.val[i] = num
- }
- return nil, nil
- })
- return err
-}
-
-//------------------------------------------------------------------------------
-
-type DurationCmd struct {
- baseCmd
-
- val time.Duration
- precision time.Duration
-}
-
-var _ Cmder = (*DurationCmd)(nil)
-
-func NewDurationCmd(ctx context.Context, precision time.Duration, args ...interface{}) *DurationCmd {
- return &DurationCmd{
- baseCmd: baseCmd{
- ctx: ctx,
- args: args,
- },
- precision: precision,
- }
-}
-
-func (cmd *DurationCmd) SetVal(val time.Duration) {
- cmd.val = val
-}
-
-func (cmd *DurationCmd) Val() time.Duration {
- return cmd.val
-}
-
-func (cmd *DurationCmd) Result() (time.Duration, error) {
- return cmd.val, cmd.err
-}
-
-func (cmd *DurationCmd) String() string {
- return cmdString(cmd, cmd.val)
-}
-
-func (cmd *DurationCmd) readReply(rd *proto.Reader) error {
- n, err := rd.ReadIntReply()
- if err != nil {
- return err
- }
- switch n {
- // -2 if the key does not exist
- // -1 if the key exists but has no associated expire
- case -2, -1:
- cmd.val = time.Duration(n)
- default:
- cmd.val = time.Duration(n) * cmd.precision
- }
- return nil
-}
-
-//------------------------------------------------------------------------------
-
-type TimeCmd struct {
- baseCmd
-
- val time.Time
-}
-
-var _ Cmder = (*TimeCmd)(nil)
-
-func NewTimeCmd(ctx context.Context, args ...interface{}) *TimeCmd {
- return &TimeCmd{
- baseCmd: baseCmd{
- ctx: ctx,
- args: args,
- },
- }
-}
-
-func (cmd *TimeCmd) SetVal(val time.Time) {
- cmd.val = val
-}
-
-func (cmd *TimeCmd) Val() time.Time {
- return cmd.val
-}
-
-func (cmd *TimeCmd) Result() (time.Time, error) {
- return cmd.val, cmd.err
-}
-
-func (cmd *TimeCmd) String() string {
- return cmdString(cmd, cmd.val)
-}
-
-func (cmd *TimeCmd) readReply(rd *proto.Reader) error {
- _, err := rd.ReadArrayReply(func(rd *proto.Reader, n int64) (interface{}, error) {
- if n != 2 {
- return nil, fmt.Errorf("got %d elements, expected 2", n)
- }
-
- sec, err := rd.ReadInt()
- if err != nil {
- return nil, err
- }
-
- microsec, err := rd.ReadInt()
- if err != nil {
- return nil, err
- }
-
- cmd.val = time.Unix(sec, microsec*1000)
- return nil, nil
- })
- return err
-}
-
-//------------------------------------------------------------------------------
-
-type BoolCmd struct {
- baseCmd
-
- val bool
-}
-
-var _ Cmder = (*BoolCmd)(nil)
-
-func NewBoolCmd(ctx context.Context, args ...interface{}) *BoolCmd {
- return &BoolCmd{
- baseCmd: baseCmd{
- ctx: ctx,
- args: args,
- },
- }
-}
-
-func (cmd *BoolCmd) SetVal(val bool) {
- cmd.val = val
-}
-
-func (cmd *BoolCmd) Val() bool {
- return cmd.val
-}
-
-func (cmd *BoolCmd) Result() (bool, error) {
- return cmd.val, cmd.err
-}
-
-func (cmd *BoolCmd) String() string {
- return cmdString(cmd, cmd.val)
-}
-
-func (cmd *BoolCmd) readReply(rd *proto.Reader) error {
- v, err := rd.ReadReply(nil)
- // `SET key value NX` returns nil when key already exists. But
- // `SETNX key value` returns bool (0/1). So convert nil to bool.
- if err == Nil {
- cmd.val = false
- return nil
- }
- if err != nil {
- return err
- }
- switch v := v.(type) {
- case int64:
- cmd.val = v == 1
- return nil
- case string:
- cmd.val = v == "OK"
- return nil
- default:
- return fmt.Errorf("got %T, wanted int64 or string", v)
- }
-}
-
-//------------------------------------------------------------------------------
-
-type StringCmd struct {
- baseCmd
-
- val string
-}
-
-var _ Cmder = (*StringCmd)(nil)
-
-func NewStringCmd(ctx context.Context, args ...interface{}) *StringCmd {
- return &StringCmd{
- baseCmd: baseCmd{
- ctx: ctx,
- args: args,
- },
- }
-}
-
-func (cmd *StringCmd) SetVal(val string) {
- cmd.val = val
-}
-
-func (cmd *StringCmd) Val() string {
- return cmd.val
-}
-
-func (cmd *StringCmd) Result() (string, error) {
- return cmd.Val(), cmd.err
-}
-
-func (cmd *StringCmd) Bytes() ([]byte, error) {
- return util.StringToBytes(cmd.val), cmd.err
-}
-
-func (cmd *StringCmd) Bool() (bool, error) {
- if cmd.err != nil {
- return false, cmd.err
- }
- return strconv.ParseBool(cmd.val)
-}
-
-func (cmd *StringCmd) Int() (int, error) {
- if cmd.err != nil {
- return 0, cmd.err
- }
- return strconv.Atoi(cmd.Val())
-}
-
-func (cmd *StringCmd) Int64() (int64, error) {
- if cmd.err != nil {
- return 0, cmd.err
- }
- return strconv.ParseInt(cmd.Val(), 10, 64)
-}
-
-func (cmd *StringCmd) Uint64() (uint64, error) {
- if cmd.err != nil {
- return 0, cmd.err
- }
- return strconv.ParseUint(cmd.Val(), 10, 64)
-}
-
-func (cmd *StringCmd) Float32() (float32, error) {
- if cmd.err != nil {
- return 0, cmd.err
- }
- f, err := strconv.ParseFloat(cmd.Val(), 32)
- if err != nil {
- return 0, err
- }
- return float32(f), nil
-}
-
-func (cmd *StringCmd) Float64() (float64, error) {
- if cmd.err != nil {
- return 0, cmd.err
- }
- return strconv.ParseFloat(cmd.Val(), 64)
-}
-
-func (cmd *StringCmd) Time() (time.Time, error) {
- if cmd.err != nil {
- return time.Time{}, cmd.err
- }
- return time.Parse(time.RFC3339Nano, cmd.Val())
-}
-
-func (cmd *StringCmd) Scan(val interface{}) error {
- if cmd.err != nil {
- return cmd.err
- }
- return proto.Scan([]byte(cmd.val), val)
-}
-
-func (cmd *StringCmd) String() string {
- return cmdString(cmd, cmd.val)
-}
-
-func (cmd *StringCmd) readReply(rd *proto.Reader) (err error) {
- cmd.val, err = rd.ReadString()
- return err
-}
-
-//------------------------------------------------------------------------------
-
-type FloatCmd struct {
- baseCmd
-
- val float64
-}
-
-var _ Cmder = (*FloatCmd)(nil)
-
-func NewFloatCmd(ctx context.Context, args ...interface{}) *FloatCmd {
- return &FloatCmd{
- baseCmd: baseCmd{
- ctx: ctx,
- args: args,
- },
- }
-}
-
-func (cmd *FloatCmd) SetVal(val float64) {
- cmd.val = val
-}
-
-func (cmd *FloatCmd) Val() float64 {
- return cmd.val
-}
-
-func (cmd *FloatCmd) Result() (float64, error) {
- return cmd.Val(), cmd.Err()
-}
-
-func (cmd *FloatCmd) String() string {
- return cmdString(cmd, cmd.val)
-}
-
-func (cmd *FloatCmd) readReply(rd *proto.Reader) (err error) {
- cmd.val, err = rd.ReadFloatReply()
- return err
-}
-
-//------------------------------------------------------------------------------
-
-type FloatSliceCmd struct {
- baseCmd
-
- val []float64
-}
-
-var _ Cmder = (*FloatSliceCmd)(nil)
-
-func NewFloatSliceCmd(ctx context.Context, args ...interface{}) *FloatSliceCmd {
- return &FloatSliceCmd{
- baseCmd: baseCmd{
- ctx: ctx,
- args: args,
- },
- }
-}
-
-func (cmd *FloatSliceCmd) SetVal(val []float64) {
- cmd.val = val
-}
-
-func (cmd *FloatSliceCmd) Val() []float64 {
- return cmd.val
-}
-
-func (cmd *FloatSliceCmd) Result() ([]float64, error) {
- return cmd.val, cmd.err
-}
-
-func (cmd *FloatSliceCmd) String() string {
- return cmdString(cmd, cmd.val)
-}
-
-func (cmd *FloatSliceCmd) readReply(rd *proto.Reader) error {
- _, err := rd.ReadArrayReply(func(rd *proto.Reader, n int64) (interface{}, error) {
- cmd.val = make([]float64, n)
- for i := 0; i < len(cmd.val); i++ {
- switch num, err := rd.ReadFloatReply(); {
- case err == Nil:
- cmd.val[i] = 0
- case err != nil:
- return nil, err
- default:
- cmd.val[i] = num
- }
- }
- return nil, nil
- })
- return err
-}
-
-//------------------------------------------------------------------------------
-
-type StringSliceCmd struct {
- baseCmd
-
- val []string
-}
-
-var _ Cmder = (*StringSliceCmd)(nil)
-
-func NewStringSliceCmd(ctx context.Context, args ...interface{}) *StringSliceCmd {
- return &StringSliceCmd{
- baseCmd: baseCmd{
- ctx: ctx,
- args: args,
- },
- }
-}
-
-func (cmd *StringSliceCmd) SetVal(val []string) {
- cmd.val = val
-}
-
-func (cmd *StringSliceCmd) Val() []string {
- return cmd.val
-}
-
-func (cmd *StringSliceCmd) Result() ([]string, error) {
- return cmd.Val(), cmd.Err()
-}
-
-func (cmd *StringSliceCmd) String() string {
- return cmdString(cmd, cmd.val)
-}
-
-func (cmd *StringSliceCmd) ScanSlice(container interface{}) error {
- return proto.ScanSlice(cmd.Val(), container)
-}
-
-func (cmd *StringSliceCmd) readReply(rd *proto.Reader) error {
- _, err := rd.ReadArrayReply(func(rd *proto.Reader, n int64) (interface{}, error) {
- cmd.val = make([]string, n)
- for i := 0; i < len(cmd.val); i++ {
- switch s, err := rd.ReadString(); {
- case err == Nil:
- cmd.val[i] = ""
- case err != nil:
- return nil, err
- default:
- cmd.val[i] = s
- }
- }
- return nil, nil
- })
- return err
-}
-
-//------------------------------------------------------------------------------
-
-type BoolSliceCmd struct {
- baseCmd
-
- val []bool
-}
-
-var _ Cmder = (*BoolSliceCmd)(nil)
-
-func NewBoolSliceCmd(ctx context.Context, args ...interface{}) *BoolSliceCmd {
- return &BoolSliceCmd{
- baseCmd: baseCmd{
- ctx: ctx,
- args: args,
- },
- }
-}
-
-func (cmd *BoolSliceCmd) SetVal(val []bool) {
- cmd.val = val
-}
-
-func (cmd *BoolSliceCmd) Val() []bool {
- return cmd.val
-}
-
-func (cmd *BoolSliceCmd) Result() ([]bool, error) {
- return cmd.val, cmd.err
-}
-
-func (cmd *BoolSliceCmd) String() string {
- return cmdString(cmd, cmd.val)
-}
-
-func (cmd *BoolSliceCmd) readReply(rd *proto.Reader) error {
- _, err := rd.ReadArrayReply(func(rd *proto.Reader, n int64) (interface{}, error) {
- cmd.val = make([]bool, n)
- for i := 0; i < len(cmd.val); i++ {
- n, err := rd.ReadIntReply()
- if err != nil {
- return nil, err
- }
- cmd.val[i] = n == 1
- }
- return nil, nil
- })
- return err
-}
-
-//------------------------------------------------------------------------------
-
-type StringStringMapCmd struct {
- baseCmd
-
- val map[string]string
-}
-
-var _ Cmder = (*StringStringMapCmd)(nil)
-
-func NewStringStringMapCmd(ctx context.Context, args ...interface{}) *StringStringMapCmd {
- return &StringStringMapCmd{
- baseCmd: baseCmd{
- ctx: ctx,
- args: args,
- },
- }
-}
-
-func (cmd *StringStringMapCmd) SetVal(val map[string]string) {
- cmd.val = val
-}
-
-func (cmd *StringStringMapCmd) Val() map[string]string {
- return cmd.val
-}
-
-func (cmd *StringStringMapCmd) Result() (map[string]string, error) {
- return cmd.val, cmd.err
-}
-
-func (cmd *StringStringMapCmd) String() string {
- return cmdString(cmd, cmd.val)
-}
-
-// Scan scans the results from the map into a destination struct. The map keys
-// are matched in the Redis struct fields by the `redis:"field"` tag.
-func (cmd *StringStringMapCmd) Scan(dest interface{}) error {
- if cmd.err != nil {
- return cmd.err
- }
-
- strct, err := hscan.Struct(dest)
- if err != nil {
- return err
- }
-
- for k, v := range cmd.val {
- if err := strct.Scan(k, v); err != nil {
- return err
- }
- }
-
- return nil
-}
-
-func (cmd *StringStringMapCmd) readReply(rd *proto.Reader) error {
- _, err := rd.ReadArrayReply(func(rd *proto.Reader, n int64) (interface{}, error) {
- cmd.val = make(map[string]string, n/2)
- for i := int64(0); i < n; i += 2 {
- key, err := rd.ReadString()
- if err != nil {
- return nil, err
- }
-
- value, err := rd.ReadString()
- if err != nil {
- return nil, err
- }
-
- cmd.val[key] = value
- }
- return nil, nil
- })
- return err
-}
-
-//------------------------------------------------------------------------------
-
-type StringIntMapCmd struct {
- baseCmd
-
- val map[string]int64
-}
-
-var _ Cmder = (*StringIntMapCmd)(nil)
-
-func NewStringIntMapCmd(ctx context.Context, args ...interface{}) *StringIntMapCmd {
- return &StringIntMapCmd{
- baseCmd: baseCmd{
- ctx: ctx,
- args: args,
- },
- }
-}
-
-func (cmd *StringIntMapCmd) SetVal(val map[string]int64) {
- cmd.val = val
-}
-
-func (cmd *StringIntMapCmd) Val() map[string]int64 {
- return cmd.val
-}
-
-func (cmd *StringIntMapCmd) Result() (map[string]int64, error) {
- return cmd.val, cmd.err
-}
-
-func (cmd *StringIntMapCmd) String() string {
- return cmdString(cmd, cmd.val)
-}
-
-func (cmd *StringIntMapCmd) readReply(rd *proto.Reader) error {
- _, err := rd.ReadArrayReply(func(rd *proto.Reader, n int64) (interface{}, error) {
- cmd.val = make(map[string]int64, n/2)
- for i := int64(0); i < n; i += 2 {
- key, err := rd.ReadString()
- if err != nil {
- return nil, err
- }
-
- n, err := rd.ReadIntReply()
- if err != nil {
- return nil, err
- }
-
- cmd.val[key] = n
- }
- return nil, nil
- })
- return err
-}
-
-//------------------------------------------------------------------------------
-
-type StringStructMapCmd struct {
- baseCmd
-
- val map[string]struct{}
-}
-
-var _ Cmder = (*StringStructMapCmd)(nil)
-
-func NewStringStructMapCmd(ctx context.Context, args ...interface{}) *StringStructMapCmd {
- return &StringStructMapCmd{
- baseCmd: baseCmd{
- ctx: ctx,
- args: args,
- },
- }
-}
-
-func (cmd *StringStructMapCmd) SetVal(val map[string]struct{}) {
- cmd.val = val
-}
-
-func (cmd *StringStructMapCmd) Val() map[string]struct{} {
- return cmd.val
-}
-
-func (cmd *StringStructMapCmd) Result() (map[string]struct{}, error) {
- return cmd.val, cmd.err
-}
-
-func (cmd *StringStructMapCmd) String() string {
- return cmdString(cmd, cmd.val)
-}
-
-func (cmd *StringStructMapCmd) readReply(rd *proto.Reader) error {
- _, err := rd.ReadArrayReply(func(rd *proto.Reader, n int64) (interface{}, error) {
- cmd.val = make(map[string]struct{}, n)
- for i := int64(0); i < n; i++ {
- key, err := rd.ReadString()
- if err != nil {
- return nil, err
- }
- cmd.val[key] = struct{}{}
- }
- return nil, nil
- })
- return err
-}
-
-//------------------------------------------------------------------------------
-
-type XMessage struct {
- ID string
- Values map[string]interface{}
-}
-
-type XMessageSliceCmd struct {
- baseCmd
-
- val []XMessage
-}
-
-var _ Cmder = (*XMessageSliceCmd)(nil)
-
-func NewXMessageSliceCmd(ctx context.Context, args ...interface{}) *XMessageSliceCmd {
- return &XMessageSliceCmd{
- baseCmd: baseCmd{
- ctx: ctx,
- args: args,
- },
- }
-}
-
-func (cmd *XMessageSliceCmd) SetVal(val []XMessage) {
- cmd.val = val
-}
-
-func (cmd *XMessageSliceCmd) Val() []XMessage {
- return cmd.val
-}
-
-func (cmd *XMessageSliceCmd) Result() ([]XMessage, error) {
- return cmd.val, cmd.err
-}
-
-func (cmd *XMessageSliceCmd) String() string {
- return cmdString(cmd, cmd.val)
-}
-
-func (cmd *XMessageSliceCmd) readReply(rd *proto.Reader) error {
- var err error
- cmd.val, err = readXMessageSlice(rd)
- return err
-}
-
-func readXMessageSlice(rd *proto.Reader) ([]XMessage, error) {
- n, err := rd.ReadArrayLen()
- if err != nil {
- return nil, err
- }
-
- msgs := make([]XMessage, n)
- for i := 0; i < n; i++ {
- var err error
- msgs[i], err = readXMessage(rd)
- if err != nil {
- return nil, err
- }
- }
- return msgs, nil
-}
-
-func readXMessage(rd *proto.Reader) (XMessage, error) {
- n, err := rd.ReadArrayLen()
- if err != nil {
- return XMessage{}, err
- }
- if n != 2 {
- return XMessage{}, fmt.Errorf("got %d, wanted 2", n)
- }
-
- id, err := rd.ReadString()
- if err != nil {
- return XMessage{}, err
- }
-
- var values map[string]interface{}
-
- v, err := rd.ReadArrayReply(stringInterfaceMapParser)
- if err != nil {
- if err != proto.Nil {
- return XMessage{}, err
- }
- } else {
- values = v.(map[string]interface{})
- }
-
- return XMessage{
- ID: id,
- Values: values,
- }, nil
-}
-
-// stringInterfaceMapParser implements proto.MultiBulkParse.
-func stringInterfaceMapParser(rd *proto.Reader, n int64) (interface{}, error) {
- m := make(map[string]interface{}, n/2)
- for i := int64(0); i < n; i += 2 {
- key, err := rd.ReadString()
- if err != nil {
- return nil, err
- }
-
- value, err := rd.ReadString()
- if err != nil {
- return nil, err
- }
-
- m[key] = value
- }
- return m, nil
-}
-
-//------------------------------------------------------------------------------
-
-type XStream struct {
- Stream string
- Messages []XMessage
-}
-
-type XStreamSliceCmd struct {
- baseCmd
-
- val []XStream
-}
-
-var _ Cmder = (*XStreamSliceCmd)(nil)
-
-func NewXStreamSliceCmd(ctx context.Context, args ...interface{}) *XStreamSliceCmd {
- return &XStreamSliceCmd{
- baseCmd: baseCmd{
- ctx: ctx,
- args: args,
- },
- }
-}
-
-func (cmd *XStreamSliceCmd) SetVal(val []XStream) {
- cmd.val = val
-}
-
-func (cmd *XStreamSliceCmd) Val() []XStream {
- return cmd.val
-}
-
-func (cmd *XStreamSliceCmd) Result() ([]XStream, error) {
- return cmd.val, cmd.err
-}
-
-func (cmd *XStreamSliceCmd) String() string {
- return cmdString(cmd, cmd.val)
-}
-
-func (cmd *XStreamSliceCmd) readReply(rd *proto.Reader) error {
- _, err := rd.ReadArrayReply(func(rd *proto.Reader, n int64) (interface{}, error) {
- cmd.val = make([]XStream, n)
- for i := 0; i < len(cmd.val); i++ {
- i := i
- _, err := rd.ReadArrayReply(func(rd *proto.Reader, n int64) (interface{}, error) {
- if n != 2 {
- return nil, fmt.Errorf("got %d, wanted 2", n)
- }
-
- stream, err := rd.ReadString()
- if err != nil {
- return nil, err
- }
-
- msgs, err := readXMessageSlice(rd)
- if err != nil {
- return nil, err
- }
-
- cmd.val[i] = XStream{
- Stream: stream,
- Messages: msgs,
- }
- return nil, nil
- })
- if err != nil {
- return nil, err
- }
- }
- return nil, nil
- })
- return err
-}
-
-//------------------------------------------------------------------------------
-
-type XPending struct {
- Count int64
- Lower string
- Higher string
- Consumers map[string]int64
-}
-
-type XPendingCmd struct {
- baseCmd
- val *XPending
-}
-
-var _ Cmder = (*XPendingCmd)(nil)
-
-func NewXPendingCmd(ctx context.Context, args ...interface{}) *XPendingCmd {
- return &XPendingCmd{
- baseCmd: baseCmd{
- ctx: ctx,
- args: args,
- },
- }
-}
-
-func (cmd *XPendingCmd) SetVal(val *XPending) {
- cmd.val = val
-}
-
-func (cmd *XPendingCmd) Val() *XPending {
- return cmd.val
-}
-
-func (cmd *XPendingCmd) Result() (*XPending, error) {
- return cmd.val, cmd.err
-}
-
-func (cmd *XPendingCmd) String() string {
- return cmdString(cmd, cmd.val)
-}
-
-func (cmd *XPendingCmd) readReply(rd *proto.Reader) error {
- _, err := rd.ReadArrayReply(func(rd *proto.Reader, n int64) (interface{}, error) {
- if n != 4 {
- return nil, fmt.Errorf("got %d, wanted 4", n)
- }
-
- count, err := rd.ReadIntReply()
- if err != nil {
- return nil, err
- }
-
- lower, err := rd.ReadString()
- if err != nil && err != Nil {
- return nil, err
- }
-
- higher, err := rd.ReadString()
- if err != nil && err != Nil {
- return nil, err
- }
-
- cmd.val = &XPending{
- Count: count,
- Lower: lower,
- Higher: higher,
- }
- _, err = rd.ReadArrayReply(func(rd *proto.Reader, n int64) (interface{}, error) {
- for i := int64(0); i < n; i++ {
- _, err = rd.ReadArrayReply(func(rd *proto.Reader, n int64) (interface{}, error) {
- if n != 2 {
- return nil, fmt.Errorf("got %d, wanted 2", n)
- }
-
- consumerName, err := rd.ReadString()
- if err != nil {
- return nil, err
- }
-
- consumerPending, err := rd.ReadInt()
- if err != nil {
- return nil, err
- }
-
- if cmd.val.Consumers == nil {
- cmd.val.Consumers = make(map[string]int64)
- }
- cmd.val.Consumers[consumerName] = consumerPending
-
- return nil, nil
- })
- if err != nil {
- return nil, err
- }
- }
- return nil, nil
- })
- if err != nil && err != Nil {
- return nil, err
- }
-
- return nil, nil
- })
- return err
-}
-
-//------------------------------------------------------------------------------
-
-type XPendingExt struct {
- ID string
- Consumer string
- Idle time.Duration
- RetryCount int64
-}
-
-type XPendingExtCmd struct {
- baseCmd
- val []XPendingExt
-}
-
-var _ Cmder = (*XPendingExtCmd)(nil)
-
-func NewXPendingExtCmd(ctx context.Context, args ...interface{}) *XPendingExtCmd {
- return &XPendingExtCmd{
- baseCmd: baseCmd{
- ctx: ctx,
- args: args,
- },
- }
-}
-
-func (cmd *XPendingExtCmd) SetVal(val []XPendingExt) {
- cmd.val = val
-}
-
-func (cmd *XPendingExtCmd) Val() []XPendingExt {
- return cmd.val
-}
-
-func (cmd *XPendingExtCmd) Result() ([]XPendingExt, error) {
- return cmd.val, cmd.err
-}
-
-func (cmd *XPendingExtCmd) String() string {
- return cmdString(cmd, cmd.val)
-}
-
-func (cmd *XPendingExtCmd) readReply(rd *proto.Reader) error {
- _, err := rd.ReadArrayReply(func(rd *proto.Reader, n int64) (interface{}, error) {
- cmd.val = make([]XPendingExt, 0, n)
- for i := int64(0); i < n; i++ {
- _, err := rd.ReadArrayReply(func(rd *proto.Reader, n int64) (interface{}, error) {
- if n != 4 {
- return nil, fmt.Errorf("got %d, wanted 4", n)
- }
-
- id, err := rd.ReadString()
- if err != nil {
- return nil, err
- }
-
- consumer, err := rd.ReadString()
- if err != nil && err != Nil {
- return nil, err
- }
-
- idle, err := rd.ReadIntReply()
- if err != nil && err != Nil {
- return nil, err
- }
-
- retryCount, err := rd.ReadIntReply()
- if err != nil && err != Nil {
- return nil, err
- }
-
- cmd.val = append(cmd.val, XPendingExt{
- ID: id,
- Consumer: consumer,
- Idle: time.Duration(idle) * time.Millisecond,
- RetryCount: retryCount,
- })
- return nil, nil
- })
- if err != nil {
- return nil, err
- }
- }
- return nil, nil
- })
- return err
-}
-
-//------------------------------------------------------------------------------
-
-type XAutoClaimCmd struct {
- baseCmd
-
- start string
- val []XMessage
-}
-
-var _ Cmder = (*XAutoClaimCmd)(nil)
-
-func NewXAutoClaimCmd(ctx context.Context, args ...interface{}) *XAutoClaimCmd {
- return &XAutoClaimCmd{
- baseCmd: baseCmd{
- ctx: ctx,
- args: args,
- },
- }
-}
-
-func (cmd *XAutoClaimCmd) SetVal(val []XMessage, start string) {
- cmd.val = val
- cmd.start = start
-}
-
-func (cmd *XAutoClaimCmd) Val() (messages []XMessage, start string) {
- return cmd.val, cmd.start
-}
-
-func (cmd *XAutoClaimCmd) Result() (messages []XMessage, start string, err error) {
- return cmd.val, cmd.start, cmd.err
-}
-
-func (cmd *XAutoClaimCmd) String() string {
- return cmdString(cmd, cmd.val)
-}
-
-func (cmd *XAutoClaimCmd) readReply(rd *proto.Reader) error {
- _, err := rd.ReadArrayReply(func(rd *proto.Reader, n int64) (interface{}, error) {
- if n != 2 {
- return nil, fmt.Errorf("got %d, wanted 2", n)
- }
- var err error
-
- cmd.start, err = rd.ReadString()
- if err != nil {
- return nil, err
- }
-
- cmd.val, err = readXMessageSlice(rd)
- if err != nil {
- return nil, err
- }
-
- return nil, nil
- })
- return err
-}
-
-//------------------------------------------------------------------------------
-
-type XAutoClaimJustIDCmd struct {
- baseCmd
-
- start string
- val []string
-}
-
-var _ Cmder = (*XAutoClaimJustIDCmd)(nil)
-
-func NewXAutoClaimJustIDCmd(ctx context.Context, args ...interface{}) *XAutoClaimJustIDCmd {
- return &XAutoClaimJustIDCmd{
- baseCmd: baseCmd{
- ctx: ctx,
- args: args,
- },
- }
-}
-
-func (cmd *XAutoClaimJustIDCmd) SetVal(val []string, start string) {
- cmd.val = val
- cmd.start = start
-}
-
-func (cmd *XAutoClaimJustIDCmd) Val() (ids []string, start string) {
- return cmd.val, cmd.start
-}
-
-func (cmd *XAutoClaimJustIDCmd) Result() (ids []string, start string, err error) {
- return cmd.val, cmd.start, cmd.err
-}
-
-func (cmd *XAutoClaimJustIDCmd) String() string {
- return cmdString(cmd, cmd.val)
-}
-
-func (cmd *XAutoClaimJustIDCmd) readReply(rd *proto.Reader) error {
- _, err := rd.ReadArrayReply(func(rd *proto.Reader, n int64) (interface{}, error) {
- if n != 2 {
- return nil, fmt.Errorf("got %d, wanted 2", n)
- }
- var err error
-
- cmd.start, err = rd.ReadString()
- if err != nil {
- return nil, err
- }
-
- nn, err := rd.ReadArrayLen()
- if err != nil {
- return nil, err
- }
-
- cmd.val = make([]string, nn)
- for i := 0; i < nn; i++ {
- cmd.val[i], err = rd.ReadString()
- if err != nil {
- return nil, err
- }
- }
-
- return nil, nil
- })
- return err
-}
-
-//------------------------------------------------------------------------------
-
-type XInfoConsumersCmd struct {
- baseCmd
- val []XInfoConsumer
-}
-
-type XInfoConsumer struct {
- Name string
- Pending int64
- Idle int64
-}
-
-var _ Cmder = (*XInfoConsumersCmd)(nil)
-
-func NewXInfoConsumersCmd(ctx context.Context, stream string, group string) *XInfoConsumersCmd {
- return &XInfoConsumersCmd{
- baseCmd: baseCmd{
- ctx: ctx,
- args: []interface{}{"xinfo", "consumers", stream, group},
- },
- }
-}
-
-func (cmd *XInfoConsumersCmd) SetVal(val []XInfoConsumer) {
- cmd.val = val
-}
-
-func (cmd *XInfoConsumersCmd) Val() []XInfoConsumer {
- return cmd.val
-}
-
-func (cmd *XInfoConsumersCmd) Result() ([]XInfoConsumer, error) {
- return cmd.val, cmd.err
-}
-
-func (cmd *XInfoConsumersCmd) String() string {
- return cmdString(cmd, cmd.val)
-}
-
-func (cmd *XInfoConsumersCmd) readReply(rd *proto.Reader) error {
- n, err := rd.ReadArrayLen()
- if err != nil {
- return err
- }
-
- cmd.val = make([]XInfoConsumer, n)
-
- for i := 0; i < n; i++ {
- cmd.val[i], err = readXConsumerInfo(rd)
- if err != nil {
- return err
- }
- }
-
- return nil
-}
-
-func readXConsumerInfo(rd *proto.Reader) (XInfoConsumer, error) {
- var consumer XInfoConsumer
-
- n, err := rd.ReadArrayLen()
- if err != nil {
- return consumer, err
- }
- if n != 6 {
- return consumer, fmt.Errorf("redis: got %d elements in XINFO CONSUMERS reply, wanted 6", n)
- }
-
- for i := 0; i < 3; i++ {
- key, err := rd.ReadString()
- if err != nil {
- return consumer, err
- }
-
- val, err := rd.ReadString()
- if err != nil {
- return consumer, err
- }
-
- switch key {
- case "name":
- consumer.Name = val
- case "pending":
- consumer.Pending, err = strconv.ParseInt(val, 0, 64)
- if err != nil {
- return consumer, err
- }
- case "idle":
- consumer.Idle, err = strconv.ParseInt(val, 0, 64)
- if err != nil {
- return consumer, err
- }
- default:
- return consumer, fmt.Errorf("redis: unexpected content %s in XINFO CONSUMERS reply", key)
- }
- }
-
- return consumer, nil
-}
-
-//------------------------------------------------------------------------------
-
-type XInfoGroupsCmd struct {
- baseCmd
- val []XInfoGroup
-}
-
-type XInfoGroup struct {
- Name string
- Consumers int64
- Pending int64
- LastDeliveredID string
-}
-
-var _ Cmder = (*XInfoGroupsCmd)(nil)
-
-func NewXInfoGroupsCmd(ctx context.Context, stream string) *XInfoGroupsCmd {
- return &XInfoGroupsCmd{
- baseCmd: baseCmd{
- ctx: ctx,
- args: []interface{}{"xinfo", "groups", stream},
- },
- }
-}
-
-func (cmd *XInfoGroupsCmd) SetVal(val []XInfoGroup) {
- cmd.val = val
-}
-
-func (cmd *XInfoGroupsCmd) Val() []XInfoGroup {
- return cmd.val
-}
-
-func (cmd *XInfoGroupsCmd) Result() ([]XInfoGroup, error) {
- return cmd.val, cmd.err
-}
-
-func (cmd *XInfoGroupsCmd) String() string {
- return cmdString(cmd, cmd.val)
-}
-
-func (cmd *XInfoGroupsCmd) readReply(rd *proto.Reader) error {
- n, err := rd.ReadArrayLen()
- if err != nil {
- return err
- }
-
- cmd.val = make([]XInfoGroup, n)
-
- for i := 0; i < n; i++ {
- cmd.val[i], err = readXGroupInfo(rd)
- if err != nil {
- return err
- }
- }
-
- return nil
-}
-
-func readXGroupInfo(rd *proto.Reader) (XInfoGroup, error) {
- var group XInfoGroup
-
- n, err := rd.ReadArrayLen()
- if err != nil {
- return group, err
- }
- if n != 8 {
- return group, fmt.Errorf("redis: got %d elements in XINFO GROUPS reply, wanted 8", n)
- }
-
- for i := 0; i < 4; i++ {
- key, err := rd.ReadString()
- if err != nil {
- return group, err
- }
-
- val, err := rd.ReadString()
- if err != nil {
- return group, err
- }
-
- switch key {
- case "name":
- group.Name = val
- case "consumers":
- group.Consumers, err = strconv.ParseInt(val, 0, 64)
- if err != nil {
- return group, err
- }
- case "pending":
- group.Pending, err = strconv.ParseInt(val, 0, 64)
- if err != nil {
- return group, err
- }
- case "last-delivered-id":
- group.LastDeliveredID = val
- default:
- return group, fmt.Errorf("redis: unexpected content %s in XINFO GROUPS reply", key)
- }
- }
-
- return group, nil
-}
-
-//------------------------------------------------------------------------------
-
-type XInfoStreamCmd struct {
- baseCmd
- val *XInfoStream
-}
-
-type XInfoStream struct {
- Length int64
- RadixTreeKeys int64
- RadixTreeNodes int64
- Groups int64
- LastGeneratedID string
- FirstEntry XMessage
- LastEntry XMessage
-}
-
-var _ Cmder = (*XInfoStreamCmd)(nil)
-
-func NewXInfoStreamCmd(ctx context.Context, stream string) *XInfoStreamCmd {
- return &XInfoStreamCmd{
- baseCmd: baseCmd{
- ctx: ctx,
- args: []interface{}{"xinfo", "stream", stream},
- },
- }
-}
-
-func (cmd *XInfoStreamCmd) SetVal(val *XInfoStream) {
- cmd.val = val
-}
-
-func (cmd *XInfoStreamCmd) Val() *XInfoStream {
- return cmd.val
-}
-
-func (cmd *XInfoStreamCmd) Result() (*XInfoStream, error) {
- return cmd.val, cmd.err
-}
-
-func (cmd *XInfoStreamCmd) String() string {
- return cmdString(cmd, cmd.val)
-}
-
-func (cmd *XInfoStreamCmd) readReply(rd *proto.Reader) error {
- v, err := rd.ReadReply(xStreamInfoParser)
- if err != nil {
- return err
- }
- cmd.val = v.(*XInfoStream)
- return nil
-}
-
-func xStreamInfoParser(rd *proto.Reader, n int64) (interface{}, error) {
- if n != 14 {
- return nil, fmt.Errorf("redis: got %d elements in XINFO STREAM reply,"+
- "wanted 14", n)
- }
- var info XInfoStream
- for i := 0; i < 7; i++ {
- key, err := rd.ReadString()
- if err != nil {
- return nil, err
- }
- switch key {
- case "length":
- info.Length, err = rd.ReadIntReply()
- case "radix-tree-keys":
- info.RadixTreeKeys, err = rd.ReadIntReply()
- case "radix-tree-nodes":
- info.RadixTreeNodes, err = rd.ReadIntReply()
- case "groups":
- info.Groups, err = rd.ReadIntReply()
- case "last-generated-id":
- info.LastGeneratedID, err = rd.ReadString()
- case "first-entry":
- info.FirstEntry, err = readXMessage(rd)
- if err == Nil {
- err = nil
- }
- case "last-entry":
- info.LastEntry, err = readXMessage(rd)
- if err == Nil {
- err = nil
- }
- default:
- return nil, fmt.Errorf("redis: unexpected content %s "+
- "in XINFO STREAM reply", key)
- }
- if err != nil {
- return nil, err
- }
- }
- return &info, nil
-}
-
-//------------------------------------------------------------------------------
-
-type XInfoStreamFullCmd struct {
- baseCmd
- val *XInfoStreamFull
-}
-
-type XInfoStreamFull struct {
- Length int64
- RadixTreeKeys int64
- RadixTreeNodes int64
- LastGeneratedID string
- Entries []XMessage
- Groups []XInfoStreamGroup
-}
-
-type XInfoStreamGroup struct {
- Name string
- LastDeliveredID string
- PelCount int64
- Pending []XInfoStreamGroupPending
- Consumers []XInfoStreamConsumer
-}
-
-type XInfoStreamGroupPending struct {
- ID string
- Consumer string
- DeliveryTime time.Time
- DeliveryCount int64
-}
-
-type XInfoStreamConsumer struct {
- Name string
- SeenTime time.Time
- PelCount int64
- Pending []XInfoStreamConsumerPending
-}
-
-type XInfoStreamConsumerPending struct {
- ID string
- DeliveryTime time.Time
- DeliveryCount int64
-}
-
-var _ Cmder = (*XInfoStreamFullCmd)(nil)
-
-func NewXInfoStreamFullCmd(ctx context.Context, args ...interface{}) *XInfoStreamFullCmd {
- return &XInfoStreamFullCmd{
- baseCmd: baseCmd{
- ctx: ctx,
- args: args,
- },
- }
-}
-
-func (cmd *XInfoStreamFullCmd) SetVal(val *XInfoStreamFull) {
- cmd.val = val
-}
-
-func (cmd *XInfoStreamFullCmd) Val() *XInfoStreamFull {
- return cmd.val
-}
-
-func (cmd *XInfoStreamFullCmd) Result() (*XInfoStreamFull, error) {
- return cmd.val, cmd.err
-}
-
-func (cmd *XInfoStreamFullCmd) String() string {
- return cmdString(cmd, cmd.val)
-}
-
-func (cmd *XInfoStreamFullCmd) readReply(rd *proto.Reader) error {
- n, err := rd.ReadArrayLen()
- if err != nil {
- return err
- }
- if n != 12 {
- return fmt.Errorf("redis: got %d elements in XINFO STREAM FULL reply,"+
- "wanted 12", n)
- }
-
- cmd.val = &XInfoStreamFull{}
-
- for i := 0; i < 6; i++ {
- key, err := rd.ReadString()
- if err != nil {
- return err
- }
-
- switch key {
- case "length":
- cmd.val.Length, err = rd.ReadIntReply()
- case "radix-tree-keys":
- cmd.val.RadixTreeKeys, err = rd.ReadIntReply()
- case "radix-tree-nodes":
- cmd.val.RadixTreeNodes, err = rd.ReadIntReply()
- case "last-generated-id":
- cmd.val.LastGeneratedID, err = rd.ReadString()
- case "entries":
- cmd.val.Entries, err = readXMessageSlice(rd)
- case "groups":
- cmd.val.Groups, err = readStreamGroups(rd)
- default:
- return fmt.Errorf("redis: unexpected content %s "+
- "in XINFO STREAM reply", key)
- }
- if err != nil {
- return err
- }
- }
- return nil
-}
-
-func readStreamGroups(rd *proto.Reader) ([]XInfoStreamGroup, error) {
- n, err := rd.ReadArrayLen()
- if err != nil {
- return nil, err
- }
- groups := make([]XInfoStreamGroup, 0, n)
- for i := 0; i < n; i++ {
- nn, err := rd.ReadArrayLen()
- if err != nil {
- return nil, err
- }
- if nn != 10 {
- return nil, fmt.Errorf("redis: got %d elements in XINFO STREAM FULL reply,"+
- "wanted 10", nn)
- }
-
- group := XInfoStreamGroup{}
-
- for f := 0; f < 5; f++ {
- key, err := rd.ReadString()
- if err != nil {
- return nil, err
- }
-
- switch key {
- case "name":
- group.Name, err = rd.ReadString()
- case "last-delivered-id":
- group.LastDeliveredID, err = rd.ReadString()
- case "pel-count":
- group.PelCount, err = rd.ReadIntReply()
- case "pending":
- group.Pending, err = readXInfoStreamGroupPending(rd)
- case "consumers":
- group.Consumers, err = readXInfoStreamConsumers(rd)
- default:
- return nil, fmt.Errorf("redis: unexpected content %s "+
- "in XINFO STREAM reply", key)
- }
-
- if err != nil {
- return nil, err
- }
- }
-
- groups = append(groups, group)
- }
-
- return groups, nil
-}
-
-func readXInfoStreamGroupPending(rd *proto.Reader) ([]XInfoStreamGroupPending, error) {
- n, err := rd.ReadArrayLen()
- if err != nil {
- return nil, err
- }
-
- pending := make([]XInfoStreamGroupPending, 0, n)
-
- for i := 0; i < n; i++ {
- nn, err := rd.ReadArrayLen()
- if err != nil {
- return nil, err
- }
- if nn != 4 {
- return nil, fmt.Errorf("redis: got %d elements in XINFO STREAM FULL reply,"+
- "wanted 4", nn)
- }
-
- p := XInfoStreamGroupPending{}
-
- p.ID, err = rd.ReadString()
- if err != nil {
- return nil, err
- }
-
- p.Consumer, err = rd.ReadString()
- if err != nil {
- return nil, err
- }
-
- delivery, err := rd.ReadIntReply()
- if err != nil {
- return nil, err
- }
- p.DeliveryTime = time.Unix(delivery/1000, delivery%1000*int64(time.Millisecond))
-
- p.DeliveryCount, err = rd.ReadIntReply()
- if err != nil {
- return nil, err
- }
-
- pending = append(pending, p)
- }
-
- return pending, nil
-}
-
-func readXInfoStreamConsumers(rd *proto.Reader) ([]XInfoStreamConsumer, error) {
- n, err := rd.ReadArrayLen()
- if err != nil {
- return nil, err
- }
-
- consumers := make([]XInfoStreamConsumer, 0, n)
-
- for i := 0; i < n; i++ {
- nn, err := rd.ReadArrayLen()
- if err != nil {
- return nil, err
- }
- if nn != 8 {
- return nil, fmt.Errorf("redis: got %d elements in XINFO STREAM FULL reply,"+
- "wanted 8", nn)
- }
-
- c := XInfoStreamConsumer{}
-
- for f := 0; f < 4; f++ {
- cKey, err := rd.ReadString()
- if err != nil {
- return nil, err
- }
-
- switch cKey {
- case "name":
- c.Name, err = rd.ReadString()
- case "seen-time":
- seen, err := rd.ReadIntReply()
- if err != nil {
- return nil, err
- }
- c.SeenTime = time.Unix(seen/1000, seen%1000*int64(time.Millisecond))
- case "pel-count":
- c.PelCount, err = rd.ReadIntReply()
- case "pending":
- pendingNumber, err := rd.ReadArrayLen()
- if err != nil {
- return nil, err
- }
-
- c.Pending = make([]XInfoStreamConsumerPending, 0, pendingNumber)
-
- for pn := 0; pn < pendingNumber; pn++ {
- nn, err := rd.ReadArrayLen()
- if err != nil {
- return nil, err
- }
- if nn != 3 {
- return nil, fmt.Errorf("redis: got %d elements in XINFO STREAM reply,"+
- "wanted 3", nn)
- }
-
- p := XInfoStreamConsumerPending{}
-
- p.ID, err = rd.ReadString()
- if err != nil {
- return nil, err
- }
-
- delivery, err := rd.ReadIntReply()
- if err != nil {
- return nil, err
- }
- p.DeliveryTime = time.Unix(delivery/1000, delivery%1000*int64(time.Millisecond))
-
- p.DeliveryCount, err = rd.ReadIntReply()
- if err != nil {
- return nil, err
- }
-
- c.Pending = append(c.Pending, p)
- }
- default:
- return nil, fmt.Errorf("redis: unexpected content %s "+
- "in XINFO STREAM reply", cKey)
- }
- if err != nil {
- return nil, err
- }
- }
- consumers = append(consumers, c)
- }
-
- return consumers, nil
-}
-
-//------------------------------------------------------------------------------
-
-type ZSliceCmd struct {
- baseCmd
-
- val []Z
-}
-
-var _ Cmder = (*ZSliceCmd)(nil)
-
-func NewZSliceCmd(ctx context.Context, args ...interface{}) *ZSliceCmd {
- return &ZSliceCmd{
- baseCmd: baseCmd{
- ctx: ctx,
- args: args,
- },
- }
-}
-
-func (cmd *ZSliceCmd) SetVal(val []Z) {
- cmd.val = val
-}
-
-func (cmd *ZSliceCmd) Val() []Z {
- return cmd.val
-}
-
-func (cmd *ZSliceCmd) Result() ([]Z, error) {
- return cmd.val, cmd.err
-}
-
-func (cmd *ZSliceCmd) String() string {
- return cmdString(cmd, cmd.val)
-}
-
-func (cmd *ZSliceCmd) readReply(rd *proto.Reader) error {
- _, err := rd.ReadArrayReply(func(rd *proto.Reader, n int64) (interface{}, error) {
- cmd.val = make([]Z, n/2)
- for i := 0; i < len(cmd.val); i++ {
- member, err := rd.ReadString()
- if err != nil {
- return nil, err
- }
-
- score, err := rd.ReadFloatReply()
- if err != nil {
- return nil, err
- }
-
- cmd.val[i] = Z{
- Member: member,
- Score: score,
- }
- }
- return nil, nil
- })
- return err
-}
-
-//------------------------------------------------------------------------------
-
-type ZWithKeyCmd struct {
- baseCmd
-
- val *ZWithKey
-}
-
-var _ Cmder = (*ZWithKeyCmd)(nil)
-
-func NewZWithKeyCmd(ctx context.Context, args ...interface{}) *ZWithKeyCmd {
- return &ZWithKeyCmd{
- baseCmd: baseCmd{
- ctx: ctx,
- args: args,
- },
- }
-}
-
-func (cmd *ZWithKeyCmd) SetVal(val *ZWithKey) {
- cmd.val = val
-}
-
-func (cmd *ZWithKeyCmd) Val() *ZWithKey {
- return cmd.val
-}
-
-func (cmd *ZWithKeyCmd) Result() (*ZWithKey, error) {
- return cmd.Val(), cmd.Err()
-}
-
-func (cmd *ZWithKeyCmd) String() string {
- return cmdString(cmd, cmd.val)
-}
-
-func (cmd *ZWithKeyCmd) readReply(rd *proto.Reader) error {
- _, err := rd.ReadArrayReply(func(rd *proto.Reader, n int64) (interface{}, error) {
- if n != 3 {
- return nil, fmt.Errorf("got %d elements, expected 3", n)
- }
-
- cmd.val = &ZWithKey{}
- var err error
-
- cmd.val.Key, err = rd.ReadString()
- if err != nil {
- return nil, err
- }
-
- cmd.val.Member, err = rd.ReadString()
- if err != nil {
- return nil, err
- }
-
- cmd.val.Score, err = rd.ReadFloatReply()
- if err != nil {
- return nil, err
- }
-
- return nil, nil
- })
- return err
-}
-
-//------------------------------------------------------------------------------
-
-type ScanCmd struct {
- baseCmd
-
- page []string
- cursor uint64
-
- process cmdable
-}
-
-var _ Cmder = (*ScanCmd)(nil)
-
-func NewScanCmd(ctx context.Context, process cmdable, args ...interface{}) *ScanCmd {
- return &ScanCmd{
- baseCmd: baseCmd{
- ctx: ctx,
- args: args,
- },
- process: process,
- }
-}
-
-func (cmd *ScanCmd) SetVal(page []string, cursor uint64) {
- cmd.page = page
- cmd.cursor = cursor
-}
-
-func (cmd *ScanCmd) Val() (keys []string, cursor uint64) {
- return cmd.page, cmd.cursor
-}
-
-func (cmd *ScanCmd) Result() (keys []string, cursor uint64, err error) {
- return cmd.page, cmd.cursor, cmd.err
-}
-
-func (cmd *ScanCmd) String() string {
- return cmdString(cmd, cmd.page)
-}
-
-func (cmd *ScanCmd) readReply(rd *proto.Reader) (err error) {
- cmd.page, cmd.cursor, err = rd.ReadScanReply()
- return err
-}
-
-// Iterator creates a new ScanIterator.
-func (cmd *ScanCmd) Iterator() *ScanIterator {
- return &ScanIterator{
- cmd: cmd,
- }
-}
-
-//------------------------------------------------------------------------------
-
-type ClusterNode struct {
- ID string
- Addr string
-}
-
-type ClusterSlot struct {
- Start int
- End int
- Nodes []ClusterNode
-}
-
-type ClusterSlotsCmd struct {
- baseCmd
-
- val []ClusterSlot
-}
-
-var _ Cmder = (*ClusterSlotsCmd)(nil)
-
-func NewClusterSlotsCmd(ctx context.Context, args ...interface{}) *ClusterSlotsCmd {
- return &ClusterSlotsCmd{
- baseCmd: baseCmd{
- ctx: ctx,
- args: args,
- },
- }
-}
-
-func (cmd *ClusterSlotsCmd) SetVal(val []ClusterSlot) {
- cmd.val = val
-}
-
-func (cmd *ClusterSlotsCmd) Val() []ClusterSlot {
- return cmd.val
-}
-
-func (cmd *ClusterSlotsCmd) Result() ([]ClusterSlot, error) {
- return cmd.Val(), cmd.Err()
-}
-
-func (cmd *ClusterSlotsCmd) String() string {
- return cmdString(cmd, cmd.val)
-}
-
-func (cmd *ClusterSlotsCmd) readReply(rd *proto.Reader) error {
- _, err := rd.ReadArrayReply(func(rd *proto.Reader, n int64) (interface{}, error) {
- cmd.val = make([]ClusterSlot, n)
- for i := 0; i < len(cmd.val); i++ {
- n, err := rd.ReadArrayLen()
- if err != nil {
- return nil, err
- }
- if n < 2 {
- err := fmt.Errorf("redis: got %d elements in cluster info, expected at least 2", n)
- return nil, err
- }
-
- start, err := rd.ReadIntReply()
- if err != nil {
- return nil, err
- }
-
- end, err := rd.ReadIntReply()
- if err != nil {
- return nil, err
- }
-
- nodes := make([]ClusterNode, n-2)
- for j := 0; j < len(nodes); j++ {
- n, err := rd.ReadArrayLen()
- if err != nil {
- return nil, err
- }
- if n != 2 && n != 3 {
- err := fmt.Errorf("got %d elements in cluster info address, expected 2 or 3", n)
- return nil, err
- }
-
- ip, err := rd.ReadString()
- if err != nil {
- return nil, err
- }
-
- port, err := rd.ReadString()
- if err != nil {
- return nil, err
- }
-
- nodes[j].Addr = net.JoinHostPort(ip, port)
-
- if n == 3 {
- id, err := rd.ReadString()
- if err != nil {
- return nil, err
- }
- nodes[j].ID = id
- }
- }
-
- cmd.val[i] = ClusterSlot{
- Start: int(start),
- End: int(end),
- Nodes: nodes,
- }
- }
- return nil, nil
- })
- return err
-}
-
-//------------------------------------------------------------------------------
-
-// GeoLocation is used with GeoAdd to add geospatial location.
-type GeoLocation struct {
- Name string
- Longitude, Latitude, Dist float64
- GeoHash int64
-}
-
-// GeoRadiusQuery is used with GeoRadius to query geospatial index.
-type GeoRadiusQuery struct {
- Radius float64
- // Can be m, km, ft, or mi. Default is km.
- Unit string
- WithCoord bool
- WithDist bool
- WithGeoHash bool
- Count int
- // Can be ASC or DESC. Default is no sort order.
- Sort string
- Store string
- StoreDist string
-}
-
-type GeoLocationCmd struct {
- baseCmd
-
- q *GeoRadiusQuery
- locations []GeoLocation
-}
-
-var _ Cmder = (*GeoLocationCmd)(nil)
-
-func NewGeoLocationCmd(ctx context.Context, q *GeoRadiusQuery, args ...interface{}) *GeoLocationCmd {
- return &GeoLocationCmd{
- baseCmd: baseCmd{
- ctx: ctx,
- args: geoLocationArgs(q, args...),
- },
- q: q,
- }
-}
-
-func geoLocationArgs(q *GeoRadiusQuery, args ...interface{}) []interface{} {
- args = append(args, q.Radius)
- if q.Unit != "" {
- args = append(args, q.Unit)
- } else {
- args = append(args, "km")
- }
- if q.WithCoord {
- args = append(args, "withcoord")
- }
- if q.WithDist {
- args = append(args, "withdist")
- }
- if q.WithGeoHash {
- args = append(args, "withhash")
- }
- if q.Count > 0 {
- args = append(args, "count", q.Count)
- }
- if q.Sort != "" {
- args = append(args, q.Sort)
- }
- if q.Store != "" {
- args = append(args, "store")
- args = append(args, q.Store)
- }
- if q.StoreDist != "" {
- args = append(args, "storedist")
- args = append(args, q.StoreDist)
- }
- return args
-}
-
-func (cmd *GeoLocationCmd) SetVal(locations []GeoLocation) {
- cmd.locations = locations
-}
-
-func (cmd *GeoLocationCmd) Val() []GeoLocation {
- return cmd.locations
-}
-
-func (cmd *GeoLocationCmd) Result() ([]GeoLocation, error) {
- return cmd.locations, cmd.err
-}
-
-func (cmd *GeoLocationCmd) String() string {
- return cmdString(cmd, cmd.locations)
-}
-
-func (cmd *GeoLocationCmd) readReply(rd *proto.Reader) error {
- v, err := rd.ReadArrayReply(newGeoLocationSliceParser(cmd.q))
- if err != nil {
- return err
- }
- cmd.locations = v.([]GeoLocation)
- return nil
-}
-
-func newGeoLocationSliceParser(q *GeoRadiusQuery) proto.MultiBulkParse {
- return func(rd *proto.Reader, n int64) (interface{}, error) {
- locs := make([]GeoLocation, 0, n)
- for i := int64(0); i < n; i++ {
- v, err := rd.ReadReply(newGeoLocationParser(q))
- if err != nil {
- return nil, err
- }
- switch vv := v.(type) {
- case string:
- locs = append(locs, GeoLocation{
- Name: vv,
- })
- case *GeoLocation:
- // TODO: avoid copying
- locs = append(locs, *vv)
- default:
- return nil, fmt.Errorf("got %T, expected string or *GeoLocation", v)
- }
- }
- return locs, nil
- }
-}
-
-func newGeoLocationParser(q *GeoRadiusQuery) proto.MultiBulkParse {
- return func(rd *proto.Reader, n int64) (interface{}, error) {
- var loc GeoLocation
- var err error
-
- loc.Name, err = rd.ReadString()
- if err != nil {
- return nil, err
- }
- if q.WithDist {
- loc.Dist, err = rd.ReadFloatReply()
- if err != nil {
- return nil, err
- }
- }
- if q.WithGeoHash {
- loc.GeoHash, err = rd.ReadIntReply()
- if err != nil {
- return nil, err
- }
- }
- if q.WithCoord {
- n, err := rd.ReadArrayLen()
- if err != nil {
- return nil, err
- }
- if n != 2 {
- return nil, fmt.Errorf("got %d coordinates, expected 2", n)
- }
-
- loc.Longitude, err = rd.ReadFloatReply()
- if err != nil {
- return nil, err
- }
- loc.Latitude, err = rd.ReadFloatReply()
- if err != nil {
- return nil, err
- }
- }
-
- return &loc, nil
- }
-}
-
-//------------------------------------------------------------------------------
-
-// GeoSearchQuery is used for GEOSearch/GEOSearchStore command query.
-type GeoSearchQuery struct {
- Member string
-
- // Latitude and Longitude when using FromLonLat option.
- Longitude float64
- Latitude float64
-
- // Distance and unit when using ByRadius option.
- // Can use m, km, ft, or mi. Default is km.
- Radius float64
- RadiusUnit string
-
- // Height, width and unit when using ByBox option.
- // Can be m, km, ft, or mi. Default is km.
- BoxWidth float64
- BoxHeight float64
- BoxUnit string
-
- // Can be ASC or DESC. Default is no sort order.
- Sort string
- Count int
- CountAny bool
-}
-
-type GeoSearchLocationQuery struct {
- GeoSearchQuery
-
- WithCoord bool
- WithDist bool
- WithHash bool
-}
-
-type GeoSearchStoreQuery struct {
- GeoSearchQuery
-
- // When using the StoreDist option, the command stores the items in a
- // sorted set populated with their distance from the center of the circle or box,
- // as a floating-point number, in the same unit specified for that shape.
- StoreDist bool
-}
-
-func geoSearchLocationArgs(q *GeoSearchLocationQuery, args []interface{}) []interface{} {
- args = geoSearchArgs(&q.GeoSearchQuery, args)
-
- if q.WithCoord {
- args = append(args, "withcoord")
- }
- if q.WithDist {
- args = append(args, "withdist")
- }
- if q.WithHash {
- args = append(args, "withhash")
- }
-
- return args
-}
-
-func geoSearchArgs(q *GeoSearchQuery, args []interface{}) []interface{} {
- if q.Member != "" {
- args = append(args, "frommember", q.Member)
- } else {
- args = append(args, "fromlonlat", q.Longitude, q.Latitude)
- }
-
- if q.Radius > 0 {
- if q.RadiusUnit == "" {
- q.RadiusUnit = "km"
- }
- args = append(args, "byradius", q.Radius, q.RadiusUnit)
- } else {
- if q.BoxUnit == "" {
- q.BoxUnit = "km"
- }
- args = append(args, "bybox", q.BoxWidth, q.BoxHeight, q.BoxUnit)
- }
-
- if q.Sort != "" {
- args = append(args, q.Sort)
- }
-
- if q.Count > 0 {
- args = append(args, "count", q.Count)
- if q.CountAny {
- args = append(args, "any")
- }
- }
-
- return args
-}
-
-type GeoSearchLocationCmd struct {
- baseCmd
-
- opt *GeoSearchLocationQuery
- val []GeoLocation
-}
-
-var _ Cmder = (*GeoSearchLocationCmd)(nil)
-
-func NewGeoSearchLocationCmd(
- ctx context.Context, opt *GeoSearchLocationQuery, args ...interface{},
-) *GeoSearchLocationCmd {
- return &GeoSearchLocationCmd{
- baseCmd: baseCmd{
- ctx: ctx,
- args: args,
- },
- opt: opt,
- }
-}
-
-func (cmd *GeoSearchLocationCmd) SetVal(val []GeoLocation) {
- cmd.val = val
-}
-
-func (cmd *GeoSearchLocationCmd) Val() []GeoLocation {
- return cmd.val
-}
-
-func (cmd *GeoSearchLocationCmd) Result() ([]GeoLocation, error) {
- return cmd.val, cmd.err
-}
-
-func (cmd *GeoSearchLocationCmd) String() string {
- return cmdString(cmd, cmd.val)
-}
-
-func (cmd *GeoSearchLocationCmd) readReply(rd *proto.Reader) error {
- n, err := rd.ReadArrayLen()
- if err != nil {
- return err
- }
-
- cmd.val = make([]GeoLocation, n)
- for i := 0; i < n; i++ {
- _, err = rd.ReadArrayLen()
- if err != nil {
- return err
- }
-
- var loc GeoLocation
-
- loc.Name, err = rd.ReadString()
- if err != nil {
- return err
- }
- if cmd.opt.WithDist {
- loc.Dist, err = rd.ReadFloatReply()
- if err != nil {
- return err
- }
- }
- if cmd.opt.WithHash {
- loc.GeoHash, err = rd.ReadIntReply()
- if err != nil {
- return err
- }
- }
- if cmd.opt.WithCoord {
- nn, err := rd.ReadArrayLen()
- if err != nil {
- return err
- }
- if nn != 2 {
- return fmt.Errorf("got %d coordinates, expected 2", nn)
- }
-
- loc.Longitude, err = rd.ReadFloatReply()
- if err != nil {
- return err
- }
- loc.Latitude, err = rd.ReadFloatReply()
- if err != nil {
- return err
- }
- }
-
- cmd.val[i] = loc
- }
-
- return nil
-}
-
-//------------------------------------------------------------------------------
-
-type GeoPos struct {
- Longitude, Latitude float64
-}
-
-type GeoPosCmd struct {
- baseCmd
-
- val []*GeoPos
-}
-
-var _ Cmder = (*GeoPosCmd)(nil)
-
-func NewGeoPosCmd(ctx context.Context, args ...interface{}) *GeoPosCmd {
- return &GeoPosCmd{
- baseCmd: baseCmd{
- ctx: ctx,
- args: args,
- },
- }
-}
-
-func (cmd *GeoPosCmd) SetVal(val []*GeoPos) {
- cmd.val = val
-}
-
-func (cmd *GeoPosCmd) Val() []*GeoPos {
- return cmd.val
-}
-
-func (cmd *GeoPosCmd) Result() ([]*GeoPos, error) {
- return cmd.Val(), cmd.Err()
-}
-
-func (cmd *GeoPosCmd) String() string {
- return cmdString(cmd, cmd.val)
-}
-
-func (cmd *GeoPosCmd) readReply(rd *proto.Reader) error {
- _, err := rd.ReadArrayReply(func(rd *proto.Reader, n int64) (interface{}, error) {
- cmd.val = make([]*GeoPos, n)
- for i := 0; i < len(cmd.val); i++ {
- i := i
- _, err := rd.ReadReply(func(rd *proto.Reader, n int64) (interface{}, error) {
- longitude, err := rd.ReadFloatReply()
- if err != nil {
- return nil, err
- }
-
- latitude, err := rd.ReadFloatReply()
- if err != nil {
- return nil, err
- }
-
- cmd.val[i] = &GeoPos{
- Longitude: longitude,
- Latitude: latitude,
- }
- return nil, nil
- })
- if err != nil {
- if err == Nil {
- cmd.val[i] = nil
- continue
- }
- return nil, err
- }
- }
- return nil, nil
- })
- return err
-}
-
-//------------------------------------------------------------------------------
-
-type CommandInfo struct {
- Name string
- Arity int8
- Flags []string
- ACLFlags []string
- FirstKeyPos int8
- LastKeyPos int8
- StepCount int8
- ReadOnly bool
-}
-
-type CommandsInfoCmd struct {
- baseCmd
-
- val map[string]*CommandInfo
-}
-
-var _ Cmder = (*CommandsInfoCmd)(nil)
-
-func NewCommandsInfoCmd(ctx context.Context, args ...interface{}) *CommandsInfoCmd {
- return &CommandsInfoCmd{
- baseCmd: baseCmd{
- ctx: ctx,
- args: args,
- },
- }
-}
-
-func (cmd *CommandsInfoCmd) SetVal(val map[string]*CommandInfo) {
- cmd.val = val
-}
-
-func (cmd *CommandsInfoCmd) Val() map[string]*CommandInfo {
- return cmd.val
-}
-
-func (cmd *CommandsInfoCmd) Result() (map[string]*CommandInfo, error) {
- return cmd.Val(), cmd.Err()
-}
-
-func (cmd *CommandsInfoCmd) String() string {
- return cmdString(cmd, cmd.val)
-}
-
-func (cmd *CommandsInfoCmd) readReply(rd *proto.Reader) error {
- _, err := rd.ReadArrayReply(func(rd *proto.Reader, n int64) (interface{}, error) {
- cmd.val = make(map[string]*CommandInfo, n)
- for i := int64(0); i < n; i++ {
- v, err := rd.ReadReply(commandInfoParser)
- if err != nil {
- return nil, err
- }
- vv := v.(*CommandInfo)
- cmd.val[vv.Name] = vv
- }
- return nil, nil
- })
- return err
-}
-
-func commandInfoParser(rd *proto.Reader, n int64) (interface{}, error) {
- const numArgRedis5 = 6
- const numArgRedis6 = 7
-
- switch n {
- case numArgRedis5, numArgRedis6:
- // continue
- default:
- return nil, fmt.Errorf("redis: got %d elements in COMMAND reply, wanted 7", n)
- }
-
- var cmd CommandInfo
- var err error
-
- cmd.Name, err = rd.ReadString()
- if err != nil {
- return nil, err
- }
-
- arity, err := rd.ReadIntReply()
- if err != nil {
- return nil, err
- }
- cmd.Arity = int8(arity)
-
- _, err = rd.ReadReply(func(rd *proto.Reader, n int64) (interface{}, error) {
- cmd.Flags = make([]string, n)
- for i := 0; i < len(cmd.Flags); i++ {
- switch s, err := rd.ReadString(); {
- case err == Nil:
- cmd.Flags[i] = ""
- case err != nil:
- return nil, err
- default:
- cmd.Flags[i] = s
- }
- }
- return nil, nil
- })
- if err != nil {
- return nil, err
- }
-
- firstKeyPos, err := rd.ReadIntReply()
- if err != nil {
- return nil, err
- }
- cmd.FirstKeyPos = int8(firstKeyPos)
-
- lastKeyPos, err := rd.ReadIntReply()
- if err != nil {
- return nil, err
- }
- cmd.LastKeyPos = int8(lastKeyPos)
-
- stepCount, err := rd.ReadIntReply()
- if err != nil {
- return nil, err
- }
- cmd.StepCount = int8(stepCount)
-
- for _, flag := range cmd.Flags {
- if flag == "readonly" {
- cmd.ReadOnly = true
- break
- }
- }
-
- if n == numArgRedis5 {
- return &cmd, nil
- }
-
- _, err = rd.ReadReply(func(rd *proto.Reader, n int64) (interface{}, error) {
- cmd.ACLFlags = make([]string, n)
- for i := 0; i < len(cmd.ACLFlags); i++ {
- switch s, err := rd.ReadString(); {
- case err == Nil:
- cmd.ACLFlags[i] = ""
- case err != nil:
- return nil, err
- default:
- cmd.ACLFlags[i] = s
- }
- }
- return nil, nil
- })
- if err != nil {
- return nil, err
- }
-
- return &cmd, nil
-}
-
-//------------------------------------------------------------------------------
-
-type cmdsInfoCache struct {
- fn func(ctx context.Context) (map[string]*CommandInfo, error)
-
- once internal.Once
- cmds map[string]*CommandInfo
-}
-
-func newCmdsInfoCache(fn func(ctx context.Context) (map[string]*CommandInfo, error)) *cmdsInfoCache {
- return &cmdsInfoCache{
- fn: fn,
- }
-}
-
-func (c *cmdsInfoCache) Get(ctx context.Context) (map[string]*CommandInfo, error) {
- err := c.once.Do(func() error {
- cmds, err := c.fn(ctx)
- if err != nil {
- return err
- }
-
- // Extensions have cmd names in upper case. Convert them to lower case.
- for k, v := range cmds {
- lower := internal.ToLower(k)
- if lower != k {
- cmds[lower] = v
- }
- }
-
- c.cmds = cmds
- return nil
- })
- return c.cmds, err
-}
-
-//------------------------------------------------------------------------------
-
-type SlowLog struct {
- ID int64
- Time time.Time
- Duration time.Duration
- Args []string
- // These are also optional fields emitted only by Redis 4.0 or greater:
- // https://redis.io/commands/slowlog#output-format
- ClientAddr string
- ClientName string
-}
-
-type SlowLogCmd struct {
- baseCmd
-
- val []SlowLog
-}
-
-var _ Cmder = (*SlowLogCmd)(nil)
-
-func NewSlowLogCmd(ctx context.Context, args ...interface{}) *SlowLogCmd {
- return &SlowLogCmd{
- baseCmd: baseCmd{
- ctx: ctx,
- args: args,
- },
- }
-}
-
-func (cmd *SlowLogCmd) SetVal(val []SlowLog) {
- cmd.val = val
-}
-
-func (cmd *SlowLogCmd) Val() []SlowLog {
- return cmd.val
-}
-
-func (cmd *SlowLogCmd) Result() ([]SlowLog, error) {
- return cmd.Val(), cmd.Err()
-}
-
-func (cmd *SlowLogCmd) String() string {
- return cmdString(cmd, cmd.val)
-}
-
-func (cmd *SlowLogCmd) readReply(rd *proto.Reader) error {
- _, err := rd.ReadArrayReply(func(rd *proto.Reader, n int64) (interface{}, error) {
- cmd.val = make([]SlowLog, n)
- for i := 0; i < len(cmd.val); i++ {
- n, err := rd.ReadArrayLen()
- if err != nil {
- return nil, err
- }
- if n < 4 {
- err := fmt.Errorf("redis: got %d elements in slowlog get, expected at least 4", n)
- return nil, err
- }
-
- id, err := rd.ReadIntReply()
- if err != nil {
- return nil, err
- }
-
- createdAt, err := rd.ReadIntReply()
- if err != nil {
- return nil, err
- }
- createdAtTime := time.Unix(createdAt, 0)
-
- costs, err := rd.ReadIntReply()
- if err != nil {
- return nil, err
- }
- costsDuration := time.Duration(costs) * time.Microsecond
-
- cmdLen, err := rd.ReadArrayLen()
- if err != nil {
- return nil, err
- }
- if cmdLen < 1 {
- err := fmt.Errorf("redis: got %d elements commands reply in slowlog get, expected at least 1", cmdLen)
- return nil, err
- }
-
- cmdString := make([]string, cmdLen)
- for i := 0; i < cmdLen; i++ {
- cmdString[i], err = rd.ReadString()
- if err != nil {
- return nil, err
- }
- }
-
- var address, name string
- for i := 4; i < n; i++ {
- str, err := rd.ReadString()
- if err != nil {
- return nil, err
- }
- if i == 4 {
- address = str
- } else if i == 5 {
- name = str
- }
- }
-
- cmd.val[i] = SlowLog{
- ID: id,
- Time: createdAtTime,
- Duration: costsDuration,
- Args: cmdString,
- ClientAddr: address,
- ClientName: name,
- }
- }
- return nil, nil
- })
- return err
-}
diff --git a/dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/commands.go b/dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/commands.go
deleted file mode 100644
index bbfe089..0000000
--- a/dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/commands.go
+++ /dev/null
@@ -1,3475 +0,0 @@
-package redis
-
-import (
- "context"
- "errors"
- "io"
- "time"
-
- "github.com/go-redis/redis/v8/internal"
-)
-
-// KeepTTL is a Redis KEEPTTL option to keep existing TTL, it requires your redis-server version >= 6.0,
-// otherwise you will receive an error: (error) ERR syntax error.
-// For example:
-//
-// rdb.Set(ctx, key, value, redis.KeepTTL)
-const KeepTTL = -1
-
-func usePrecise(dur time.Duration) bool {
- return dur < time.Second || dur%time.Second != 0
-}
-
-func formatMs(ctx context.Context, dur time.Duration) int64 {
- if dur > 0 && dur < time.Millisecond {
- internal.Logger.Printf(
- ctx,
- "specified duration is %s, but minimal supported value is %s - truncating to 1ms",
- dur, time.Millisecond,
- )
- return 1
- }
- return int64(dur / time.Millisecond)
-}
-
-func formatSec(ctx context.Context, dur time.Duration) int64 {
- if dur > 0 && dur < time.Second {
- internal.Logger.Printf(
- ctx,
- "specified duration is %s, but minimal supported value is %s - truncating to 1s",
- dur, time.Second,
- )
- return 1
- }
- return int64(dur / time.Second)
-}
-
-func appendArgs(dst, src []interface{}) []interface{} {
- if len(src) == 1 {
- return appendArg(dst, src[0])
- }
-
- dst = append(dst, src...)
- return dst
-}
-
-func appendArg(dst []interface{}, arg interface{}) []interface{} {
- switch arg := arg.(type) {
- case []string:
- for _, s := range arg {
- dst = append(dst, s)
- }
- return dst
- case []interface{}:
- dst = append(dst, arg...)
- return dst
- case map[string]interface{}:
- for k, v := range arg {
- dst = append(dst, k, v)
- }
- return dst
- case map[string]string:
- for k, v := range arg {
- dst = append(dst, k, v)
- }
- return dst
- default:
- return append(dst, arg)
- }
-}
-
-type Cmdable interface {
- Pipeline() Pipeliner
- Pipelined(ctx context.Context, fn func(Pipeliner) error) ([]Cmder, error)
-
- TxPipelined(ctx context.Context, fn func(Pipeliner) error) ([]Cmder, error)
- TxPipeline() Pipeliner
-
- Command(ctx context.Context) *CommandsInfoCmd
- ClientGetName(ctx context.Context) *StringCmd
- Echo(ctx context.Context, message interface{}) *StringCmd
- Ping(ctx context.Context) *StatusCmd
- Quit(ctx context.Context) *StatusCmd
- Del(ctx context.Context, keys ...string) *IntCmd
- Unlink(ctx context.Context, keys ...string) *IntCmd
- Dump(ctx context.Context, key string) *StringCmd
- Exists(ctx context.Context, keys ...string) *IntCmd
- Expire(ctx context.Context, key string, expiration time.Duration) *BoolCmd
- ExpireAt(ctx context.Context, key string, tm time.Time) *BoolCmd
- ExpireNX(ctx context.Context, key string, expiration time.Duration) *BoolCmd
- ExpireXX(ctx context.Context, key string, expiration time.Duration) *BoolCmd
- ExpireGT(ctx context.Context, key string, expiration time.Duration) *BoolCmd
- ExpireLT(ctx context.Context, key string, expiration time.Duration) *BoolCmd
- Keys(ctx context.Context, pattern string) *StringSliceCmd
- Migrate(ctx context.Context, host, port, key string, db int, timeout time.Duration) *StatusCmd
- Move(ctx context.Context, key string, db int) *BoolCmd
- ObjectRefCount(ctx context.Context, key string) *IntCmd
- ObjectEncoding(ctx context.Context, key string) *StringCmd
- ObjectIdleTime(ctx context.Context, key string) *DurationCmd
- Persist(ctx context.Context, key string) *BoolCmd
- PExpire(ctx context.Context, key string, expiration time.Duration) *BoolCmd
- PExpireAt(ctx context.Context, key string, tm time.Time) *BoolCmd
- PTTL(ctx context.Context, key string) *DurationCmd
- RandomKey(ctx context.Context) *StringCmd
- Rename(ctx context.Context, key, newkey string) *StatusCmd
- RenameNX(ctx context.Context, key, newkey string) *BoolCmd
- Restore(ctx context.Context, key string, ttl time.Duration, value string) *StatusCmd
- RestoreReplace(ctx context.Context, key string, ttl time.Duration, value string) *StatusCmd
- Sort(ctx context.Context, key string, sort *Sort) *StringSliceCmd
- SortStore(ctx context.Context, key, store string, sort *Sort) *IntCmd
- SortInterfaces(ctx context.Context, key string, sort *Sort) *SliceCmd
- Touch(ctx context.Context, keys ...string) *IntCmd
- TTL(ctx context.Context, key string) *DurationCmd
- Type(ctx context.Context, key string) *StatusCmd
- Append(ctx context.Context, key, value string) *IntCmd
- Decr(ctx context.Context, key string) *IntCmd
- DecrBy(ctx context.Context, key string, decrement int64) *IntCmd
- Get(ctx context.Context, key string) *StringCmd
- GetRange(ctx context.Context, key string, start, end int64) *StringCmd
- GetSet(ctx context.Context, key string, value interface{}) *StringCmd
- GetEx(ctx context.Context, key string, expiration time.Duration) *StringCmd
- GetDel(ctx context.Context, key string) *StringCmd
- Incr(ctx context.Context, key string) *IntCmd
- IncrBy(ctx context.Context, key string, value int64) *IntCmd
- IncrByFloat(ctx context.Context, key string, value float64) *FloatCmd
- MGet(ctx context.Context, keys ...string) *SliceCmd
- MSet(ctx context.Context, values ...interface{}) *StatusCmd
- MSetNX(ctx context.Context, values ...interface{}) *BoolCmd
- Set(ctx context.Context, key string, value interface{}, expiration time.Duration) *StatusCmd
- SetArgs(ctx context.Context, key string, value interface{}, a SetArgs) *StatusCmd
- // TODO: rename to SetEx
- SetEX(ctx context.Context, key string, value interface{}, expiration time.Duration) *StatusCmd
- SetNX(ctx context.Context, key string, value interface{}, expiration time.Duration) *BoolCmd
- SetXX(ctx context.Context, key string, value interface{}, expiration time.Duration) *BoolCmd
- SetRange(ctx context.Context, key string, offset int64, value string) *IntCmd
- StrLen(ctx context.Context, key string) *IntCmd
- Copy(ctx context.Context, sourceKey string, destKey string, db int, replace bool) *IntCmd
-
- GetBit(ctx context.Context, key string, offset int64) *IntCmd
- SetBit(ctx context.Context, key string, offset int64, value int) *IntCmd
- BitCount(ctx context.Context, key string, bitCount *BitCount) *IntCmd
- BitOpAnd(ctx context.Context, destKey string, keys ...string) *IntCmd
- BitOpOr(ctx context.Context, destKey string, keys ...string) *IntCmd
- BitOpXor(ctx context.Context, destKey string, keys ...string) *IntCmd
- BitOpNot(ctx context.Context, destKey string, key string) *IntCmd
- BitPos(ctx context.Context, key string, bit int64, pos ...int64) *IntCmd
- BitField(ctx context.Context, key string, args ...interface{}) *IntSliceCmd
-
- Scan(ctx context.Context, cursor uint64, match string, count int64) *ScanCmd
- ScanType(ctx context.Context, cursor uint64, match string, count int64, keyType string) *ScanCmd
- SScan(ctx context.Context, key string, cursor uint64, match string, count int64) *ScanCmd
- HScan(ctx context.Context, key string, cursor uint64, match string, count int64) *ScanCmd
- ZScan(ctx context.Context, key string, cursor uint64, match string, count int64) *ScanCmd
-
- HDel(ctx context.Context, key string, fields ...string) *IntCmd
- HExists(ctx context.Context, key, field string) *BoolCmd
- HGet(ctx context.Context, key, field string) *StringCmd
- HGetAll(ctx context.Context, key string) *StringStringMapCmd
- HIncrBy(ctx context.Context, key, field string, incr int64) *IntCmd
- HIncrByFloat(ctx context.Context, key, field string, incr float64) *FloatCmd
- HKeys(ctx context.Context, key string) *StringSliceCmd
- HLen(ctx context.Context, key string) *IntCmd
- HMGet(ctx context.Context, key string, fields ...string) *SliceCmd
- HSet(ctx context.Context, key string, values ...interface{}) *IntCmd
- HMSet(ctx context.Context, key string, values ...interface{}) *BoolCmd
- HSetNX(ctx context.Context, key, field string, value interface{}) *BoolCmd
- HVals(ctx context.Context, key string) *StringSliceCmd
- HRandField(ctx context.Context, key string, count int, withValues bool) *StringSliceCmd
-
- BLPop(ctx context.Context, timeout time.Duration, keys ...string) *StringSliceCmd
- BRPop(ctx context.Context, timeout time.Duration, keys ...string) *StringSliceCmd
- BRPopLPush(ctx context.Context, source, destination string, timeout time.Duration) *StringCmd
- LIndex(ctx context.Context, key string, index int64) *StringCmd
- LInsert(ctx context.Context, key, op string, pivot, value interface{}) *IntCmd
- LInsertBefore(ctx context.Context, key string, pivot, value interface{}) *IntCmd
- LInsertAfter(ctx context.Context, key string, pivot, value interface{}) *IntCmd
- LLen(ctx context.Context, key string) *IntCmd
- LPop(ctx context.Context, key string) *StringCmd
- LPopCount(ctx context.Context, key string, count int) *StringSliceCmd
- LPos(ctx context.Context, key string, value string, args LPosArgs) *IntCmd
- LPosCount(ctx context.Context, key string, value string, count int64, args LPosArgs) *IntSliceCmd
- LPush(ctx context.Context, key string, values ...interface{}) *IntCmd
- LPushX(ctx context.Context, key string, values ...interface{}) *IntCmd
- LRange(ctx context.Context, key string, start, stop int64) *StringSliceCmd
- LRem(ctx context.Context, key string, count int64, value interface{}) *IntCmd
- LSet(ctx context.Context, key string, index int64, value interface{}) *StatusCmd
- LTrim(ctx context.Context, key string, start, stop int64) *StatusCmd
- RPop(ctx context.Context, key string) *StringCmd
- RPopCount(ctx context.Context, key string, count int) *StringSliceCmd
- RPopLPush(ctx context.Context, source, destination string) *StringCmd
- RPush(ctx context.Context, key string, values ...interface{}) *IntCmd
- RPushX(ctx context.Context, key string, values ...interface{}) *IntCmd
- LMove(ctx context.Context, source, destination, srcpos, destpos string) *StringCmd
- BLMove(ctx context.Context, source, destination, srcpos, destpos string, timeout time.Duration) *StringCmd
-
- SAdd(ctx context.Context, key string, members ...interface{}) *IntCmd
- SCard(ctx context.Context, key string) *IntCmd
- SDiff(ctx context.Context, keys ...string) *StringSliceCmd
- SDiffStore(ctx context.Context, destination string, keys ...string) *IntCmd
- SInter(ctx context.Context, keys ...string) *StringSliceCmd
- SInterStore(ctx context.Context, destination string, keys ...string) *IntCmd
- SIsMember(ctx context.Context, key string, member interface{}) *BoolCmd
- SMIsMember(ctx context.Context, key string, members ...interface{}) *BoolSliceCmd
- SMembers(ctx context.Context, key string) *StringSliceCmd
- SMembersMap(ctx context.Context, key string) *StringStructMapCmd
- SMove(ctx context.Context, source, destination string, member interface{}) *BoolCmd
- SPop(ctx context.Context, key string) *StringCmd
- SPopN(ctx context.Context, key string, count int64) *StringSliceCmd
- SRandMember(ctx context.Context, key string) *StringCmd
- SRandMemberN(ctx context.Context, key string, count int64) *StringSliceCmd
- SRem(ctx context.Context, key string, members ...interface{}) *IntCmd
- SUnion(ctx context.Context, keys ...string) *StringSliceCmd
- SUnionStore(ctx context.Context, destination string, keys ...string) *IntCmd
-
- XAdd(ctx context.Context, a *XAddArgs) *StringCmd
- XDel(ctx context.Context, stream string, ids ...string) *IntCmd
- XLen(ctx context.Context, stream string) *IntCmd
- XRange(ctx context.Context, stream, start, stop string) *XMessageSliceCmd
- XRangeN(ctx context.Context, stream, start, stop string, count int64) *XMessageSliceCmd
- XRevRange(ctx context.Context, stream string, start, stop string) *XMessageSliceCmd
- XRevRangeN(ctx context.Context, stream string, start, stop string, count int64) *XMessageSliceCmd
- XRead(ctx context.Context, a *XReadArgs) *XStreamSliceCmd
- XReadStreams(ctx context.Context, streams ...string) *XStreamSliceCmd
- XGroupCreate(ctx context.Context, stream, group, start string) *StatusCmd
- XGroupCreateMkStream(ctx context.Context, stream, group, start string) *StatusCmd
- XGroupSetID(ctx context.Context, stream, group, start string) *StatusCmd
- XGroupDestroy(ctx context.Context, stream, group string) *IntCmd
- XGroupCreateConsumer(ctx context.Context, stream, group, consumer string) *IntCmd
- XGroupDelConsumer(ctx context.Context, stream, group, consumer string) *IntCmd
- XReadGroup(ctx context.Context, a *XReadGroupArgs) *XStreamSliceCmd
- XAck(ctx context.Context, stream, group string, ids ...string) *IntCmd
- XPending(ctx context.Context, stream, group string) *XPendingCmd
- XPendingExt(ctx context.Context, a *XPendingExtArgs) *XPendingExtCmd
- XClaim(ctx context.Context, a *XClaimArgs) *XMessageSliceCmd
- XClaimJustID(ctx context.Context, a *XClaimArgs) *StringSliceCmd
- XAutoClaim(ctx context.Context, a *XAutoClaimArgs) *XAutoClaimCmd
- XAutoClaimJustID(ctx context.Context, a *XAutoClaimArgs) *XAutoClaimJustIDCmd
-
- // TODO: XTrim and XTrimApprox remove in v9.
- XTrim(ctx context.Context, key string, maxLen int64) *IntCmd
- XTrimApprox(ctx context.Context, key string, maxLen int64) *IntCmd
- XTrimMaxLen(ctx context.Context, key string, maxLen int64) *IntCmd
- XTrimMaxLenApprox(ctx context.Context, key string, maxLen, limit int64) *IntCmd
- XTrimMinID(ctx context.Context, key string, minID string) *IntCmd
- XTrimMinIDApprox(ctx context.Context, key string, minID string, limit int64) *IntCmd
- XInfoGroups(ctx context.Context, key string) *XInfoGroupsCmd
- XInfoStream(ctx context.Context, key string) *XInfoStreamCmd
- XInfoStreamFull(ctx context.Context, key string, count int) *XInfoStreamFullCmd
- XInfoConsumers(ctx context.Context, key string, group string) *XInfoConsumersCmd
-
- BZPopMax(ctx context.Context, timeout time.Duration, keys ...string) *ZWithKeyCmd
- BZPopMin(ctx context.Context, timeout time.Duration, keys ...string) *ZWithKeyCmd
-
- // TODO: remove
- // ZAddCh
- // ZIncr
- // ZAddNXCh
- // ZAddXXCh
- // ZIncrNX
- // ZIncrXX
- // in v9.
- // use ZAddArgs and ZAddArgsIncr.
-
- ZAdd(ctx context.Context, key string, members ...*Z) *IntCmd
- ZAddNX(ctx context.Context, key string, members ...*Z) *IntCmd
- ZAddXX(ctx context.Context, key string, members ...*Z) *IntCmd
- ZAddCh(ctx context.Context, key string, members ...*Z) *IntCmd
- ZAddNXCh(ctx context.Context, key string, members ...*Z) *IntCmd
- ZAddXXCh(ctx context.Context, key string, members ...*Z) *IntCmd
- ZAddArgs(ctx context.Context, key string, args ZAddArgs) *IntCmd
- ZAddArgsIncr(ctx context.Context, key string, args ZAddArgs) *FloatCmd
- ZIncr(ctx context.Context, key string, member *Z) *FloatCmd
- ZIncrNX(ctx context.Context, key string, member *Z) *FloatCmd
- ZIncrXX(ctx context.Context, key string, member *Z) *FloatCmd
- ZCard(ctx context.Context, key string) *IntCmd
- ZCount(ctx context.Context, key, min, max string) *IntCmd
- ZLexCount(ctx context.Context, key, min, max string) *IntCmd
- ZIncrBy(ctx context.Context, key string, increment float64, member string) *FloatCmd
- ZInter(ctx context.Context, store *ZStore) *StringSliceCmd
- ZInterWithScores(ctx context.Context, store *ZStore) *ZSliceCmd
- ZInterStore(ctx context.Context, destination string, store *ZStore) *IntCmd
- ZMScore(ctx context.Context, key string, members ...string) *FloatSliceCmd
- ZPopMax(ctx context.Context, key string, count ...int64) *ZSliceCmd
- ZPopMin(ctx context.Context, key string, count ...int64) *ZSliceCmd
- ZRange(ctx context.Context, key string, start, stop int64) *StringSliceCmd
- ZRangeWithScores(ctx context.Context, key string, start, stop int64) *ZSliceCmd
- ZRangeByScore(ctx context.Context, key string, opt *ZRangeBy) *StringSliceCmd
- ZRangeByLex(ctx context.Context, key string, opt *ZRangeBy) *StringSliceCmd
- ZRangeByScoreWithScores(ctx context.Context, key string, opt *ZRangeBy) *ZSliceCmd
- ZRangeArgs(ctx context.Context, z ZRangeArgs) *StringSliceCmd
- ZRangeArgsWithScores(ctx context.Context, z ZRangeArgs) *ZSliceCmd
- ZRangeStore(ctx context.Context, dst string, z ZRangeArgs) *IntCmd
- ZRank(ctx context.Context, key, member string) *IntCmd
- ZRem(ctx context.Context, key string, members ...interface{}) *IntCmd
- ZRemRangeByRank(ctx context.Context, key string, start, stop int64) *IntCmd
- ZRemRangeByScore(ctx context.Context, key, min, max string) *IntCmd
- ZRemRangeByLex(ctx context.Context, key, min, max string) *IntCmd
- ZRevRange(ctx context.Context, key string, start, stop int64) *StringSliceCmd
- ZRevRangeWithScores(ctx context.Context, key string, start, stop int64) *ZSliceCmd
- ZRevRangeByScore(ctx context.Context, key string, opt *ZRangeBy) *StringSliceCmd
- ZRevRangeByLex(ctx context.Context, key string, opt *ZRangeBy) *StringSliceCmd
- ZRevRangeByScoreWithScores(ctx context.Context, key string, opt *ZRangeBy) *ZSliceCmd
- ZRevRank(ctx context.Context, key, member string) *IntCmd
- ZScore(ctx context.Context, key, member string) *FloatCmd
- ZUnionStore(ctx context.Context, dest string, store *ZStore) *IntCmd
- ZUnion(ctx context.Context, store ZStore) *StringSliceCmd
- ZUnionWithScores(ctx context.Context, store ZStore) *ZSliceCmd
- ZRandMember(ctx context.Context, key string, count int, withScores bool) *StringSliceCmd
- ZDiff(ctx context.Context, keys ...string) *StringSliceCmd
- ZDiffWithScores(ctx context.Context, keys ...string) *ZSliceCmd
- ZDiffStore(ctx context.Context, destination string, keys ...string) *IntCmd
-
- PFAdd(ctx context.Context, key string, els ...interface{}) *IntCmd
- PFCount(ctx context.Context, keys ...string) *IntCmd
- PFMerge(ctx context.Context, dest string, keys ...string) *StatusCmd
-
- BgRewriteAOF(ctx context.Context) *StatusCmd
- BgSave(ctx context.Context) *StatusCmd
- ClientKill(ctx context.Context, ipPort string) *StatusCmd
- ClientKillByFilter(ctx context.Context, keys ...string) *IntCmd
- ClientList(ctx context.Context) *StringCmd
- ClientPause(ctx context.Context, dur time.Duration) *BoolCmd
- ClientID(ctx context.Context) *IntCmd
- ConfigGet(ctx context.Context, parameter string) *SliceCmd
- ConfigResetStat(ctx context.Context) *StatusCmd
- ConfigSet(ctx context.Context, parameter, value string) *StatusCmd
- ConfigRewrite(ctx context.Context) *StatusCmd
- DBSize(ctx context.Context) *IntCmd
- FlushAll(ctx context.Context) *StatusCmd
- FlushAllAsync(ctx context.Context) *StatusCmd
- FlushDB(ctx context.Context) *StatusCmd
- FlushDBAsync(ctx context.Context) *StatusCmd
- Info(ctx context.Context, section ...string) *StringCmd
- LastSave(ctx context.Context) *IntCmd
- Save(ctx context.Context) *StatusCmd
- Shutdown(ctx context.Context) *StatusCmd
- ShutdownSave(ctx context.Context) *StatusCmd
- ShutdownNoSave(ctx context.Context) *StatusCmd
- SlaveOf(ctx context.Context, host, port string) *StatusCmd
- Time(ctx context.Context) *TimeCmd
- DebugObject(ctx context.Context, key string) *StringCmd
- ReadOnly(ctx context.Context) *StatusCmd
- ReadWrite(ctx context.Context) *StatusCmd
- MemoryUsage(ctx context.Context, key string, samples ...int) *IntCmd
-
- Eval(ctx context.Context, script string, keys []string, args ...interface{}) *Cmd
- EvalSha(ctx context.Context, sha1 string, keys []string, args ...interface{}) *Cmd
- ScriptExists(ctx context.Context, hashes ...string) *BoolSliceCmd
- ScriptFlush(ctx context.Context) *StatusCmd
- ScriptKill(ctx context.Context) *StatusCmd
- ScriptLoad(ctx context.Context, script string) *StringCmd
-
- Publish(ctx context.Context, channel string, message interface{}) *IntCmd
- PubSubChannels(ctx context.Context, pattern string) *StringSliceCmd
- PubSubNumSub(ctx context.Context, channels ...string) *StringIntMapCmd
- PubSubNumPat(ctx context.Context) *IntCmd
-
- ClusterSlots(ctx context.Context) *ClusterSlotsCmd
- ClusterNodes(ctx context.Context) *StringCmd
- ClusterMeet(ctx context.Context, host, port string) *StatusCmd
- ClusterForget(ctx context.Context, nodeID string) *StatusCmd
- ClusterReplicate(ctx context.Context, nodeID string) *StatusCmd
- ClusterResetSoft(ctx context.Context) *StatusCmd
- ClusterResetHard(ctx context.Context) *StatusCmd
- ClusterInfo(ctx context.Context) *StringCmd
- ClusterKeySlot(ctx context.Context, key string) *IntCmd
- ClusterGetKeysInSlot(ctx context.Context, slot int, count int) *StringSliceCmd
- ClusterCountFailureReports(ctx context.Context, nodeID string) *IntCmd
- ClusterCountKeysInSlot(ctx context.Context, slot int) *IntCmd
- ClusterDelSlots(ctx context.Context, slots ...int) *StatusCmd
- ClusterDelSlotsRange(ctx context.Context, min, max int) *StatusCmd
- ClusterSaveConfig(ctx context.Context) *StatusCmd
- ClusterSlaves(ctx context.Context, nodeID string) *StringSliceCmd
- ClusterFailover(ctx context.Context) *StatusCmd
- ClusterAddSlots(ctx context.Context, slots ...int) *StatusCmd
- ClusterAddSlotsRange(ctx context.Context, min, max int) *StatusCmd
-
- GeoAdd(ctx context.Context, key string, geoLocation ...*GeoLocation) *IntCmd
- GeoPos(ctx context.Context, key string, members ...string) *GeoPosCmd
- GeoRadius(ctx context.Context, key string, longitude, latitude float64, query *GeoRadiusQuery) *GeoLocationCmd
- GeoRadiusStore(ctx context.Context, key string, longitude, latitude float64, query *GeoRadiusQuery) *IntCmd
- GeoRadiusByMember(ctx context.Context, key, member string, query *GeoRadiusQuery) *GeoLocationCmd
- GeoRadiusByMemberStore(ctx context.Context, key, member string, query *GeoRadiusQuery) *IntCmd
- GeoSearch(ctx context.Context, key string, q *GeoSearchQuery) *StringSliceCmd
- GeoSearchLocation(ctx context.Context, key string, q *GeoSearchLocationQuery) *GeoSearchLocationCmd
- GeoSearchStore(ctx context.Context, key, store string, q *GeoSearchStoreQuery) *IntCmd
- GeoDist(ctx context.Context, key string, member1, member2, unit string) *FloatCmd
- GeoHash(ctx context.Context, key string, members ...string) *StringSliceCmd
-}
-
-type StatefulCmdable interface {
- Cmdable
- Auth(ctx context.Context, password string) *StatusCmd
- AuthACL(ctx context.Context, username, password string) *StatusCmd
- Select(ctx context.Context, index int) *StatusCmd
- SwapDB(ctx context.Context, index1, index2 int) *StatusCmd
- ClientSetName(ctx context.Context, name string) *BoolCmd
-}
-
-var (
- _ Cmdable = (*Client)(nil)
- _ Cmdable = (*Tx)(nil)
- _ Cmdable = (*Ring)(nil)
- _ Cmdable = (*ClusterClient)(nil)
-)
-
-type cmdable func(ctx context.Context, cmd Cmder) error
-
-type statefulCmdable func(ctx context.Context, cmd Cmder) error
-
-//------------------------------------------------------------------------------
-
-func (c statefulCmdable) Auth(ctx context.Context, password string) *StatusCmd {
- cmd := NewStatusCmd(ctx, "auth", password)
- _ = c(ctx, cmd)
- return cmd
-}
-
-// AuthACL Perform an AUTH command, using the given user and pass.
-// Should be used to authenticate the current connection with one of the connections defined in the ACL list
-// when connecting to a Redis 6.0 instance, or greater, that is using the Redis ACL system.
-func (c statefulCmdable) AuthACL(ctx context.Context, username, password string) *StatusCmd {
- cmd := NewStatusCmd(ctx, "auth", username, password)
- _ = c(ctx, cmd)
- return cmd
-}
-
-func (c cmdable) Wait(ctx context.Context, numSlaves int, timeout time.Duration) *IntCmd {
- cmd := NewIntCmd(ctx, "wait", numSlaves, int(timeout/time.Millisecond))
- cmd.setReadTimeout(timeout)
- _ = c(ctx, cmd)
- return cmd
-}
-
-func (c statefulCmdable) Select(ctx context.Context, index int) *StatusCmd {
- cmd := NewStatusCmd(ctx, "select", index)
- _ = c(ctx, cmd)
- return cmd
-}
-
-func (c statefulCmdable) SwapDB(ctx context.Context, index1, index2 int) *StatusCmd {
- cmd := NewStatusCmd(ctx, "swapdb", index1, index2)
- _ = c(ctx, cmd)
- return cmd
-}
-
-// ClientSetName assigns a name to the connection.
-func (c statefulCmdable) ClientSetName(ctx context.Context, name string) *BoolCmd {
- cmd := NewBoolCmd(ctx, "client", "setname", name)
- _ = c(ctx, cmd)
- return cmd
-}
-
-//------------------------------------------------------------------------------
-
-func (c cmdable) Command(ctx context.Context) *CommandsInfoCmd {
- cmd := NewCommandsInfoCmd(ctx, "command")
- _ = c(ctx, cmd)
- return cmd
-}
-
-// ClientGetName returns the name of the connection.
-func (c cmdable) ClientGetName(ctx context.Context) *StringCmd {
- cmd := NewStringCmd(ctx, "client", "getname")
- _ = c(ctx, cmd)
- return cmd
-}
-
-func (c cmdable) Echo(ctx context.Context, message interface{}) *StringCmd {
- cmd := NewStringCmd(ctx, "echo", message)
- _ = c(ctx, cmd)
- return cmd
-}
-
-func (c cmdable) Ping(ctx context.Context) *StatusCmd {
- cmd := NewStatusCmd(ctx, "ping")
- _ = c(ctx, cmd)
- return cmd
-}
-
-func (c cmdable) Quit(_ context.Context) *StatusCmd {
- panic("not implemented")
-}
-
-func (c cmdable) Del(ctx context.Context, keys ...string) *IntCmd {
- args := make([]interface{}, 1+len(keys))
- args[0] = "del"
- for i, key := range keys {
- args[1+i] = key
- }
- cmd := NewIntCmd(ctx, args...)
- _ = c(ctx, cmd)
- return cmd
-}
-
-func (c cmdable) Unlink(ctx context.Context, keys ...string) *IntCmd {
- args := make([]interface{}, 1+len(keys))
- args[0] = "unlink"
- for i, key := range keys {
- args[1+i] = key
- }
- cmd := NewIntCmd(ctx, args...)
- _ = c(ctx, cmd)
- return cmd
-}
-
-func (c cmdable) Dump(ctx context.Context, key string) *StringCmd {
- cmd := NewStringCmd(ctx, "dump", key)
- _ = c(ctx, cmd)
- return cmd
-}
-
-func (c cmdable) Exists(ctx context.Context, keys ...string) *IntCmd {
- args := make([]interface{}, 1+len(keys))
- args[0] = "exists"
- for i, key := range keys {
- args[1+i] = key
- }
- cmd := NewIntCmd(ctx, args...)
- _ = c(ctx, cmd)
- return cmd
-}
-
-func (c cmdable) Expire(ctx context.Context, key string, expiration time.Duration) *BoolCmd {
- return c.expire(ctx, key, expiration, "")
-}
-
-func (c cmdable) ExpireNX(ctx context.Context, key string, expiration time.Duration) *BoolCmd {
- return c.expire(ctx, key, expiration, "NX")
-}
-
-func (c cmdable) ExpireXX(ctx context.Context, key string, expiration time.Duration) *BoolCmd {
- return c.expire(ctx, key, expiration, "XX")
-}
-
-func (c cmdable) ExpireGT(ctx context.Context, key string, expiration time.Duration) *BoolCmd {
- return c.expire(ctx, key, expiration, "GT")
-}
-
-func (c cmdable) ExpireLT(ctx context.Context, key string, expiration time.Duration) *BoolCmd {
- return c.expire(ctx, key, expiration, "LT")
-}
-
-func (c cmdable) expire(
- ctx context.Context, key string, expiration time.Duration, mode string,
-) *BoolCmd {
- args := make([]interface{}, 3, 4)
- args[0] = "expire"
- args[1] = key
- args[2] = formatSec(ctx, expiration)
- if mode != "" {
- args = append(args, mode)
- }
-
- cmd := NewBoolCmd(ctx, args...)
- _ = c(ctx, cmd)
- return cmd
-}
-
-func (c cmdable) ExpireAt(ctx context.Context, key string, tm time.Time) *BoolCmd {
- cmd := NewBoolCmd(ctx, "expireat", key, tm.Unix())
- _ = c(ctx, cmd)
- return cmd
-}
-
-func (c cmdable) Keys(ctx context.Context, pattern string) *StringSliceCmd {
- cmd := NewStringSliceCmd(ctx, "keys", pattern)
- _ = c(ctx, cmd)
- return cmd
-}
-
-func (c cmdable) Migrate(ctx context.Context, host, port, key string, db int, timeout time.Duration) *StatusCmd {
- cmd := NewStatusCmd(
- ctx,
- "migrate",
- host,
- port,
- key,
- db,
- formatMs(ctx, timeout),
- )
- cmd.setReadTimeout(timeout)
- _ = c(ctx, cmd)
- return cmd
-}
-
-func (c cmdable) Move(ctx context.Context, key string, db int) *BoolCmd {
- cmd := NewBoolCmd(ctx, "move", key, db)
- _ = c(ctx, cmd)
- return cmd
-}
-
-func (c cmdable) ObjectRefCount(ctx context.Context, key string) *IntCmd {
- cmd := NewIntCmd(ctx, "object", "refcount", key)
- _ = c(ctx, cmd)
- return cmd
-}
-
-func (c cmdable) ObjectEncoding(ctx context.Context, key string) *StringCmd {
- cmd := NewStringCmd(ctx, "object", "encoding", key)
- _ = c(ctx, cmd)
- return cmd
-}
-
-func (c cmdable) ObjectIdleTime(ctx context.Context, key string) *DurationCmd {
- cmd := NewDurationCmd(ctx, time.Second, "object", "idletime", key)
- _ = c(ctx, cmd)
- return cmd
-}
-
-func (c cmdable) Persist(ctx context.Context, key string) *BoolCmd {
- cmd := NewBoolCmd(ctx, "persist", key)
- _ = c(ctx, cmd)
- return cmd
-}
-
-func (c cmdable) PExpire(ctx context.Context, key string, expiration time.Duration) *BoolCmd {
- cmd := NewBoolCmd(ctx, "pexpire", key, formatMs(ctx, expiration))
- _ = c(ctx, cmd)
- return cmd
-}
-
-func (c cmdable) PExpireAt(ctx context.Context, key string, tm time.Time) *BoolCmd {
- cmd := NewBoolCmd(
- ctx,
- "pexpireat",
- key,
- tm.UnixNano()/int64(time.Millisecond),
- )
- _ = c(ctx, cmd)
- return cmd
-}
-
-func (c cmdable) PTTL(ctx context.Context, key string) *DurationCmd {
- cmd := NewDurationCmd(ctx, time.Millisecond, "pttl", key)
- _ = c(ctx, cmd)
- return cmd
-}
-
-func (c cmdable) RandomKey(ctx context.Context) *StringCmd {
- cmd := NewStringCmd(ctx, "randomkey")
- _ = c(ctx, cmd)
- return cmd
-}
-
-func (c cmdable) Rename(ctx context.Context, key, newkey string) *StatusCmd {
- cmd := NewStatusCmd(ctx, "rename", key, newkey)
- _ = c(ctx, cmd)
- return cmd
-}
-
-func (c cmdable) RenameNX(ctx context.Context, key, newkey string) *BoolCmd {
- cmd := NewBoolCmd(ctx, "renamenx", key, newkey)
- _ = c(ctx, cmd)
- return cmd
-}
-
-func (c cmdable) Restore(ctx context.Context, key string, ttl time.Duration, value string) *StatusCmd {
- cmd := NewStatusCmd(
- ctx,
- "restore",
- key,
- formatMs(ctx, ttl),
- value,
- )
- _ = c(ctx, cmd)
- return cmd
-}
-
-func (c cmdable) RestoreReplace(ctx context.Context, key string, ttl time.Duration, value string) *StatusCmd {
- cmd := NewStatusCmd(
- ctx,
- "restore",
- key,
- formatMs(ctx, ttl),
- value,
- "replace",
- )
- _ = c(ctx, cmd)
- return cmd
-}
-
-type Sort struct {
- By string
- Offset, Count int64
- Get []string
- Order string
- Alpha bool
-}
-
-func (sort *Sort) args(key string) []interface{} {
- args := []interface{}{"sort", key}
- if sort.By != "" {
- args = append(args, "by", sort.By)
- }
- if sort.Offset != 0 || sort.Count != 0 {
- args = append(args, "limit", sort.Offset, sort.Count)
- }
- for _, get := range sort.Get {
- args = append(args, "get", get)
- }
- if sort.Order != "" {
- args = append(args, sort.Order)
- }
- if sort.Alpha {
- args = append(args, "alpha")
- }
- return args
-}
-
-func (c cmdable) Sort(ctx context.Context, key string, sort *Sort) *StringSliceCmd {
- cmd := NewStringSliceCmd(ctx, sort.args(key)...)
- _ = c(ctx, cmd)
- return cmd
-}
-
-func (c cmdable) SortStore(ctx context.Context, key, store string, sort *Sort) *IntCmd {
- args := sort.args(key)
- if store != "" {
- args = append(args, "store", store)
- }
- cmd := NewIntCmd(ctx, args...)
- _ = c(ctx, cmd)
- return cmd
-}
-
-func (c cmdable) SortInterfaces(ctx context.Context, key string, sort *Sort) *SliceCmd {
- cmd := NewSliceCmd(ctx, sort.args(key)...)
- _ = c(ctx, cmd)
- return cmd
-}
-
-func (c cmdable) Touch(ctx context.Context, keys ...string) *IntCmd {
- args := make([]interface{}, len(keys)+1)
- args[0] = "touch"
- for i, key := range keys {
- args[i+1] = key
- }
- cmd := NewIntCmd(ctx, args...)
- _ = c(ctx, cmd)
- return cmd
-}
-
-func (c cmdable) TTL(ctx context.Context, key string) *DurationCmd {
- cmd := NewDurationCmd(ctx, time.Second, "ttl", key)
- _ = c(ctx, cmd)
- return cmd
-}
-
-func (c cmdable) Type(ctx context.Context, key string) *StatusCmd {
- cmd := NewStatusCmd(ctx, "type", key)
- _ = c(ctx, cmd)
- return cmd
-}
-
-func (c cmdable) Append(ctx context.Context, key, value string) *IntCmd {
- cmd := NewIntCmd(ctx, "append", key, value)
- _ = c(ctx, cmd)
- return cmd
-}
-
-func (c cmdable) Decr(ctx context.Context, key string) *IntCmd {
- cmd := NewIntCmd(ctx, "decr", key)
- _ = c(ctx, cmd)
- return cmd
-}
-
-func (c cmdable) DecrBy(ctx context.Context, key string, decrement int64) *IntCmd {
- cmd := NewIntCmd(ctx, "decrby", key, decrement)
- _ = c(ctx, cmd)
- return cmd
-}
-
-// Get Redis `GET key` command. It returns redis.Nil error when key does not exist.
-func (c cmdable) Get(ctx context.Context, key string) *StringCmd {
- cmd := NewStringCmd(ctx, "get", key)
- _ = c(ctx, cmd)
- return cmd
-}
-
-func (c cmdable) GetRange(ctx context.Context, key string, start, end int64) *StringCmd {
- cmd := NewStringCmd(ctx, "getrange", key, start, end)
- _ = c(ctx, cmd)
- return cmd
-}
-
-func (c cmdable) GetSet(ctx context.Context, key string, value interface{}) *StringCmd {
- cmd := NewStringCmd(ctx, "getset", key, value)
- _ = c(ctx, cmd)
- return cmd
-}
-
-// GetEx An expiration of zero removes the TTL associated with the key (i.e. GETEX key persist).
-// Requires Redis >= 6.2.0.
-func (c cmdable) GetEx(ctx context.Context, key string, expiration time.Duration) *StringCmd {
- args := make([]interface{}, 0, 4)
- args = append(args, "getex", key)
- if expiration > 0 {
- if usePrecise(expiration) {
- args = append(args, "px", formatMs(ctx, expiration))
- } else {
- args = append(args, "ex", formatSec(ctx, expiration))
- }
- } else if expiration == 0 {
- args = append(args, "persist")
- }
-
- cmd := NewStringCmd(ctx, args...)
- _ = c(ctx, cmd)
- return cmd
-}
-
-// GetDel redis-server version >= 6.2.0.
-func (c cmdable) GetDel(ctx context.Context, key string) *StringCmd {
- cmd := NewStringCmd(ctx, "getdel", key)
- _ = c(ctx, cmd)
- return cmd
-}
-
-func (c cmdable) Incr(ctx context.Context, key string) *IntCmd {
- cmd := NewIntCmd(ctx, "incr", key)
- _ = c(ctx, cmd)
- return cmd
-}
-
-func (c cmdable) IncrBy(ctx context.Context, key string, value int64) *IntCmd {
- cmd := NewIntCmd(ctx, "incrby", key, value)
- _ = c(ctx, cmd)
- return cmd
-}
-
-func (c cmdable) IncrByFloat(ctx context.Context, key string, value float64) *FloatCmd {
- cmd := NewFloatCmd(ctx, "incrbyfloat", key, value)
- _ = c(ctx, cmd)
- return cmd
-}
-
-func (c cmdable) MGet(ctx context.Context, keys ...string) *SliceCmd {
- args := make([]interface{}, 1+len(keys))
- args[0] = "mget"
- for i, key := range keys {
- args[1+i] = key
- }
- cmd := NewSliceCmd(ctx, args...)
- _ = c(ctx, cmd)
- return cmd
-}
-
-// MSet is like Set but accepts multiple values:
-// - MSet("key1", "value1", "key2", "value2")
-// - MSet([]string{"key1", "value1", "key2", "value2"})
-// - MSet(map[string]interface{}{"key1": "value1", "key2": "value2"})
-func (c cmdable) MSet(ctx context.Context, values ...interface{}) *StatusCmd {
- args := make([]interface{}, 1, 1+len(values))
- args[0] = "mset"
- args = appendArgs(args, values)
- cmd := NewStatusCmd(ctx, args...)
- _ = c(ctx, cmd)
- return cmd
-}
-
-// MSetNX is like SetNX but accepts multiple values:
-// - MSetNX("key1", "value1", "key2", "value2")
-// - MSetNX([]string{"key1", "value1", "key2", "value2"})
-// - MSetNX(map[string]interface{}{"key1": "value1", "key2": "value2"})
-func (c cmdable) MSetNX(ctx context.Context, values ...interface{}) *BoolCmd {
- args := make([]interface{}, 1, 1+len(values))
- args[0] = "msetnx"
- args = appendArgs(args, values)
- cmd := NewBoolCmd(ctx, args...)
- _ = c(ctx, cmd)
- return cmd
-}
-
-// Set Redis `SET key value [expiration]` command.
-// Use expiration for `SETEX`-like behavior.
-//
-// Zero expiration means the key has no expiration time.
-// KeepTTL is a Redis KEEPTTL option to keep existing TTL, it requires your redis-server version >= 6.0,
-// otherwise you will receive an error: (error) ERR syntax error.
-func (c cmdable) Set(ctx context.Context, key string, value interface{}, expiration time.Duration) *StatusCmd {
- args := make([]interface{}, 3, 5)
- args[0] = "set"
- args[1] = key
- args[2] = value
- if expiration > 0 {
- if usePrecise(expiration) {
- args = append(args, "px", formatMs(ctx, expiration))
- } else {
- args = append(args, "ex", formatSec(ctx, expiration))
- }
- } else if expiration == KeepTTL {
- args = append(args, "keepttl")
- }
-
- cmd := NewStatusCmd(ctx, args...)
- _ = c(ctx, cmd)
- return cmd
-}
-
-// SetArgs provides arguments for the SetArgs function.
-type SetArgs struct {
- // Mode can be `NX` or `XX` or empty.
- Mode string
-
- // Zero `TTL` or `Expiration` means that the key has no expiration time.
- TTL time.Duration
- ExpireAt time.Time
-
- // When Get is true, the command returns the old value stored at key, or nil when key did not exist.
- Get bool
-
- // KeepTTL is a Redis KEEPTTL option to keep existing TTL, it requires your redis-server version >= 6.0,
- // otherwise you will receive an error: (error) ERR syntax error.
- KeepTTL bool
-}
-
-// SetArgs supports all the options that the SET command supports.
-// It is the alternative to the Set function when you want
-// to have more control over the options.
-func (c cmdable) SetArgs(ctx context.Context, key string, value interface{}, a SetArgs) *StatusCmd {
- args := []interface{}{"set", key, value}
-
- if a.KeepTTL {
- args = append(args, "keepttl")
- }
-
- if !a.ExpireAt.IsZero() {
- args = append(args, "exat", a.ExpireAt.Unix())
- }
- if a.TTL > 0 {
- if usePrecise(a.TTL) {
- args = append(args, "px", formatMs(ctx, a.TTL))
- } else {
- args = append(args, "ex", formatSec(ctx, a.TTL))
- }
- }
-
- if a.Mode != "" {
- args = append(args, a.Mode)
- }
-
- if a.Get {
- args = append(args, "get")
- }
-
- cmd := NewStatusCmd(ctx, args...)
- _ = c(ctx, cmd)
- return cmd
-}
-
-// SetEX Redis `SETEX key expiration value` command.
-func (c cmdable) SetEX(ctx context.Context, key string, value interface{}, expiration time.Duration) *StatusCmd {
- cmd := NewStatusCmd(ctx, "setex", key, formatSec(ctx, expiration), value)
- _ = c(ctx, cmd)
- return cmd
-}
-
-// SetNX Redis `SET key value [expiration] NX` command.
-//
-// Zero expiration means the key has no expiration time.
-// KeepTTL is a Redis KEEPTTL option to keep existing TTL, it requires your redis-server version >= 6.0,
-// otherwise you will receive an error: (error) ERR syntax error.
-func (c cmdable) SetNX(ctx context.Context, key string, value interface{}, expiration time.Duration) *BoolCmd {
- var cmd *BoolCmd
- switch expiration {
- case 0:
- // Use old `SETNX` to support old Redis versions.
- cmd = NewBoolCmd(ctx, "setnx", key, value)
- case KeepTTL:
- cmd = NewBoolCmd(ctx, "set", key, value, "keepttl", "nx")
- default:
- if usePrecise(expiration) {
- cmd = NewBoolCmd(ctx, "set", key, value, "px", formatMs(ctx, expiration), "nx")
- } else {
- cmd = NewBoolCmd(ctx, "set", key, value, "ex", formatSec(ctx, expiration), "nx")
- }
- }
-
- _ = c(ctx, cmd)
- return cmd
-}
-
-// SetXX Redis `SET key value [expiration] XX` command.
-//
-// Zero expiration means the key has no expiration time.
-// KeepTTL is a Redis KEEPTTL option to keep existing TTL, it requires your redis-server version >= 6.0,
-// otherwise you will receive an error: (error) ERR syntax error.
-func (c cmdable) SetXX(ctx context.Context, key string, value interface{}, expiration time.Duration) *BoolCmd {
- var cmd *BoolCmd
- switch expiration {
- case 0:
- cmd = NewBoolCmd(ctx, "set", key, value, "xx")
- case KeepTTL:
- cmd = NewBoolCmd(ctx, "set", key, value, "keepttl", "xx")
- default:
- if usePrecise(expiration) {
- cmd = NewBoolCmd(ctx, "set", key, value, "px", formatMs(ctx, expiration), "xx")
- } else {
- cmd = NewBoolCmd(ctx, "set", key, value, "ex", formatSec(ctx, expiration), "xx")
- }
- }
-
- _ = c(ctx, cmd)
- return cmd
-}
-
-func (c cmdable) SetRange(ctx context.Context, key string, offset int64, value string) *IntCmd {
- cmd := NewIntCmd(ctx, "setrange", key, offset, value)
- _ = c(ctx, cmd)
- return cmd
-}
-
-func (c cmdable) StrLen(ctx context.Context, key string) *IntCmd {
- cmd := NewIntCmd(ctx, "strlen", key)
- _ = c(ctx, cmd)
- return cmd
-}
-
-func (c cmdable) Copy(ctx context.Context, sourceKey, destKey string, db int, replace bool) *IntCmd {
- args := []interface{}{"copy", sourceKey, destKey, "DB", db}
- if replace {
- args = append(args, "REPLACE")
- }
- cmd := NewIntCmd(ctx, args...)
- _ = c(ctx, cmd)
- return cmd
-}
-
-//------------------------------------------------------------------------------
-
-func (c cmdable) GetBit(ctx context.Context, key string, offset int64) *IntCmd {
- cmd := NewIntCmd(ctx, "getbit", key, offset)
- _ = c(ctx, cmd)
- return cmd
-}
-
-func (c cmdable) SetBit(ctx context.Context, key string, offset int64, value int) *IntCmd {
- cmd := NewIntCmd(
- ctx,
- "setbit",
- key,
- offset,
- value,
- )
- _ = c(ctx, cmd)
- return cmd
-}
-
-type BitCount struct {
- Start, End int64
-}
-
-func (c cmdable) BitCount(ctx context.Context, key string, bitCount *BitCount) *IntCmd {
- args := []interface{}{"bitcount", key}
- if bitCount != nil {
- args = append(
- args,
- bitCount.Start,
- bitCount.End,
- )
- }
- cmd := NewIntCmd(ctx, args...)
- _ = c(ctx, cmd)
- return cmd
-}
-
-func (c cmdable) bitOp(ctx context.Context, op, destKey string, keys ...string) *IntCmd {
- args := make([]interface{}, 3+len(keys))
- args[0] = "bitop"
- args[1] = op
- args[2] = destKey
- for i, key := range keys {
- args[3+i] = key
- }
- cmd := NewIntCmd(ctx, args...)
- _ = c(ctx, cmd)
- return cmd
-}
-
-func (c cmdable) BitOpAnd(ctx context.Context, destKey string, keys ...string) *IntCmd {
- return c.bitOp(ctx, "and", destKey, keys...)
-}
-
-func (c cmdable) BitOpOr(ctx context.Context, destKey string, keys ...string) *IntCmd {
- return c.bitOp(ctx, "or", destKey, keys...)
-}
-
-func (c cmdable) BitOpXor(ctx context.Context, destKey string, keys ...string) *IntCmd {
- return c.bitOp(ctx, "xor", destKey, keys...)
-}
-
-func (c cmdable) BitOpNot(ctx context.Context, destKey string, key string) *IntCmd {
- return c.bitOp(ctx, "not", destKey, key)
-}
-
-func (c cmdable) BitPos(ctx context.Context, key string, bit int64, pos ...int64) *IntCmd {
- args := make([]interface{}, 3+len(pos))
- args[0] = "bitpos"
- args[1] = key
- args[2] = bit
- switch len(pos) {
- case 0:
- case 1:
- args[3] = pos[0]
- case 2:
- args[3] = pos[0]
- args[4] = pos[1]
- default:
- panic("too many arguments")
- }
- cmd := NewIntCmd(ctx, args...)
- _ = c(ctx, cmd)
- return cmd
-}
-
-func (c cmdable) BitField(ctx context.Context, key string, args ...interface{}) *IntSliceCmd {
- a := make([]interface{}, 0, 2+len(args))
- a = append(a, "bitfield")
- a = append(a, key)
- a = append(a, args...)
- cmd := NewIntSliceCmd(ctx, a...)
- _ = c(ctx, cmd)
- return cmd
-}
-
-//------------------------------------------------------------------------------
-
-func (c cmdable) Scan(ctx context.Context, cursor uint64, match string, count int64) *ScanCmd {
- args := []interface{}{"scan", cursor}
- if match != "" {
- args = append(args, "match", match)
- }
- if count > 0 {
- args = append(args, "count", count)
- }
- cmd := NewScanCmd(ctx, c, args...)
- _ = c(ctx, cmd)
- return cmd
-}
-
-func (c cmdable) ScanType(ctx context.Context, cursor uint64, match string, count int64, keyType string) *ScanCmd {
- args := []interface{}{"scan", cursor}
- if match != "" {
- args = append(args, "match", match)
- }
- if count > 0 {
- args = append(args, "count", count)
- }
- if keyType != "" {
- args = append(args, "type", keyType)
- }
- cmd := NewScanCmd(ctx, c, args...)
- _ = c(ctx, cmd)
- return cmd
-}
-
-func (c cmdable) SScan(ctx context.Context, key string, cursor uint64, match string, count int64) *ScanCmd {
- args := []interface{}{"sscan", key, cursor}
- if match != "" {
- args = append(args, "match", match)
- }
- if count > 0 {
- args = append(args, "count", count)
- }
- cmd := NewScanCmd(ctx, c, args...)
- _ = c(ctx, cmd)
- return cmd
-}
-
-func (c cmdable) HScan(ctx context.Context, key string, cursor uint64, match string, count int64) *ScanCmd {
- args := []interface{}{"hscan", key, cursor}
- if match != "" {
- args = append(args, "match", match)
- }
- if count > 0 {
- args = append(args, "count", count)
- }
- cmd := NewScanCmd(ctx, c, args...)
- _ = c(ctx, cmd)
- return cmd
-}
-
-func (c cmdable) ZScan(ctx context.Context, key string, cursor uint64, match string, count int64) *ScanCmd {
- args := []interface{}{"zscan", key, cursor}
- if match != "" {
- args = append(args, "match", match)
- }
- if count > 0 {
- args = append(args, "count", count)
- }
- cmd := NewScanCmd(ctx, c, args...)
- _ = c(ctx, cmd)
- return cmd
-}
-
-//------------------------------------------------------------------------------
-
-func (c cmdable) HDel(ctx context.Context, key string, fields ...string) *IntCmd {
- args := make([]interface{}, 2+len(fields))
- args[0] = "hdel"
- args[1] = key
- for i, field := range fields {
- args[2+i] = field
- }
- cmd := NewIntCmd(ctx, args...)
- _ = c(ctx, cmd)
- return cmd
-}
-
-func (c cmdable) HExists(ctx context.Context, key, field string) *BoolCmd {
- cmd := NewBoolCmd(ctx, "hexists", key, field)
- _ = c(ctx, cmd)
- return cmd
-}
-
-func (c cmdable) HGet(ctx context.Context, key, field string) *StringCmd {
- cmd := NewStringCmd(ctx, "hget", key, field)
- _ = c(ctx, cmd)
- return cmd
-}
-
-func (c cmdable) HGetAll(ctx context.Context, key string) *StringStringMapCmd {
- cmd := NewStringStringMapCmd(ctx, "hgetall", key)
- _ = c(ctx, cmd)
- return cmd
-}
-
-func (c cmdable) HIncrBy(ctx context.Context, key, field string, incr int64) *IntCmd {
- cmd := NewIntCmd(ctx, "hincrby", key, field, incr)
- _ = c(ctx, cmd)
- return cmd
-}
-
-func (c cmdable) HIncrByFloat(ctx context.Context, key, field string, incr float64) *FloatCmd {
- cmd := NewFloatCmd(ctx, "hincrbyfloat", key, field, incr)
- _ = c(ctx, cmd)
- return cmd
-}
-
-func (c cmdable) HKeys(ctx context.Context, key string) *StringSliceCmd {
- cmd := NewStringSliceCmd(ctx, "hkeys", key)
- _ = c(ctx, cmd)
- return cmd
-}
-
-func (c cmdable) HLen(ctx context.Context, key string) *IntCmd {
- cmd := NewIntCmd(ctx, "hlen", key)
- _ = c(ctx, cmd)
- return cmd
-}
-
-// HMGet returns the values for the specified fields in the hash stored at key.
-// It returns an interface{} to distinguish between empty string and nil value.
-func (c cmdable) HMGet(ctx context.Context, key string, fields ...string) *SliceCmd {
- args := make([]interface{}, 2+len(fields))
- args[0] = "hmget"
- args[1] = key
- for i, field := range fields {
- args[2+i] = field
- }
- cmd := NewSliceCmd(ctx, args...)
- _ = c(ctx, cmd)
- return cmd
-}
-
-// HSet accepts values in following formats:
-// - HSet("myhash", "key1", "value1", "key2", "value2")
-// - HSet("myhash", []string{"key1", "value1", "key2", "value2"})
-// - HSet("myhash", map[string]interface{}{"key1": "value1", "key2": "value2"})
-//
-// Note that it requires Redis v4 for multiple field/value pairs support.
-func (c cmdable) HSet(ctx context.Context, key string, values ...interface{}) *IntCmd {
- args := make([]interface{}, 2, 2+len(values))
- args[0] = "hset"
- args[1] = key
- args = appendArgs(args, values)
- cmd := NewIntCmd(ctx, args...)
- _ = c(ctx, cmd)
- return cmd
-}
-
-// HMSet is a deprecated version of HSet left for compatibility with Redis 3.
-func (c cmdable) HMSet(ctx context.Context, key string, values ...interface{}) *BoolCmd {
- args := make([]interface{}, 2, 2+len(values))
- args[0] = "hmset"
- args[1] = key
- args = appendArgs(args, values)
- cmd := NewBoolCmd(ctx, args...)
- _ = c(ctx, cmd)
- return cmd
-}
-
-func (c cmdable) HSetNX(ctx context.Context, key, field string, value interface{}) *BoolCmd {
- cmd := NewBoolCmd(ctx, "hsetnx", key, field, value)
- _ = c(ctx, cmd)
- return cmd
-}
-
-func (c cmdable) HVals(ctx context.Context, key string) *StringSliceCmd {
- cmd := NewStringSliceCmd(ctx, "hvals", key)
- _ = c(ctx, cmd)
- return cmd
-}
-
-// HRandField redis-server version >= 6.2.0.
-func (c cmdable) HRandField(ctx context.Context, key string, count int, withValues bool) *StringSliceCmd {
- args := make([]interface{}, 0, 4)
-
- // Although count=0 is meaningless, redis accepts count=0.
- args = append(args, "hrandfield", key, count)
- if withValues {
- args = append(args, "withvalues")
- }
-
- cmd := NewStringSliceCmd(ctx, args...)
- _ = c(ctx, cmd)
- return cmd
-}
-
-//------------------------------------------------------------------------------
-
-func (c cmdable) BLPop(ctx context.Context, timeout time.Duration, keys ...string) *StringSliceCmd {
- args := make([]interface{}, 1+len(keys)+1)
- args[0] = "blpop"
- for i, key := range keys {
- args[1+i] = key
- }
- args[len(args)-1] = formatSec(ctx, timeout)
- cmd := NewStringSliceCmd(ctx, args...)
- cmd.setReadTimeout(timeout)
- _ = c(ctx, cmd)
- return cmd
-}
-
-func (c cmdable) BRPop(ctx context.Context, timeout time.Duration, keys ...string) *StringSliceCmd {
- args := make([]interface{}, 1+len(keys)+1)
- args[0] = "brpop"
- for i, key := range keys {
- args[1+i] = key
- }
- args[len(keys)+1] = formatSec(ctx, timeout)
- cmd := NewStringSliceCmd(ctx, args...)
- cmd.setReadTimeout(timeout)
- _ = c(ctx, cmd)
- return cmd
-}
-
-func (c cmdable) BRPopLPush(ctx context.Context, source, destination string, timeout time.Duration) *StringCmd {
- cmd := NewStringCmd(
- ctx,
- "brpoplpush",
- source,
- destination,
- formatSec(ctx, timeout),
- )
- cmd.setReadTimeout(timeout)
- _ = c(ctx, cmd)
- return cmd
-}
-
-func (c cmdable) LIndex(ctx context.Context, key string, index int64) *StringCmd {
- cmd := NewStringCmd(ctx, "lindex", key, index)
- _ = c(ctx, cmd)
- return cmd
-}
-
-func (c cmdable) LInsert(ctx context.Context, key, op string, pivot, value interface{}) *IntCmd {
- cmd := NewIntCmd(ctx, "linsert", key, op, pivot, value)
- _ = c(ctx, cmd)
- return cmd
-}
-
-func (c cmdable) LInsertBefore(ctx context.Context, key string, pivot, value interface{}) *IntCmd {
- cmd := NewIntCmd(ctx, "linsert", key, "before", pivot, value)
- _ = c(ctx, cmd)
- return cmd
-}
-
-func (c cmdable) LInsertAfter(ctx context.Context, key string, pivot, value interface{}) *IntCmd {
- cmd := NewIntCmd(ctx, "linsert", key, "after", pivot, value)
- _ = c(ctx, cmd)
- return cmd
-}
-
-func (c cmdable) LLen(ctx context.Context, key string) *IntCmd {
- cmd := NewIntCmd(ctx, "llen", key)
- _ = c(ctx, cmd)
- return cmd
-}
-
-func (c cmdable) LPop(ctx context.Context, key string) *StringCmd {
- cmd := NewStringCmd(ctx, "lpop", key)
- _ = c(ctx, cmd)
- return cmd
-}
-
-func (c cmdable) LPopCount(ctx context.Context, key string, count int) *StringSliceCmd {
- cmd := NewStringSliceCmd(ctx, "lpop", key, count)
- _ = c(ctx, cmd)
- return cmd
-}
-
-type LPosArgs struct {
- Rank, MaxLen int64
-}
-
-func (c cmdable) LPos(ctx context.Context, key string, value string, a LPosArgs) *IntCmd {
- args := []interface{}{"lpos", key, value}
- if a.Rank != 0 {
- args = append(args, "rank", a.Rank)
- }
- if a.MaxLen != 0 {
- args = append(args, "maxlen", a.MaxLen)
- }
-
- cmd := NewIntCmd(ctx, args...)
- _ = c(ctx, cmd)
- return cmd
-}
-
-func (c cmdable) LPosCount(ctx context.Context, key string, value string, count int64, a LPosArgs) *IntSliceCmd {
- args := []interface{}{"lpos", key, value, "count", count}
- if a.Rank != 0 {
- args = append(args, "rank", a.Rank)
- }
- if a.MaxLen != 0 {
- args = append(args, "maxlen", a.MaxLen)
- }
- cmd := NewIntSliceCmd(ctx, args...)
- _ = c(ctx, cmd)
- return cmd
-}
-
-func (c cmdable) LPush(ctx context.Context, key string, values ...interface{}) *IntCmd {
- args := make([]interface{}, 2, 2+len(values))
- args[0] = "lpush"
- args[1] = key
- args = appendArgs(args, values)
- cmd := NewIntCmd(ctx, args...)
- _ = c(ctx, cmd)
- return cmd
-}
-
-func (c cmdable) LPushX(ctx context.Context, key string, values ...interface{}) *IntCmd {
- args := make([]interface{}, 2, 2+len(values))
- args[0] = "lpushx"
- args[1] = key
- args = appendArgs(args, values)
- cmd := NewIntCmd(ctx, args...)
- _ = c(ctx, cmd)
- return cmd
-}
-
-func (c cmdable) LRange(ctx context.Context, key string, start, stop int64) *StringSliceCmd {
- cmd := NewStringSliceCmd(
- ctx,
- "lrange",
- key,
- start,
- stop,
- )
- _ = c(ctx, cmd)
- return cmd
-}
-
-func (c cmdable) LRem(ctx context.Context, key string, count int64, value interface{}) *IntCmd {
- cmd := NewIntCmd(ctx, "lrem", key, count, value)
- _ = c(ctx, cmd)
- return cmd
-}
-
-func (c cmdable) LSet(ctx context.Context, key string, index int64, value interface{}) *StatusCmd {
- cmd := NewStatusCmd(ctx, "lset", key, index, value)
- _ = c(ctx, cmd)
- return cmd
-}
-
-func (c cmdable) LTrim(ctx context.Context, key string, start, stop int64) *StatusCmd {
- cmd := NewStatusCmd(
- ctx,
- "ltrim",
- key,
- start,
- stop,
- )
- _ = c(ctx, cmd)
- return cmd
-}
-
-func (c cmdable) RPop(ctx context.Context, key string) *StringCmd {
- cmd := NewStringCmd(ctx, "rpop", key)
- _ = c(ctx, cmd)
- return cmd
-}
-
-func (c cmdable) RPopCount(ctx context.Context, key string, count int) *StringSliceCmd {
- cmd := NewStringSliceCmd(ctx, "rpop", key, count)
- _ = c(ctx, cmd)
- return cmd
-}
-
-func (c cmdable) RPopLPush(ctx context.Context, source, destination string) *StringCmd {
- cmd := NewStringCmd(ctx, "rpoplpush", source, destination)
- _ = c(ctx, cmd)
- return cmd
-}
-
-func (c cmdable) RPush(ctx context.Context, key string, values ...interface{}) *IntCmd {
- args := make([]interface{}, 2, 2+len(values))
- args[0] = "rpush"
- args[1] = key
- args = appendArgs(args, values)
- cmd := NewIntCmd(ctx, args...)
- _ = c(ctx, cmd)
- return cmd
-}
-
-func (c cmdable) RPushX(ctx context.Context, key string, values ...interface{}) *IntCmd {
- args := make([]interface{}, 2, 2+len(values))
- args[0] = "rpushx"
- args[1] = key
- args = appendArgs(args, values)
- cmd := NewIntCmd(ctx, args...)
- _ = c(ctx, cmd)
- return cmd
-}
-
-func (c cmdable) LMove(ctx context.Context, source, destination, srcpos, destpos string) *StringCmd {
- cmd := NewStringCmd(ctx, "lmove", source, destination, srcpos, destpos)
- _ = c(ctx, cmd)
- return cmd
-}
-
-func (c cmdable) BLMove(
- ctx context.Context, source, destination, srcpos, destpos string, timeout time.Duration,
-) *StringCmd {
- cmd := NewStringCmd(ctx, "blmove", source, destination, srcpos, destpos, formatSec(ctx, timeout))
- cmd.setReadTimeout(timeout)
- _ = c(ctx, cmd)
- return cmd
-}
-
-//------------------------------------------------------------------------------
-
-func (c cmdable) SAdd(ctx context.Context, key string, members ...interface{}) *IntCmd {
- args := make([]interface{}, 2, 2+len(members))
- args[0] = "sadd"
- args[1] = key
- args = appendArgs(args, members)
- cmd := NewIntCmd(ctx, args...)
- _ = c(ctx, cmd)
- return cmd
-}
-
-func (c cmdable) SCard(ctx context.Context, key string) *IntCmd {
- cmd := NewIntCmd(ctx, "scard", key)
- _ = c(ctx, cmd)
- return cmd
-}
-
-func (c cmdable) SDiff(ctx context.Context, keys ...string) *StringSliceCmd {
- args := make([]interface{}, 1+len(keys))
- args[0] = "sdiff"
- for i, key := range keys {
- args[1+i] = key
- }
- cmd := NewStringSliceCmd(ctx, args...)
- _ = c(ctx, cmd)
- return cmd
-}
-
-func (c cmdable) SDiffStore(ctx context.Context, destination string, keys ...string) *IntCmd {
- args := make([]interface{}, 2+len(keys))
- args[0] = "sdiffstore"
- args[1] = destination
- for i, key := range keys {
- args[2+i] = key
- }
- cmd := NewIntCmd(ctx, args...)
- _ = c(ctx, cmd)
- return cmd
-}
-
-func (c cmdable) SInter(ctx context.Context, keys ...string) *StringSliceCmd {
- args := make([]interface{}, 1+len(keys))
- args[0] = "sinter"
- for i, key := range keys {
- args[1+i] = key
- }
- cmd := NewStringSliceCmd(ctx, args...)
- _ = c(ctx, cmd)
- return cmd
-}
-
-func (c cmdable) SInterStore(ctx context.Context, destination string, keys ...string) *IntCmd {
- args := make([]interface{}, 2+len(keys))
- args[0] = "sinterstore"
- args[1] = destination
- for i, key := range keys {
- args[2+i] = key
- }
- cmd := NewIntCmd(ctx, args...)
- _ = c(ctx, cmd)
- return cmd
-}
-
-func (c cmdable) SIsMember(ctx context.Context, key string, member interface{}) *BoolCmd {
- cmd := NewBoolCmd(ctx, "sismember", key, member)
- _ = c(ctx, cmd)
- return cmd
-}
-
-// SMIsMember Redis `SMISMEMBER key member [member ...]` command.
-func (c cmdable) SMIsMember(ctx context.Context, key string, members ...interface{}) *BoolSliceCmd {
- args := make([]interface{}, 2, 2+len(members))
- args[0] = "smismember"
- args[1] = key
- args = appendArgs(args, members)
- cmd := NewBoolSliceCmd(ctx, args...)
- _ = c(ctx, cmd)
- return cmd
-}
-
-// SMembers Redis `SMEMBERS key` command output as a slice.
-func (c cmdable) SMembers(ctx context.Context, key string) *StringSliceCmd {
- cmd := NewStringSliceCmd(ctx, "smembers", key)
- _ = c(ctx, cmd)
- return cmd
-}
-
-// SMembersMap Redis `SMEMBERS key` command output as a map.
-func (c cmdable) SMembersMap(ctx context.Context, key string) *StringStructMapCmd {
- cmd := NewStringStructMapCmd(ctx, "smembers", key)
- _ = c(ctx, cmd)
- return cmd
-}
-
-func (c cmdable) SMove(ctx context.Context, source, destination string, member interface{}) *BoolCmd {
- cmd := NewBoolCmd(ctx, "smove", source, destination, member)
- _ = c(ctx, cmd)
- return cmd
-}
-
-// SPop Redis `SPOP key` command.
-func (c cmdable) SPop(ctx context.Context, key string) *StringCmd {
- cmd := NewStringCmd(ctx, "spop", key)
- _ = c(ctx, cmd)
- return cmd
-}
-
-// SPopN Redis `SPOP key count` command.
-func (c cmdable) SPopN(ctx context.Context, key string, count int64) *StringSliceCmd {
- cmd := NewStringSliceCmd(ctx, "spop", key, count)
- _ = c(ctx, cmd)
- return cmd
-}
-
-// SRandMember Redis `SRANDMEMBER key` command.
-func (c cmdable) SRandMember(ctx context.Context, key string) *StringCmd {
- cmd := NewStringCmd(ctx, "srandmember", key)
- _ = c(ctx, cmd)
- return cmd
-}
-
-// SRandMemberN Redis `SRANDMEMBER key count` command.
-func (c cmdable) SRandMemberN(ctx context.Context, key string, count int64) *StringSliceCmd {
- cmd := NewStringSliceCmd(ctx, "srandmember", key, count)
- _ = c(ctx, cmd)
- return cmd
-}
-
-func (c cmdable) SRem(ctx context.Context, key string, members ...interface{}) *IntCmd {
- args := make([]interface{}, 2, 2+len(members))
- args[0] = "srem"
- args[1] = key
- args = appendArgs(args, members)
- cmd := NewIntCmd(ctx, args...)
- _ = c(ctx, cmd)
- return cmd
-}
-
-func (c cmdable) SUnion(ctx context.Context, keys ...string) *StringSliceCmd {
- args := make([]interface{}, 1+len(keys))
- args[0] = "sunion"
- for i, key := range keys {
- args[1+i] = key
- }
- cmd := NewStringSliceCmd(ctx, args...)
- _ = c(ctx, cmd)
- return cmd
-}
-
-func (c cmdable) SUnionStore(ctx context.Context, destination string, keys ...string) *IntCmd {
- args := make([]interface{}, 2+len(keys))
- args[0] = "sunionstore"
- args[1] = destination
- for i, key := range keys {
- args[2+i] = key
- }
- cmd := NewIntCmd(ctx, args...)
- _ = c(ctx, cmd)
- return cmd
-}
-
-//------------------------------------------------------------------------------
-
-// XAddArgs accepts values in the following formats:
-// - XAddArgs.Values = []interface{}{"key1", "value1", "key2", "value2"}
-// - XAddArgs.Values = []string("key1", "value1", "key2", "value2")
-// - XAddArgs.Values = map[string]interface{}{"key1": "value1", "key2": "value2"}
-//
-// Note that map will not preserve the order of key-value pairs.
-// MaxLen/MaxLenApprox and MinID are in conflict, only one of them can be used.
-type XAddArgs struct {
- Stream string
- NoMkStream bool
- MaxLen int64 // MAXLEN N
-
- // Deprecated: use MaxLen+Approx, remove in v9.
- MaxLenApprox int64 // MAXLEN ~ N
-
- MinID string
- // Approx causes MaxLen and MinID to use "~" matcher (instead of "=").
- Approx bool
- Limit int64
- ID string
- Values interface{}
-}
-
-// XAdd a.Limit has a bug, please confirm it and use it.
-// issue: https://github.com/redis/redis/issues/9046
-func (c cmdable) XAdd(ctx context.Context, a *XAddArgs) *StringCmd {
- args := make([]interface{}, 0, 11)
- args = append(args, "xadd", a.Stream)
- if a.NoMkStream {
- args = append(args, "nomkstream")
- }
- switch {
- case a.MaxLen > 0:
- if a.Approx {
- args = append(args, "maxlen", "~", a.MaxLen)
- } else {
- args = append(args, "maxlen", a.MaxLen)
- }
- case a.MaxLenApprox > 0:
- // TODO remove in v9.
- args = append(args, "maxlen", "~", a.MaxLenApprox)
- case a.MinID != "":
- if a.Approx {
- args = append(args, "minid", "~", a.MinID)
- } else {
- args = append(args, "minid", a.MinID)
- }
- }
- if a.Limit > 0 {
- args = append(args, "limit", a.Limit)
- }
- if a.ID != "" {
- args = append(args, a.ID)
- } else {
- args = append(args, "*")
- }
- args = appendArg(args, a.Values)
-
- cmd := NewStringCmd(ctx, args...)
- _ = c(ctx, cmd)
- return cmd
-}
-
-func (c cmdable) XDel(ctx context.Context, stream string, ids ...string) *IntCmd {
- args := []interface{}{"xdel", stream}
- for _, id := range ids {
- args = append(args, id)
- }
- cmd := NewIntCmd(ctx, args...)
- _ = c(ctx, cmd)
- return cmd
-}
-
-func (c cmdable) XLen(ctx context.Context, stream string) *IntCmd {
- cmd := NewIntCmd(ctx, "xlen", stream)
- _ = c(ctx, cmd)
- return cmd
-}
-
-func (c cmdable) XRange(ctx context.Context, stream, start, stop string) *XMessageSliceCmd {
- cmd := NewXMessageSliceCmd(ctx, "xrange", stream, start, stop)
- _ = c(ctx, cmd)
- return cmd
-}
-
-func (c cmdable) XRangeN(ctx context.Context, stream, start, stop string, count int64) *XMessageSliceCmd {
- cmd := NewXMessageSliceCmd(ctx, "xrange", stream, start, stop, "count", count)
- _ = c(ctx, cmd)
- return cmd
-}
-
-func (c cmdable) XRevRange(ctx context.Context, stream, start, stop string) *XMessageSliceCmd {
- cmd := NewXMessageSliceCmd(ctx, "xrevrange", stream, start, stop)
- _ = c(ctx, cmd)
- return cmd
-}
-
-func (c cmdable) XRevRangeN(ctx context.Context, stream, start, stop string, count int64) *XMessageSliceCmd {
- cmd := NewXMessageSliceCmd(ctx, "xrevrange", stream, start, stop, "count", count)
- _ = c(ctx, cmd)
- return cmd
-}
-
-type XReadArgs struct {
- Streams []string // list of streams and ids, e.g. stream1 stream2 id1 id2
- Count int64
- Block time.Duration
-}
-
-func (c cmdable) XRead(ctx context.Context, a *XReadArgs) *XStreamSliceCmd {
- args := make([]interface{}, 0, 6+len(a.Streams))
- args = append(args, "xread")
-
- keyPos := int8(1)
- if a.Count > 0 {
- args = append(args, "count")
- args = append(args, a.Count)
- keyPos += 2
- }
- if a.Block >= 0 {
- args = append(args, "block")
- args = append(args, int64(a.Block/time.Millisecond))
- keyPos += 2
- }
- args = append(args, "streams")
- keyPos++
- for _, s := range a.Streams {
- args = append(args, s)
- }
-
- cmd := NewXStreamSliceCmd(ctx, args...)
- if a.Block >= 0 {
- cmd.setReadTimeout(a.Block)
- }
- cmd.SetFirstKeyPos(keyPos)
- _ = c(ctx, cmd)
- return cmd
-}
-
-func (c cmdable) XReadStreams(ctx context.Context, streams ...string) *XStreamSliceCmd {
- return c.XRead(ctx, &XReadArgs{
- Streams: streams,
- Block: -1,
- })
-}
-
-func (c cmdable) XGroupCreate(ctx context.Context, stream, group, start string) *StatusCmd {
- cmd := NewStatusCmd(ctx, "xgroup", "create", stream, group, start)
- _ = c(ctx, cmd)
- return cmd
-}
-
-func (c cmdable) XGroupCreateMkStream(ctx context.Context, stream, group, start string) *StatusCmd {
- cmd := NewStatusCmd(ctx, "xgroup", "create", stream, group, start, "mkstream")
- _ = c(ctx, cmd)
- return cmd
-}
-
-func (c cmdable) XGroupSetID(ctx context.Context, stream, group, start string) *StatusCmd {
- cmd := NewStatusCmd(ctx, "xgroup", "setid", stream, group, start)
- _ = c(ctx, cmd)
- return cmd
-}
-
-func (c cmdable) XGroupDestroy(ctx context.Context, stream, group string) *IntCmd {
- cmd := NewIntCmd(ctx, "xgroup", "destroy", stream, group)
- _ = c(ctx, cmd)
- return cmd
-}
-
-func (c cmdable) XGroupCreateConsumer(ctx context.Context, stream, group, consumer string) *IntCmd {
- cmd := NewIntCmd(ctx, "xgroup", "createconsumer", stream, group, consumer)
- _ = c(ctx, cmd)
- return cmd
-}
-
-func (c cmdable) XGroupDelConsumer(ctx context.Context, stream, group, consumer string) *IntCmd {
- cmd := NewIntCmd(ctx, "xgroup", "delconsumer", stream, group, consumer)
- _ = c(ctx, cmd)
- return cmd
-}
-
-type XReadGroupArgs struct {
- Group string
- Consumer string
- Streams []string // list of streams and ids, e.g. stream1 stream2 id1 id2
- Count int64
- Block time.Duration
- NoAck bool
-}
-
-func (c cmdable) XReadGroup(ctx context.Context, a *XReadGroupArgs) *XStreamSliceCmd {
- args := make([]interface{}, 0, 10+len(a.Streams))
- args = append(args, "xreadgroup", "group", a.Group, a.Consumer)
-
- keyPos := int8(4)
- if a.Count > 0 {
- args = append(args, "count", a.Count)
- keyPos += 2
- }
- if a.Block >= 0 {
- args = append(args, "block", int64(a.Block/time.Millisecond))
- keyPos += 2
- }
- if a.NoAck {
- args = append(args, "noack")
- keyPos++
- }
- args = append(args, "streams")
- keyPos++
- for _, s := range a.Streams {
- args = append(args, s)
- }
-
- cmd := NewXStreamSliceCmd(ctx, args...)
- if a.Block >= 0 {
- cmd.setReadTimeout(a.Block)
- }
- cmd.SetFirstKeyPos(keyPos)
- _ = c(ctx, cmd)
- return cmd
-}
-
-func (c cmdable) XAck(ctx context.Context, stream, group string, ids ...string) *IntCmd {
- args := []interface{}{"xack", stream, group}
- for _, id := range ids {
- args = append(args, id)
- }
- cmd := NewIntCmd(ctx, args...)
- _ = c(ctx, cmd)
- return cmd
-}
-
-func (c cmdable) XPending(ctx context.Context, stream, group string) *XPendingCmd {
- cmd := NewXPendingCmd(ctx, "xpending", stream, group)
- _ = c(ctx, cmd)
- return cmd
-}
-
-type XPendingExtArgs struct {
- Stream string
- Group string
- Idle time.Duration
- Start string
- End string
- Count int64
- Consumer string
-}
-
-func (c cmdable) XPendingExt(ctx context.Context, a *XPendingExtArgs) *XPendingExtCmd {
- args := make([]interface{}, 0, 9)
- args = append(args, "xpending", a.Stream, a.Group)
- if a.Idle != 0 {
- args = append(args, "idle", formatMs(ctx, a.Idle))
- }
- args = append(args, a.Start, a.End, a.Count)
- if a.Consumer != "" {
- args = append(args, a.Consumer)
- }
- cmd := NewXPendingExtCmd(ctx, args...)
- _ = c(ctx, cmd)
- return cmd
-}
-
-type XAutoClaimArgs struct {
- Stream string
- Group string
- MinIdle time.Duration
- Start string
- Count int64
- Consumer string
-}
-
-func (c cmdable) XAutoClaim(ctx context.Context, a *XAutoClaimArgs) *XAutoClaimCmd {
- args := xAutoClaimArgs(ctx, a)
- cmd := NewXAutoClaimCmd(ctx, args...)
- _ = c(ctx, cmd)
- return cmd
-}
-
-func (c cmdable) XAutoClaimJustID(ctx context.Context, a *XAutoClaimArgs) *XAutoClaimJustIDCmd {
- args := xAutoClaimArgs(ctx, a)
- args = append(args, "justid")
- cmd := NewXAutoClaimJustIDCmd(ctx, args...)
- _ = c(ctx, cmd)
- return cmd
-}
-
-func xAutoClaimArgs(ctx context.Context, a *XAutoClaimArgs) []interface{} {
- args := make([]interface{}, 0, 8)
- args = append(args, "xautoclaim", a.Stream, a.Group, a.Consumer, formatMs(ctx, a.MinIdle), a.Start)
- if a.Count > 0 {
- args = append(args, "count", a.Count)
- }
- return args
-}
-
-type XClaimArgs struct {
- Stream string
- Group string
- Consumer string
- MinIdle time.Duration
- Messages []string
-}
-
-func (c cmdable) XClaim(ctx context.Context, a *XClaimArgs) *XMessageSliceCmd {
- args := xClaimArgs(a)
- cmd := NewXMessageSliceCmd(ctx, args...)
- _ = c(ctx, cmd)
- return cmd
-}
-
-func (c cmdable) XClaimJustID(ctx context.Context, a *XClaimArgs) *StringSliceCmd {
- args := xClaimArgs(a)
- args = append(args, "justid")
- cmd := NewStringSliceCmd(ctx, args...)
- _ = c(ctx, cmd)
- return cmd
-}
-
-func xClaimArgs(a *XClaimArgs) []interface{} {
- args := make([]interface{}, 0, 5+len(a.Messages))
- args = append(args,
- "xclaim",
- a.Stream,
- a.Group, a.Consumer,
- int64(a.MinIdle/time.Millisecond))
- for _, id := range a.Messages {
- args = append(args, id)
- }
- return args
-}
-
-// xTrim If approx is true, add the "~" parameter, otherwise it is the default "=" (redis default).
-// example:
-// XTRIM key MAXLEN/MINID threshold LIMIT limit.
-// XTRIM key MAXLEN/MINID ~ threshold LIMIT limit.
-// The redis-server version is lower than 6.2, please set limit to 0.
-func (c cmdable) xTrim(
- ctx context.Context, key, strategy string,
- approx bool, threshold interface{}, limit int64,
-) *IntCmd {
- args := make([]interface{}, 0, 7)
- args = append(args, "xtrim", key, strategy)
- if approx {
- args = append(args, "~")
- }
- args = append(args, threshold)
- if limit > 0 {
- args = append(args, "limit", limit)
- }
- cmd := NewIntCmd(ctx, args...)
- _ = c(ctx, cmd)
- return cmd
-}
-
-// Deprecated: use XTrimMaxLen, remove in v9.
-func (c cmdable) XTrim(ctx context.Context, key string, maxLen int64) *IntCmd {
- return c.xTrim(ctx, key, "maxlen", false, maxLen, 0)
-}
-
-// Deprecated: use XTrimMaxLenApprox, remove in v9.
-func (c cmdable) XTrimApprox(ctx context.Context, key string, maxLen int64) *IntCmd {
- return c.xTrim(ctx, key, "maxlen", true, maxLen, 0)
-}
-
-// XTrimMaxLen No `~` rules are used, `limit` cannot be used.
-// cmd: XTRIM key MAXLEN maxLen
-func (c cmdable) XTrimMaxLen(ctx context.Context, key string, maxLen int64) *IntCmd {
- return c.xTrim(ctx, key, "maxlen", false, maxLen, 0)
-}
-
-// XTrimMaxLenApprox LIMIT has a bug, please confirm it and use it.
-// issue: https://github.com/redis/redis/issues/9046
-// cmd: XTRIM key MAXLEN ~ maxLen LIMIT limit
-func (c cmdable) XTrimMaxLenApprox(ctx context.Context, key string, maxLen, limit int64) *IntCmd {
- return c.xTrim(ctx, key, "maxlen", true, maxLen, limit)
-}
-
-// XTrimMinID No `~` rules are used, `limit` cannot be used.
-// cmd: XTRIM key MINID minID
-func (c cmdable) XTrimMinID(ctx context.Context, key string, minID string) *IntCmd {
- return c.xTrim(ctx, key, "minid", false, minID, 0)
-}
-
-// XTrimMinIDApprox LIMIT has a bug, please confirm it and use it.
-// issue: https://github.com/redis/redis/issues/9046
-// cmd: XTRIM key MINID ~ minID LIMIT limit
-func (c cmdable) XTrimMinIDApprox(ctx context.Context, key string, minID string, limit int64) *IntCmd {
- return c.xTrim(ctx, key, "minid", true, minID, limit)
-}
-
-func (c cmdable) XInfoConsumers(ctx context.Context, key string, group string) *XInfoConsumersCmd {
- cmd := NewXInfoConsumersCmd(ctx, key, group)
- _ = c(ctx, cmd)
- return cmd
-}
-
-func (c cmdable) XInfoGroups(ctx context.Context, key string) *XInfoGroupsCmd {
- cmd := NewXInfoGroupsCmd(ctx, key)
- _ = c(ctx, cmd)
- return cmd
-}
-
-func (c cmdable) XInfoStream(ctx context.Context, key string) *XInfoStreamCmd {
- cmd := NewXInfoStreamCmd(ctx, key)
- _ = c(ctx, cmd)
- return cmd
-}
-
-// XInfoStreamFull XINFO STREAM FULL [COUNT count]
-// redis-server >= 6.0.
-func (c cmdable) XInfoStreamFull(ctx context.Context, key string, count int) *XInfoStreamFullCmd {
- args := make([]interface{}, 0, 6)
- args = append(args, "xinfo", "stream", key, "full")
- if count > 0 {
- args = append(args, "count", count)
- }
- cmd := NewXInfoStreamFullCmd(ctx, args...)
- _ = c(ctx, cmd)
- return cmd
-}
-
-//------------------------------------------------------------------------------
-
-// Z represents sorted set member.
-type Z struct {
- Score float64
- Member interface{}
-}
-
-// ZWithKey represents sorted set member including the name of the key where it was popped.
-type ZWithKey struct {
- Z
- Key string
-}
-
-// ZStore is used as an arg to ZInter/ZInterStore and ZUnion/ZUnionStore.
-type ZStore struct {
- Keys []string
- Weights []float64
- // Can be SUM, MIN or MAX.
- Aggregate string
-}
-
-func (z ZStore) len() (n int) {
- n = len(z.Keys)
- if len(z.Weights) > 0 {
- n += 1 + len(z.Weights)
- }
- if z.Aggregate != "" {
- n += 2
- }
- return n
-}
-
-func (z ZStore) appendArgs(args []interface{}) []interface{} {
- for _, key := range z.Keys {
- args = append(args, key)
- }
- if len(z.Weights) > 0 {
- args = append(args, "weights")
- for _, weights := range z.Weights {
- args = append(args, weights)
- }
- }
- if z.Aggregate != "" {
- args = append(args, "aggregate", z.Aggregate)
- }
- return args
-}
-
-// BZPopMax Redis `BZPOPMAX key [key ...] timeout` command.
-func (c cmdable) BZPopMax(ctx context.Context, timeout time.Duration, keys ...string) *ZWithKeyCmd {
- args := make([]interface{}, 1+len(keys)+1)
- args[0] = "bzpopmax"
- for i, key := range keys {
- args[1+i] = key
- }
- args[len(args)-1] = formatSec(ctx, timeout)
- cmd := NewZWithKeyCmd(ctx, args...)
- cmd.setReadTimeout(timeout)
- _ = c(ctx, cmd)
- return cmd
-}
-
-// BZPopMin Redis `BZPOPMIN key [key ...] timeout` command.
-func (c cmdable) BZPopMin(ctx context.Context, timeout time.Duration, keys ...string) *ZWithKeyCmd {
- args := make([]interface{}, 1+len(keys)+1)
- args[0] = "bzpopmin"
- for i, key := range keys {
- args[1+i] = key
- }
- args[len(args)-1] = formatSec(ctx, timeout)
- cmd := NewZWithKeyCmd(ctx, args...)
- cmd.setReadTimeout(timeout)
- _ = c(ctx, cmd)
- return cmd
-}
-
-// ZAddArgs WARN: The GT, LT and NX options are mutually exclusive.
-type ZAddArgs struct {
- NX bool
- XX bool
- LT bool
- GT bool
- Ch bool
- Members []Z
-}
-
-func (c cmdable) zAddArgs(key string, args ZAddArgs, incr bool) []interface{} {
- a := make([]interface{}, 0, 6+2*len(args.Members))
- a = append(a, "zadd", key)
-
- // The GT, LT and NX options are mutually exclusive.
- if args.NX {
- a = append(a, "nx")
- } else {
- if args.XX {
- a = append(a, "xx")
- }
- if args.GT {
- a = append(a, "gt")
- } else if args.LT {
- a = append(a, "lt")
- }
- }
- if args.Ch {
- a = append(a, "ch")
- }
- if incr {
- a = append(a, "incr")
- }
- for _, m := range args.Members {
- a = append(a, m.Score)
- a = append(a, m.Member)
- }
- return a
-}
-
-func (c cmdable) ZAddArgs(ctx context.Context, key string, args ZAddArgs) *IntCmd {
- cmd := NewIntCmd(ctx, c.zAddArgs(key, args, false)...)
- _ = c(ctx, cmd)
- return cmd
-}
-
-func (c cmdable) ZAddArgsIncr(ctx context.Context, key string, args ZAddArgs) *FloatCmd {
- cmd := NewFloatCmd(ctx, c.zAddArgs(key, args, true)...)
- _ = c(ctx, cmd)
- return cmd
-}
-
-// TODO: Compatible with v8 api, will be removed in v9.
-func (c cmdable) zAdd(ctx context.Context, key string, args ZAddArgs, members ...*Z) *IntCmd {
- args.Members = make([]Z, len(members))
- for i, m := range members {
- args.Members[i] = *m
- }
- cmd := NewIntCmd(ctx, c.zAddArgs(key, args, false)...)
- _ = c(ctx, cmd)
- return cmd
-}
-
-// ZAdd Redis `ZADD key score member [score member ...]` command.
-func (c cmdable) ZAdd(ctx context.Context, key string, members ...*Z) *IntCmd {
- return c.zAdd(ctx, key, ZAddArgs{}, members...)
-}
-
-// ZAddNX Redis `ZADD key NX score member [score member ...]` command.
-func (c cmdable) ZAddNX(ctx context.Context, key string, members ...*Z) *IntCmd {
- return c.zAdd(ctx, key, ZAddArgs{
- NX: true,
- }, members...)
-}
-
-// ZAddXX Redis `ZADD key XX score member [score member ...]` command.
-func (c cmdable) ZAddXX(ctx context.Context, key string, members ...*Z) *IntCmd {
- return c.zAdd(ctx, key, ZAddArgs{
- XX: true,
- }, members...)
-}
-
-// ZAddCh Redis `ZADD key CH score member [score member ...]` command.
-// Deprecated: Use
-// client.ZAddArgs(ctx, ZAddArgs{
-// Ch: true,
-// Members: []Z,
-// })
-// remove in v9.
-func (c cmdable) ZAddCh(ctx context.Context, key string, members ...*Z) *IntCmd {
- return c.zAdd(ctx, key, ZAddArgs{
- Ch: true,
- }, members...)
-}
-
-// ZAddNXCh Redis `ZADD key NX CH score member [score member ...]` command.
-// Deprecated: Use
-// client.ZAddArgs(ctx, ZAddArgs{
-// NX: true,
-// Ch: true,
-// Members: []Z,
-// })
-// remove in v9.
-func (c cmdable) ZAddNXCh(ctx context.Context, key string, members ...*Z) *IntCmd {
- return c.zAdd(ctx, key, ZAddArgs{
- NX: true,
- Ch: true,
- }, members...)
-}
-
-// ZAddXXCh Redis `ZADD key XX CH score member [score member ...]` command.
-// Deprecated: Use
-// client.ZAddArgs(ctx, ZAddArgs{
-// XX: true,
-// Ch: true,
-// Members: []Z,
-// })
-// remove in v9.
-func (c cmdable) ZAddXXCh(ctx context.Context, key string, members ...*Z) *IntCmd {
- return c.zAdd(ctx, key, ZAddArgs{
- XX: true,
- Ch: true,
- }, members...)
-}
-
-// ZIncr Redis `ZADD key INCR score member` command.
-// Deprecated: Use
-// client.ZAddArgsIncr(ctx, ZAddArgs{
-// Members: []Z,
-// })
-// remove in v9.
-func (c cmdable) ZIncr(ctx context.Context, key string, member *Z) *FloatCmd {
- return c.ZAddArgsIncr(ctx, key, ZAddArgs{
- Members: []Z{*member},
- })
-}
-
-// ZIncrNX Redis `ZADD key NX INCR score member` command.
-// Deprecated: Use
-// client.ZAddArgsIncr(ctx, ZAddArgs{
-// NX: true,
-// Members: []Z,
-// })
-// remove in v9.
-func (c cmdable) ZIncrNX(ctx context.Context, key string, member *Z) *FloatCmd {
- return c.ZAddArgsIncr(ctx, key, ZAddArgs{
- NX: true,
- Members: []Z{*member},
- })
-}
-
-// ZIncrXX Redis `ZADD key XX INCR score member` command.
-// Deprecated: Use
-// client.ZAddArgsIncr(ctx, ZAddArgs{
-// XX: true,
-// Members: []Z,
-// })
-// remove in v9.
-func (c cmdable) ZIncrXX(ctx context.Context, key string, member *Z) *FloatCmd {
- return c.ZAddArgsIncr(ctx, key, ZAddArgs{
- XX: true,
- Members: []Z{*member},
- })
-}
-
-func (c cmdable) ZCard(ctx context.Context, key string) *IntCmd {
- cmd := NewIntCmd(ctx, "zcard", key)
- _ = c(ctx, cmd)
- return cmd
-}
-
-func (c cmdable) ZCount(ctx context.Context, key, min, max string) *IntCmd {
- cmd := NewIntCmd(ctx, "zcount", key, min, max)
- _ = c(ctx, cmd)
- return cmd
-}
-
-func (c cmdable) ZLexCount(ctx context.Context, key, min, max string) *IntCmd {
- cmd := NewIntCmd(ctx, "zlexcount", key, min, max)
- _ = c(ctx, cmd)
- return cmd
-}
-
-func (c cmdable) ZIncrBy(ctx context.Context, key string, increment float64, member string) *FloatCmd {
- cmd := NewFloatCmd(ctx, "zincrby", key, increment, member)
- _ = c(ctx, cmd)
- return cmd
-}
-
-func (c cmdable) ZInterStore(ctx context.Context, destination string, store *ZStore) *IntCmd {
- args := make([]interface{}, 0, 3+store.len())
- args = append(args, "zinterstore", destination, len(store.Keys))
- args = store.appendArgs(args)
- cmd := NewIntCmd(ctx, args...)
- cmd.SetFirstKeyPos(3)
- _ = c(ctx, cmd)
- return cmd
-}
-
-func (c cmdable) ZInter(ctx context.Context, store *ZStore) *StringSliceCmd {
- args := make([]interface{}, 0, 2+store.len())
- args = append(args, "zinter", len(store.Keys))
- args = store.appendArgs(args)
- cmd := NewStringSliceCmd(ctx, args...)
- cmd.SetFirstKeyPos(2)
- _ = c(ctx, cmd)
- return cmd
-}
-
-func (c cmdable) ZInterWithScores(ctx context.Context, store *ZStore) *ZSliceCmd {
- args := make([]interface{}, 0, 3+store.len())
- args = append(args, "zinter", len(store.Keys))
- args = store.appendArgs(args)
- args = append(args, "withscores")
- cmd := NewZSliceCmd(ctx, args...)
- cmd.SetFirstKeyPos(2)
- _ = c(ctx, cmd)
- return cmd
-}
-
-func (c cmdable) ZMScore(ctx context.Context, key string, members ...string) *FloatSliceCmd {
- args := make([]interface{}, 2+len(members))
- args[0] = "zmscore"
- args[1] = key
- for i, member := range members {
- args[2+i] = member
- }
- cmd := NewFloatSliceCmd(ctx, args...)
- _ = c(ctx, cmd)
- return cmd
-}
-
-func (c cmdable) ZPopMax(ctx context.Context, key string, count ...int64) *ZSliceCmd {
- args := []interface{}{
- "zpopmax",
- key,
- }
-
- switch len(count) {
- case 0:
- break
- case 1:
- args = append(args, count[0])
- default:
- panic("too many arguments")
- }
-
- cmd := NewZSliceCmd(ctx, args...)
- _ = c(ctx, cmd)
- return cmd
-}
-
-func (c cmdable) ZPopMin(ctx context.Context, key string, count ...int64) *ZSliceCmd {
- args := []interface{}{
- "zpopmin",
- key,
- }
-
- switch len(count) {
- case 0:
- break
- case 1:
- args = append(args, count[0])
- default:
- panic("too many arguments")
- }
-
- cmd := NewZSliceCmd(ctx, args...)
- _ = c(ctx, cmd)
- return cmd
-}
-
-// ZRangeArgs is all the options of the ZRange command.
-// In version> 6.2.0, you can replace the(cmd):
-// ZREVRANGE,
-// ZRANGEBYSCORE,
-// ZREVRANGEBYSCORE,
-// ZRANGEBYLEX,
-// ZREVRANGEBYLEX.
-// Please pay attention to your redis-server version.
-//
-// Rev, ByScore, ByLex and Offset+Count options require redis-server 6.2.0 and higher.
-type ZRangeArgs struct {
- Key string
-
- // When the ByScore option is provided, the open interval(exclusive) can be set.
- // By default, the score intervals specified by <Start> and <Stop> are closed (inclusive).
- // It is similar to the deprecated(6.2.0+) ZRangeByScore command.
- // For example:
- // ZRangeArgs{
- // Key: "example-key",
- // Start: "(3",
- // Stop: 8,
- // ByScore: true,
- // }
- // cmd: "ZRange example-key (3 8 ByScore" (3 < score <= 8).
- //
- // For the ByLex option, it is similar to the deprecated(6.2.0+) ZRangeByLex command.
- // You can set the <Start> and <Stop> options as follows:
- // ZRangeArgs{
- // Key: "example-key",
- // Start: "[abc",
- // Stop: "(def",
- // ByLex: true,
- // }
- // cmd: "ZRange example-key [abc (def ByLex"
- //
- // For normal cases (ByScore==false && ByLex==false), <Start> and <Stop> should be set to the index range (int).
- // You can read the documentation for more information: https://redis.io/commands/zrange
- Start interface{}
- Stop interface{}
-
- // The ByScore and ByLex options are mutually exclusive.
- ByScore bool
- ByLex bool
-
- Rev bool
-
- // limit offset count.
- Offset int64
- Count int64
-}
-
-func (z ZRangeArgs) appendArgs(args []interface{}) []interface{} {
- // For Rev+ByScore/ByLex, we need to adjust the position of <Start> and <Stop>.
- if z.Rev && (z.ByScore || z.ByLex) {
- args = append(args, z.Key, z.Stop, z.Start)
- } else {
- args = append(args, z.Key, z.Start, z.Stop)
- }
-
- if z.ByScore {
- args = append(args, "byscore")
- } else if z.ByLex {
- args = append(args, "bylex")
- }
- if z.Rev {
- args = append(args, "rev")
- }
- if z.Offset != 0 || z.Count != 0 {
- args = append(args, "limit", z.Offset, z.Count)
- }
- return args
-}
-
-func (c cmdable) ZRangeArgs(ctx context.Context, z ZRangeArgs) *StringSliceCmd {
- args := make([]interface{}, 0, 9)
- args = append(args, "zrange")
- args = z.appendArgs(args)
- cmd := NewStringSliceCmd(ctx, args...)
- _ = c(ctx, cmd)
- return cmd
-}
-
-func (c cmdable) ZRangeArgsWithScores(ctx context.Context, z ZRangeArgs) *ZSliceCmd {
- args := make([]interface{}, 0, 10)
- args = append(args, "zrange")
- args = z.appendArgs(args)
- args = append(args, "withscores")
- cmd := NewZSliceCmd(ctx, args...)
- _ = c(ctx, cmd)
- return cmd
-}
-
-func (c cmdable) ZRange(ctx context.Context, key string, start, stop int64) *StringSliceCmd {
- return c.ZRangeArgs(ctx, ZRangeArgs{
- Key: key,
- Start: start,
- Stop: stop,
- })
-}
-
-func (c cmdable) ZRangeWithScores(ctx context.Context, key string, start, stop int64) *ZSliceCmd {
- return c.ZRangeArgsWithScores(ctx, ZRangeArgs{
- Key: key,
- Start: start,
- Stop: stop,
- })
-}
-
-type ZRangeBy struct {
- Min, Max string
- Offset, Count int64
-}
-
-func (c cmdable) zRangeBy(ctx context.Context, zcmd, key string, opt *ZRangeBy, withScores bool) *StringSliceCmd {
- args := []interface{}{zcmd, key, opt.Min, opt.Max}
- if withScores {
- args = append(args, "withscores")
- }
- if opt.Offset != 0 || opt.Count != 0 {
- args = append(
- args,
- "limit",
- opt.Offset,
- opt.Count,
- )
- }
- cmd := NewStringSliceCmd(ctx, args...)
- _ = c(ctx, cmd)
- return cmd
-}
-
-func (c cmdable) ZRangeByScore(ctx context.Context, key string, opt *ZRangeBy) *StringSliceCmd {
- return c.zRangeBy(ctx, "zrangebyscore", key, opt, false)
-}
-
-func (c cmdable) ZRangeByLex(ctx context.Context, key string, opt *ZRangeBy) *StringSliceCmd {
- return c.zRangeBy(ctx, "zrangebylex", key, opt, false)
-}
-
-func (c cmdable) ZRangeByScoreWithScores(ctx context.Context, key string, opt *ZRangeBy) *ZSliceCmd {
- args := []interface{}{"zrangebyscore", key, opt.Min, opt.Max, "withscores"}
- if opt.Offset != 0 || opt.Count != 0 {
- args = append(
- args,
- "limit",
- opt.Offset,
- opt.Count,
- )
- }
- cmd := NewZSliceCmd(ctx, args...)
- _ = c(ctx, cmd)
- return cmd
-}
-
-func (c cmdable) ZRangeStore(ctx context.Context, dst string, z ZRangeArgs) *IntCmd {
- args := make([]interface{}, 0, 10)
- args = append(args, "zrangestore", dst)
- args = z.appendArgs(args)
- cmd := NewIntCmd(ctx, args...)
- _ = c(ctx, cmd)
- return cmd
-}
-
-func (c cmdable) ZRank(ctx context.Context, key, member string) *IntCmd {
- cmd := NewIntCmd(ctx, "zrank", key, member)
- _ = c(ctx, cmd)
- return cmd
-}
-
-func (c cmdable) ZRem(ctx context.Context, key string, members ...interface{}) *IntCmd {
- args := make([]interface{}, 2, 2+len(members))
- args[0] = "zrem"
- args[1] = key
- args = appendArgs(args, members)
- cmd := NewIntCmd(ctx, args...)
- _ = c(ctx, cmd)
- return cmd
-}
-
-func (c cmdable) ZRemRangeByRank(ctx context.Context, key string, start, stop int64) *IntCmd {
- cmd := NewIntCmd(
- ctx,
- "zremrangebyrank",
- key,
- start,
- stop,
- )
- _ = c(ctx, cmd)
- return cmd
-}
-
-func (c cmdable) ZRemRangeByScore(ctx context.Context, key, min, max string) *IntCmd {
- cmd := NewIntCmd(ctx, "zremrangebyscore", key, min, max)
- _ = c(ctx, cmd)
- return cmd
-}
-
-func (c cmdable) ZRemRangeByLex(ctx context.Context, key, min, max string) *IntCmd {
- cmd := NewIntCmd(ctx, "zremrangebylex", key, min, max)
- _ = c(ctx, cmd)
- return cmd
-}
-
-func (c cmdable) ZRevRange(ctx context.Context, key string, start, stop int64) *StringSliceCmd {
- cmd := NewStringSliceCmd(ctx, "zrevrange", key, start, stop)
- _ = c(ctx, cmd)
- return cmd
-}
-
-func (c cmdable) ZRevRangeWithScores(ctx context.Context, key string, start, stop int64) *ZSliceCmd {
- cmd := NewZSliceCmd(ctx, "zrevrange", key, start, stop, "withscores")
- _ = c(ctx, cmd)
- return cmd
-}
-
-func (c cmdable) zRevRangeBy(ctx context.Context, zcmd, key string, opt *ZRangeBy) *StringSliceCmd {
- args := []interface{}{zcmd, key, opt.Max, opt.Min}
- if opt.Offset != 0 || opt.Count != 0 {
- args = append(
- args,
- "limit",
- opt.Offset,
- opt.Count,
- )
- }
- cmd := NewStringSliceCmd(ctx, args...)
- _ = c(ctx, cmd)
- return cmd
-}
-
-func (c cmdable) ZRevRangeByScore(ctx context.Context, key string, opt *ZRangeBy) *StringSliceCmd {
- return c.zRevRangeBy(ctx, "zrevrangebyscore", key, opt)
-}
-
-func (c cmdable) ZRevRangeByLex(ctx context.Context, key string, opt *ZRangeBy) *StringSliceCmd {
- return c.zRevRangeBy(ctx, "zrevrangebylex", key, opt)
-}
-
-func (c cmdable) ZRevRangeByScoreWithScores(ctx context.Context, key string, opt *ZRangeBy) *ZSliceCmd {
- args := []interface{}{"zrevrangebyscore", key, opt.Max, opt.Min, "withscores"}
- if opt.Offset != 0 || opt.Count != 0 {
- args = append(
- args,
- "limit",
- opt.Offset,
- opt.Count,
- )
- }
- cmd := NewZSliceCmd(ctx, args...)
- _ = c(ctx, cmd)
- return cmd
-}
-
-func (c cmdable) ZRevRank(ctx context.Context, key, member string) *IntCmd {
- cmd := NewIntCmd(ctx, "zrevrank", key, member)
- _ = c(ctx, cmd)
- return cmd
-}
-
-func (c cmdable) ZScore(ctx context.Context, key, member string) *FloatCmd {
- cmd := NewFloatCmd(ctx, "zscore", key, member)
- _ = c(ctx, cmd)
- return cmd
-}
-
-func (c cmdable) ZUnion(ctx context.Context, store ZStore) *StringSliceCmd {
- args := make([]interface{}, 0, 2+store.len())
- args = append(args, "zunion", len(store.Keys))
- args = store.appendArgs(args)
- cmd := NewStringSliceCmd(ctx, args...)
- cmd.SetFirstKeyPos(2)
- _ = c(ctx, cmd)
- return cmd
-}
-
-func (c cmdable) ZUnionWithScores(ctx context.Context, store ZStore) *ZSliceCmd {
- args := make([]interface{}, 0, 3+store.len())
- args = append(args, "zunion", len(store.Keys))
- args = store.appendArgs(args)
- args = append(args, "withscores")
- cmd := NewZSliceCmd(ctx, args...)
- cmd.SetFirstKeyPos(2)
- _ = c(ctx, cmd)
- return cmd
-}
-
-func (c cmdable) ZUnionStore(ctx context.Context, dest string, store *ZStore) *IntCmd {
- args := make([]interface{}, 0, 3+store.len())
- args = append(args, "zunionstore", dest, len(store.Keys))
- args = store.appendArgs(args)
- cmd := NewIntCmd(ctx, args...)
- cmd.SetFirstKeyPos(3)
- _ = c(ctx, cmd)
- return cmd
-}
-
-// ZRandMember redis-server version >= 6.2.0.
-func (c cmdable) ZRandMember(ctx context.Context, key string, count int, withScores bool) *StringSliceCmd {
- args := make([]interface{}, 0, 4)
-
- // Although count=0 is meaningless, redis accepts count=0.
- args = append(args, "zrandmember", key, count)
- if withScores {
- args = append(args, "withscores")
- }
-
- cmd := NewStringSliceCmd(ctx, args...)
- _ = c(ctx, cmd)
- return cmd
-}
-
-// ZDiff redis-server version >= 6.2.0.
-func (c cmdable) ZDiff(ctx context.Context, keys ...string) *StringSliceCmd {
- args := make([]interface{}, 2+len(keys))
- args[0] = "zdiff"
- args[1] = len(keys)
- for i, key := range keys {
- args[i+2] = key
- }
-
- cmd := NewStringSliceCmd(ctx, args...)
- cmd.SetFirstKeyPos(2)
- _ = c(ctx, cmd)
- return cmd
-}
-
-// ZDiffWithScores redis-server version >= 6.2.0.
-func (c cmdable) ZDiffWithScores(ctx context.Context, keys ...string) *ZSliceCmd {
- args := make([]interface{}, 3+len(keys))
- args[0] = "zdiff"
- args[1] = len(keys)
- for i, key := range keys {
- args[i+2] = key
- }
- args[len(keys)+2] = "withscores"
-
- cmd := NewZSliceCmd(ctx, args...)
- cmd.SetFirstKeyPos(2)
- _ = c(ctx, cmd)
- return cmd
-}
-
-// ZDiffStore redis-server version >=6.2.0.
-func (c cmdable) ZDiffStore(ctx context.Context, destination string, keys ...string) *IntCmd {
- args := make([]interface{}, 0, 3+len(keys))
- args = append(args, "zdiffstore", destination, len(keys))
- for _, key := range keys {
- args = append(args, key)
- }
- cmd := NewIntCmd(ctx, args...)
- _ = c(ctx, cmd)
- return cmd
-}
-
-//------------------------------------------------------------------------------
-
-func (c cmdable) PFAdd(ctx context.Context, key string, els ...interface{}) *IntCmd {
- args := make([]interface{}, 2, 2+len(els))
- args[0] = "pfadd"
- args[1] = key
- args = appendArgs(args, els)
- cmd := NewIntCmd(ctx, args...)
- _ = c(ctx, cmd)
- return cmd
-}
-
-func (c cmdable) PFCount(ctx context.Context, keys ...string) *IntCmd {
- args := make([]interface{}, 1+len(keys))
- args[0] = "pfcount"
- for i, key := range keys {
- args[1+i] = key
- }
- cmd := NewIntCmd(ctx, args...)
- _ = c(ctx, cmd)
- return cmd
-}
-
-func (c cmdable) PFMerge(ctx context.Context, dest string, keys ...string) *StatusCmd {
- args := make([]interface{}, 2+len(keys))
- args[0] = "pfmerge"
- args[1] = dest
- for i, key := range keys {
- args[2+i] = key
- }
- cmd := NewStatusCmd(ctx, args...)
- _ = c(ctx, cmd)
- return cmd
-}
-
-//------------------------------------------------------------------------------
-
-func (c cmdable) BgRewriteAOF(ctx context.Context) *StatusCmd {
- cmd := NewStatusCmd(ctx, "bgrewriteaof")
- _ = c(ctx, cmd)
- return cmd
-}
-
-func (c cmdable) BgSave(ctx context.Context) *StatusCmd {
- cmd := NewStatusCmd(ctx, "bgsave")
- _ = c(ctx, cmd)
- return cmd
-}
-
-func (c cmdable) ClientKill(ctx context.Context, ipPort string) *StatusCmd {
- cmd := NewStatusCmd(ctx, "client", "kill", ipPort)
- _ = c(ctx, cmd)
- return cmd
-}
-
-// ClientKillByFilter is new style syntax, while the ClientKill is old
-//
-// CLIENT KILL <option> [value] ... <option> [value]
-func (c cmdable) ClientKillByFilter(ctx context.Context, keys ...string) *IntCmd {
- args := make([]interface{}, 2+len(keys))
- args[0] = "client"
- args[1] = "kill"
- for i, key := range keys {
- args[2+i] = key
- }
- cmd := NewIntCmd(ctx, args...)
- _ = c(ctx, cmd)
- return cmd
-}
-
-func (c cmdable) ClientList(ctx context.Context) *StringCmd {
- cmd := NewStringCmd(ctx, "client", "list")
- _ = c(ctx, cmd)
- return cmd
-}
-
-func (c cmdable) ClientPause(ctx context.Context, dur time.Duration) *BoolCmd {
- cmd := NewBoolCmd(ctx, "client", "pause", formatMs(ctx, dur))
- _ = c(ctx, cmd)
- return cmd
-}
-
-func (c cmdable) ClientID(ctx context.Context) *IntCmd {
- cmd := NewIntCmd(ctx, "client", "id")
- _ = c(ctx, cmd)
- return cmd
-}
-
-func (c cmdable) ClientUnblock(ctx context.Context, id int64) *IntCmd {
- cmd := NewIntCmd(ctx, "client", "unblock", id)
- _ = c(ctx, cmd)
- return cmd
-}
-
-func (c cmdable) ClientUnblockWithError(ctx context.Context, id int64) *IntCmd {
- cmd := NewIntCmd(ctx, "client", "unblock", id, "error")
- _ = c(ctx, cmd)
- return cmd
-}
-
-func (c cmdable) ConfigGet(ctx context.Context, parameter string) *SliceCmd {
- cmd := NewSliceCmd(ctx, "config", "get", parameter)
- _ = c(ctx, cmd)
- return cmd
-}
-
-func (c cmdable) ConfigResetStat(ctx context.Context) *StatusCmd {
- cmd := NewStatusCmd(ctx, "config", "resetstat")
- _ = c(ctx, cmd)
- return cmd
-}
-
-func (c cmdable) ConfigSet(ctx context.Context, parameter, value string) *StatusCmd {
- cmd := NewStatusCmd(ctx, "config", "set", parameter, value)
- _ = c(ctx, cmd)
- return cmd
-}
-
-func (c cmdable) ConfigRewrite(ctx context.Context) *StatusCmd {
- cmd := NewStatusCmd(ctx, "config", "rewrite")
- _ = c(ctx, cmd)
- return cmd
-}
-
-func (c cmdable) DBSize(ctx context.Context) *IntCmd {
- cmd := NewIntCmd(ctx, "dbsize")
- _ = c(ctx, cmd)
- return cmd
-}
-
-func (c cmdable) FlushAll(ctx context.Context) *StatusCmd {
- cmd := NewStatusCmd(ctx, "flushall")
- _ = c(ctx, cmd)
- return cmd
-}
-
-func (c cmdable) FlushAllAsync(ctx context.Context) *StatusCmd {
- cmd := NewStatusCmd(ctx, "flushall", "async")
- _ = c(ctx, cmd)
- return cmd
-}
-
-func (c cmdable) FlushDB(ctx context.Context) *StatusCmd {
- cmd := NewStatusCmd(ctx, "flushdb")
- _ = c(ctx, cmd)
- return cmd
-}
-
-func (c cmdable) FlushDBAsync(ctx context.Context) *StatusCmd {
- cmd := NewStatusCmd(ctx, "flushdb", "async")
- _ = c(ctx, cmd)
- return cmd
-}
-
-func (c cmdable) Info(ctx context.Context, section ...string) *StringCmd {
- args := []interface{}{"info"}
- if len(section) > 0 {
- args = append(args, section[0])
- }
- cmd := NewStringCmd(ctx, args...)
- _ = c(ctx, cmd)
- return cmd
-}
-
-func (c cmdable) LastSave(ctx context.Context) *IntCmd {
- cmd := NewIntCmd(ctx, "lastsave")
- _ = c(ctx, cmd)
- return cmd
-}
-
-func (c cmdable) Save(ctx context.Context) *StatusCmd {
- cmd := NewStatusCmd(ctx, "save")
- _ = c(ctx, cmd)
- return cmd
-}
-
-func (c cmdable) shutdown(ctx context.Context, modifier string) *StatusCmd {
- var args []interface{}
- if modifier == "" {
- args = []interface{}{"shutdown"}
- } else {
- args = []interface{}{"shutdown", modifier}
- }
- cmd := NewStatusCmd(ctx, args...)
- _ = c(ctx, cmd)
- if err := cmd.Err(); err != nil {
- if err == io.EOF {
- // Server quit as expected.
- cmd.err = nil
- }
- } else {
- // Server did not quit. String reply contains the reason.
- cmd.err = errors.New(cmd.val)
- cmd.val = ""
- }
- return cmd
-}
-
-func (c cmdable) Shutdown(ctx context.Context) *StatusCmd {
- return c.shutdown(ctx, "")
-}
-
-func (c cmdable) ShutdownSave(ctx context.Context) *StatusCmd {
- return c.shutdown(ctx, "save")
-}
-
-func (c cmdable) ShutdownNoSave(ctx context.Context) *StatusCmd {
- return c.shutdown(ctx, "nosave")
-}
-
-func (c cmdable) SlaveOf(ctx context.Context, host, port string) *StatusCmd {
- cmd := NewStatusCmd(ctx, "slaveof", host, port)
- _ = c(ctx, cmd)
- return cmd
-}
-
-func (c cmdable) SlowLogGet(ctx context.Context, num int64) *SlowLogCmd {
- cmd := NewSlowLogCmd(context.Background(), "slowlog", "get", num)
- _ = c(ctx, cmd)
- return cmd
-}
-
-func (c cmdable) Sync(_ context.Context) {
- panic("not implemented")
-}
-
-func (c cmdable) Time(ctx context.Context) *TimeCmd {
- cmd := NewTimeCmd(ctx, "time")
- _ = c(ctx, cmd)
- return cmd
-}
-
-func (c cmdable) DebugObject(ctx context.Context, key string) *StringCmd {
- cmd := NewStringCmd(ctx, "debug", "object", key)
- _ = c(ctx, cmd)
- return cmd
-}
-
-func (c cmdable) ReadOnly(ctx context.Context) *StatusCmd {
- cmd := NewStatusCmd(ctx, "readonly")
- _ = c(ctx, cmd)
- return cmd
-}
-
-func (c cmdable) ReadWrite(ctx context.Context) *StatusCmd {
- cmd := NewStatusCmd(ctx, "readwrite")
- _ = c(ctx, cmd)
- return cmd
-}
-
-func (c cmdable) MemoryUsage(ctx context.Context, key string, samples ...int) *IntCmd {
- args := []interface{}{"memory", "usage", key}
- if len(samples) > 0 {
- if len(samples) != 1 {
- panic("MemoryUsage expects single sample count")
- }
- args = append(args, "SAMPLES", samples[0])
- }
- cmd := NewIntCmd(ctx, args...)
- cmd.SetFirstKeyPos(2)
- _ = c(ctx, cmd)
- return cmd
-}
-
-//------------------------------------------------------------------------------
-
-func (c cmdable) Eval(ctx context.Context, script string, keys []string, args ...interface{}) *Cmd {
- cmdArgs := make([]interface{}, 3+len(keys), 3+len(keys)+len(args))
- cmdArgs[0] = "eval"
- cmdArgs[1] = script
- cmdArgs[2] = len(keys)
- for i, key := range keys {
- cmdArgs[3+i] = key
- }
- cmdArgs = appendArgs(cmdArgs, args)
- cmd := NewCmd(ctx, cmdArgs...)
- cmd.SetFirstKeyPos(3)
- _ = c(ctx, cmd)
- return cmd
-}
-
-func (c cmdable) EvalSha(ctx context.Context, sha1 string, keys []string, args ...interface{}) *Cmd {
- cmdArgs := make([]interface{}, 3+len(keys), 3+len(keys)+len(args))
- cmdArgs[0] = "evalsha"
- cmdArgs[1] = sha1
- cmdArgs[2] = len(keys)
- for i, key := range keys {
- cmdArgs[3+i] = key
- }
- cmdArgs = appendArgs(cmdArgs, args)
- cmd := NewCmd(ctx, cmdArgs...)
- cmd.SetFirstKeyPos(3)
- _ = c(ctx, cmd)
- return cmd
-}
-
-func (c cmdable) ScriptExists(ctx context.Context, hashes ...string) *BoolSliceCmd {
- args := make([]interface{}, 2+len(hashes))
- args[0] = "script"
- args[1] = "exists"
- for i, hash := range hashes {
- args[2+i] = hash
- }
- cmd := NewBoolSliceCmd(ctx, args...)
- _ = c(ctx, cmd)
- return cmd
-}
-
-func (c cmdable) ScriptFlush(ctx context.Context) *StatusCmd {
- cmd := NewStatusCmd(ctx, "script", "flush")
- _ = c(ctx, cmd)
- return cmd
-}
-
-func (c cmdable) ScriptKill(ctx context.Context) *StatusCmd {
- cmd := NewStatusCmd(ctx, "script", "kill")
- _ = c(ctx, cmd)
- return cmd
-}
-
-func (c cmdable) ScriptLoad(ctx context.Context, script string) *StringCmd {
- cmd := NewStringCmd(ctx, "script", "load", script)
- _ = c(ctx, cmd)
- return cmd
-}
-
-//------------------------------------------------------------------------------
-
-// Publish posts the message to the channel.
-func (c cmdable) Publish(ctx context.Context, channel string, message interface{}) *IntCmd {
- cmd := NewIntCmd(ctx, "publish", channel, message)
- _ = c(ctx, cmd)
- return cmd
-}
-
-func (c cmdable) PubSubChannels(ctx context.Context, pattern string) *StringSliceCmd {
- args := []interface{}{"pubsub", "channels"}
- if pattern != "*" {
- args = append(args, pattern)
- }
- cmd := NewStringSliceCmd(ctx, args...)
- _ = c(ctx, cmd)
- return cmd
-}
-
-func (c cmdable) PubSubNumSub(ctx context.Context, channels ...string) *StringIntMapCmd {
- args := make([]interface{}, 2+len(channels))
- args[0] = "pubsub"
- args[1] = "numsub"
- for i, channel := range channels {
- args[2+i] = channel
- }
- cmd := NewStringIntMapCmd(ctx, args...)
- _ = c(ctx, cmd)
- return cmd
-}
-
-func (c cmdable) PubSubNumPat(ctx context.Context) *IntCmd {
- cmd := NewIntCmd(ctx, "pubsub", "numpat")
- _ = c(ctx, cmd)
- return cmd
-}
-
-//------------------------------------------------------------------------------
-
-func (c cmdable) ClusterSlots(ctx context.Context) *ClusterSlotsCmd {
- cmd := NewClusterSlotsCmd(ctx, "cluster", "slots")
- _ = c(ctx, cmd)
- return cmd
-}
-
-func (c cmdable) ClusterNodes(ctx context.Context) *StringCmd {
- cmd := NewStringCmd(ctx, "cluster", "nodes")
- _ = c(ctx, cmd)
- return cmd
-}
-
-func (c cmdable) ClusterMeet(ctx context.Context, host, port string) *StatusCmd {
- cmd := NewStatusCmd(ctx, "cluster", "meet", host, port)
- _ = c(ctx, cmd)
- return cmd
-}
-
-func (c cmdable) ClusterForget(ctx context.Context, nodeID string) *StatusCmd {
- cmd := NewStatusCmd(ctx, "cluster", "forget", nodeID)
- _ = c(ctx, cmd)
- return cmd
-}
-
-func (c cmdable) ClusterReplicate(ctx context.Context, nodeID string) *StatusCmd {
- cmd := NewStatusCmd(ctx, "cluster", "replicate", nodeID)
- _ = c(ctx, cmd)
- return cmd
-}
-
-func (c cmdable) ClusterResetSoft(ctx context.Context) *StatusCmd {
- cmd := NewStatusCmd(ctx, "cluster", "reset", "soft")
- _ = c(ctx, cmd)
- return cmd
-}
-
-func (c cmdable) ClusterResetHard(ctx context.Context) *StatusCmd {
- cmd := NewStatusCmd(ctx, "cluster", "reset", "hard")
- _ = c(ctx, cmd)
- return cmd
-}
-
-func (c cmdable) ClusterInfo(ctx context.Context) *StringCmd {
- cmd := NewStringCmd(ctx, "cluster", "info")
- _ = c(ctx, cmd)
- return cmd
-}
-
-func (c cmdable) ClusterKeySlot(ctx context.Context, key string) *IntCmd {
- cmd := NewIntCmd(ctx, "cluster", "keyslot", key)
- _ = c(ctx, cmd)
- return cmd
-}
-
-func (c cmdable) ClusterGetKeysInSlot(ctx context.Context, slot int, count int) *StringSliceCmd {
- cmd := NewStringSliceCmd(ctx, "cluster", "getkeysinslot", slot, count)
- _ = c(ctx, cmd)
- return cmd
-}
-
-func (c cmdable) ClusterCountFailureReports(ctx context.Context, nodeID string) *IntCmd {
- cmd := NewIntCmd(ctx, "cluster", "count-failure-reports", nodeID)
- _ = c(ctx, cmd)
- return cmd
-}
-
-func (c cmdable) ClusterCountKeysInSlot(ctx context.Context, slot int) *IntCmd {
- cmd := NewIntCmd(ctx, "cluster", "countkeysinslot", slot)
- _ = c(ctx, cmd)
- return cmd
-}
-
-func (c cmdable) ClusterDelSlots(ctx context.Context, slots ...int) *StatusCmd {
- args := make([]interface{}, 2+len(slots))
- args[0] = "cluster"
- args[1] = "delslots"
- for i, slot := range slots {
- args[2+i] = slot
- }
- cmd := NewStatusCmd(ctx, args...)
- _ = c(ctx, cmd)
- return cmd
-}
-
-func (c cmdable) ClusterDelSlotsRange(ctx context.Context, min, max int) *StatusCmd {
- size := max - min + 1
- slots := make([]int, size)
- for i := 0; i < size; i++ {
- slots[i] = min + i
- }
- return c.ClusterDelSlots(ctx, slots...)
-}
-
-func (c cmdable) ClusterSaveConfig(ctx context.Context) *StatusCmd {
- cmd := NewStatusCmd(ctx, "cluster", "saveconfig")
- _ = c(ctx, cmd)
- return cmd
-}
-
-func (c cmdable) ClusterSlaves(ctx context.Context, nodeID string) *StringSliceCmd {
- cmd := NewStringSliceCmd(ctx, "cluster", "slaves", nodeID)
- _ = c(ctx, cmd)
- return cmd
-}
-
-func (c cmdable) ClusterFailover(ctx context.Context) *StatusCmd {
- cmd := NewStatusCmd(ctx, "cluster", "failover")
- _ = c(ctx, cmd)
- return cmd
-}
-
-func (c cmdable) ClusterAddSlots(ctx context.Context, slots ...int) *StatusCmd {
- args := make([]interface{}, 2+len(slots))
- args[0] = "cluster"
- args[1] = "addslots"
- for i, num := range slots {
- args[2+i] = num
- }
- cmd := NewStatusCmd(ctx, args...)
- _ = c(ctx, cmd)
- return cmd
-}
-
-func (c cmdable) ClusterAddSlotsRange(ctx context.Context, min, max int) *StatusCmd {
- size := max - min + 1
- slots := make([]int, size)
- for i := 0; i < size; i++ {
- slots[i] = min + i
- }
- return c.ClusterAddSlots(ctx, slots...)
-}
-
-//------------------------------------------------------------------------------
-
-func (c cmdable) GeoAdd(ctx context.Context, key string, geoLocation ...*GeoLocation) *IntCmd {
- args := make([]interface{}, 2+3*len(geoLocation))
- args[0] = "geoadd"
- args[1] = key
- for i, eachLoc := range geoLocation {
- args[2+3*i] = eachLoc.Longitude
- args[2+3*i+1] = eachLoc.Latitude
- args[2+3*i+2] = eachLoc.Name
- }
- cmd := NewIntCmd(ctx, args...)
- _ = c(ctx, cmd)
- return cmd
-}
-
-// GeoRadius is a read-only GEORADIUS_RO command.
-func (c cmdable) GeoRadius(
- ctx context.Context, key string, longitude, latitude float64, query *GeoRadiusQuery,
-) *GeoLocationCmd {
- cmd := NewGeoLocationCmd(ctx, query, "georadius_ro", key, longitude, latitude)
- if query.Store != "" || query.StoreDist != "" {
- cmd.SetErr(errors.New("GeoRadius does not support Store or StoreDist"))
- return cmd
- }
- _ = c(ctx, cmd)
- return cmd
-}
-
-// GeoRadiusStore is a writing GEORADIUS command.
-func (c cmdable) GeoRadiusStore(
- ctx context.Context, key string, longitude, latitude float64, query *GeoRadiusQuery,
-) *IntCmd {
- args := geoLocationArgs(query, "georadius", key, longitude, latitude)
- cmd := NewIntCmd(ctx, args...)
- if query.Store == "" && query.StoreDist == "" {
- cmd.SetErr(errors.New("GeoRadiusStore requires Store or StoreDist"))
- return cmd
- }
- _ = c(ctx, cmd)
- return cmd
-}
-
-// GeoRadiusByMember is a read-only GEORADIUSBYMEMBER_RO command.
-func (c cmdable) GeoRadiusByMember(
- ctx context.Context, key, member string, query *GeoRadiusQuery,
-) *GeoLocationCmd {
- cmd := NewGeoLocationCmd(ctx, query, "georadiusbymember_ro", key, member)
- if query.Store != "" || query.StoreDist != "" {
- cmd.SetErr(errors.New("GeoRadiusByMember does not support Store or StoreDist"))
- return cmd
- }
- _ = c(ctx, cmd)
- return cmd
-}
-
-// GeoRadiusByMemberStore is a writing GEORADIUSBYMEMBER command.
-func (c cmdable) GeoRadiusByMemberStore(
- ctx context.Context, key, member string, query *GeoRadiusQuery,
-) *IntCmd {
- args := geoLocationArgs(query, "georadiusbymember", key, member)
- cmd := NewIntCmd(ctx, args...)
- if query.Store == "" && query.StoreDist == "" {
- cmd.SetErr(errors.New("GeoRadiusByMemberStore requires Store or StoreDist"))
- return cmd
- }
- _ = c(ctx, cmd)
- return cmd
-}
-
-func (c cmdable) GeoSearch(ctx context.Context, key string, q *GeoSearchQuery) *StringSliceCmd {
- args := make([]interface{}, 0, 13)
- args = append(args, "geosearch", key)
- args = geoSearchArgs(q, args)
- cmd := NewStringSliceCmd(ctx, args...)
- _ = c(ctx, cmd)
- return cmd
-}
-
-func (c cmdable) GeoSearchLocation(
- ctx context.Context, key string, q *GeoSearchLocationQuery,
-) *GeoSearchLocationCmd {
- args := make([]interface{}, 0, 16)
- args = append(args, "geosearch", key)
- args = geoSearchLocationArgs(q, args)
- cmd := NewGeoSearchLocationCmd(ctx, q, args...)
- _ = c(ctx, cmd)
- return cmd
-}
-
-func (c cmdable) GeoSearchStore(ctx context.Context, key, store string, q *GeoSearchStoreQuery) *IntCmd {
- args := make([]interface{}, 0, 15)
- args = append(args, "geosearchstore", store, key)
- args = geoSearchArgs(&q.GeoSearchQuery, args)
- if q.StoreDist {
- args = append(args, "storedist")
- }
- cmd := NewIntCmd(ctx, args...)
- _ = c(ctx, cmd)
- return cmd
-}
-
-func (c cmdable) GeoDist(
- ctx context.Context, key string, member1, member2, unit string,
-) *FloatCmd {
- if unit == "" {
- unit = "km"
- }
- cmd := NewFloatCmd(ctx, "geodist", key, member1, member2, unit)
- _ = c(ctx, cmd)
- return cmd
-}
-
-func (c cmdable) GeoHash(ctx context.Context, key string, members ...string) *StringSliceCmd {
- args := make([]interface{}, 2+len(members))
- args[0] = "geohash"
- args[1] = key
- for i, member := range members {
- args[2+i] = member
- }
- cmd := NewStringSliceCmd(ctx, args...)
- _ = c(ctx, cmd)
- return cmd
-}
-
-func (c cmdable) GeoPos(ctx context.Context, key string, members ...string) *GeoPosCmd {
- args := make([]interface{}, 2+len(members))
- args[0] = "geopos"
- args[1] = key
- for i, member := range members {
- args[2+i] = member
- }
- cmd := NewGeoPosCmd(ctx, args...)
- _ = c(ctx, cmd)
- return cmd
-}
diff --git a/dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/go.mod b/dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/go.mod
deleted file mode 100644
index d2610c2..0000000
--- a/dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/go.mod
+++ /dev/null
@@ -1,20 +0,0 @@
-module github.com/go-redis/redis/v8
-
-go 1.17
-
-require (
- github.com/cespare/xxhash/v2 v2.1.2
- github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f
- github.com/onsi/ginkgo v1.16.5
- github.com/onsi/gomega v1.18.1
-)
-
-require (
- github.com/fsnotify/fsnotify v1.4.9 // indirect
- github.com/nxadm/tail v1.4.8 // indirect
- golang.org/x/net v0.0.0-20210428140749-89ef3d95e781 // indirect
- golang.org/x/sys v0.0.0-20211216021012-1d35b9e2eb4e // indirect
- golang.org/x/text v0.3.6 // indirect
- gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 // indirect
- gopkg.in/yaml.v2 v2.4.0 // indirect
-)
diff --git a/dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/go.sum b/dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/go.sum
deleted file mode 100644
index e88f31a..0000000
--- a/dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/go.sum
+++ /dev/null
@@ -1,108 +0,0 @@
-github.com/cespare/xxhash/v2 v2.1.2 h1:YRXhKfTDauu4ajMg1TPgFO5jnlC2HCbmLXMcTG5cbYE=
-github.com/cespare/xxhash/v2 v2.1.2/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
-github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI=
-github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI=
-github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU=
-github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
-github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
-github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f h1:lO4WD4F/rVNCu3HqELle0jiPLLBs70cWOduZpkS1E78=
-github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f/go.mod h1:cuUVRXasLTGF7a8hSLbxyZXjz+1KgoB3wDUb6vlszIc=
-github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo=
-github.com/fsnotify/fsnotify v1.4.9 h1:hsms1Qyu0jgnwNXIxa+/V/PDsU6CfLf6CNO8H7IWoS4=
-github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ=
-github.com/go-task/slim-sprig v0.0.0-20210107165309-348f09dbbbc0/go.mod h1:fyg7847qk6SyHyPtNmDHnmrv/HOrqktSC+C9fM+CJOE=
-github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
-github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8=
-github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA=
-github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs=
-github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w=
-github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0=
-github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI=
-github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk=
-github.com/golang/protobuf v1.5.2 h1:ROPKBNFfQgOUMifHyP+KYbvpjbdoFNs+aK7DXlji0Tw=
-github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY=
-github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
-github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
-github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
-github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
-github.com/google/pprof v0.0.0-20210407192527-94a9f03dee38/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
-github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU=
-github.com/ianlancetaylor/demangle v0.0.0-20200824232613-28f6c0f3b639/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc=
-github.com/nxadm/tail v1.4.4/go.mod h1:kenIhsEOeOJmVchQTgglprH7qJGnHDVpk1VPCcaMI8A=
-github.com/nxadm/tail v1.4.8 h1:nPr65rt6Y5JFSKQO7qToXr7pePgD6Gwiw05lkbyAQTE=
-github.com/nxadm/tail v1.4.8/go.mod h1:+ncqLTQzXmGhMZNUePPaPqPvBxHAIsmXswZKocGu+AU=
-github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
-github.com/onsi/ginkgo v1.12.1/go.mod h1:zj2OWP4+oCPe1qIXoGWkgMRwljMUYCdkwsT2108oapk=
-github.com/onsi/ginkgo v1.16.4/go.mod h1:dX+/inL/fNMqNlz0e9LfyB9TswhZpCVdJM/Z6Vvnwo0=
-github.com/onsi/ginkgo v1.16.5 h1:8xi0RTUf59SOSfEtZMvwTvXYMzG4gV23XVHOZiXNtnE=
-github.com/onsi/ginkgo v1.16.5/go.mod h1:+E8gABHa3K6zRBolWtd+ROzc/U5bkGt0FwiG042wbpU=
-github.com/onsi/ginkgo/v2 v2.0.0 h1:CcuG/HvWNkkaqCUpJifQY8z7qEMBJya6aLPx6ftGyjQ=
-github.com/onsi/ginkgo/v2 v2.0.0/go.mod h1:vw5CSIxN1JObi/U8gcbwft7ZxR2dgaR70JSE3/PpL4c=
-github.com/onsi/gomega v1.7.1/go.mod h1:XdKZgCCFLUoM/7CFJVPcG8C1xQ1AJ0vpAezJrB7JYyY=
-github.com/onsi/gomega v1.10.1/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1ybHNo=
-github.com/onsi/gomega v1.17.0/go.mod h1:HnhC7FXeEQY45zxNK3PPoIUhzk/80Xly9PcubAlGdZY=
-github.com/onsi/gomega v1.18.1 h1:M1GfJqGRrBrrGGsbxzV5dqM2U2ApXefZCQpkukxYRLE=
-github.com/onsi/gomega v1.18.1/go.mod h1:0q+aL8jAiMXy9hbwj2mr5GziHiwhAIQpFmmtT5hitRs=
-github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
-github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
-github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA=
-github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
-golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
-golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
-golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
-golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
-golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
-golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
-golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
-golang.org/x/net v0.0.0-20200520004742-59133d7f0dd7/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
-golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
-golang.org/x/net v0.0.0-20210428140749-89ef3d95e781 h1:DzZ89McO9/gWPsQXS/FVKAlG02ZjaQ6AlZRBimEYOd0=
-golang.org/x/net v0.0.0-20210428140749-89ef3d95e781/go.mod h1:OJAsFXCWl8Ukc7SiCT/9KSuxbyM7479/AVlXFRxuMCk=
-golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
-golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
-golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
-golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
-golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
-golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20190904154756-749cb33beabd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20191005200804-aed5e4c7ecf9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20191120155948-bd437916bb0e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20191204072324-ce4227a45e2e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20210112080510-489259a85091/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20211216021012-1d35b9e2eb4e h1:fLOSk5Q00efkSvAm+4xcoXD+RRmLmmulPn5I3Y9F2EM=
-golang.org/x/sys v0.0.0-20211216021012-1d35b9e2eb4e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
-golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
-golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
-golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
-golang.org/x/text v0.3.6 h1:aRYxNxv6iGQlyVaZmk6ZgYEDa+Jg18DxebPSrd6bg1M=
-golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
-golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
-golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
-golang.org/x/tools v0.0.0-20201224043029-2b0845dc783e/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
-golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
-golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
-golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
-golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
-google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8=
-google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0=
-google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM=
-google.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miEFZTKqfCUM6K7xSMQL9OKL/b6hQv+e19PK+JZNE=
-google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo=
-google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
-google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw=
-google.golang.org/protobuf v1.26.0 h1:bxAC2xTBsZGibn2RTntX0oH50xLsqy1OxA9tTL3p/lk=
-google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc=
-gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM=
-gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
-gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys=
-gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 h1:uRGJdciOHaEIrze2W8Q3AKkepLTh2hOroT7a+7czfdQ=
-gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw=
-gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
-gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
-gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
-gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY=
-gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ=
diff --git a/dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/internal/pool/main_test.go b/dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/internal/pool/main_test.go
deleted file mode 100644
index 2365dbc..0000000
--- a/dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/internal/pool/main_test.go
+++ /dev/null
@@ -1,36 +0,0 @@
-package pool_test
-
-import (
- "context"
- "net"
- "sync"
- "testing"
-
- . "github.com/onsi/ginkgo"
- . "github.com/onsi/gomega"
-)
-
-func TestGinkgoSuite(t *testing.T) {
- RegisterFailHandler(Fail)
- RunSpecs(t, "pool")
-}
-
-func perform(n int, cbs ...func(int)) {
- var wg sync.WaitGroup
- for _, cb := range cbs {
- for i := 0; i < n; i++ {
- wg.Add(1)
- go func(cb func(int), i int) {
- defer GinkgoRecover()
- defer wg.Done()
-
- cb(i)
- }(cb, i)
- }
- }
- wg.Wait()
-}
-
-func dummyDialer(context.Context) (net.Conn, error) {
- return &net.TCPConn{}, nil
-}
diff --git a/dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/internal/proto/reader.go b/dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/internal/proto/reader.go
deleted file mode 100644
index 0e6ca77..0000000
--- a/dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/internal/proto/reader.go
+++ /dev/null
@@ -1,332 +0,0 @@
-package proto
-
-import (
- "bufio"
- "fmt"
- "io"
-
- "github.com/go-redis/redis/v8/internal/util"
-)
-
-// redis resp protocol data type.
-const (
- ErrorReply = '-'
- StatusReply = '+'
- IntReply = ':'
- StringReply = '$'
- ArrayReply = '*'
-)
-
-//------------------------------------------------------------------------------
-
-const Nil = RedisError("redis: nil") // nolint:errname
-
-type RedisError string
-
-func (e RedisError) Error() string { return string(e) }
-
-func (RedisError) RedisError() {}
-
-//------------------------------------------------------------------------------
-
-type MultiBulkParse func(*Reader, int64) (interface{}, error)
-
-type Reader struct {
- rd *bufio.Reader
- _buf []byte
-}
-
-func NewReader(rd io.Reader) *Reader {
- return &Reader{
- rd: bufio.NewReader(rd),
- _buf: make([]byte, 64),
- }
-}
-
-func (r *Reader) Buffered() int {
- return r.rd.Buffered()
-}
-
-func (r *Reader) Peek(n int) ([]byte, error) {
- return r.rd.Peek(n)
-}
-
-func (r *Reader) Reset(rd io.Reader) {
- r.rd.Reset(rd)
-}
-
-func (r *Reader) ReadLine() ([]byte, error) {
- line, err := r.readLine()
- if err != nil {
- return nil, err
- }
- if isNilReply(line) {
- return nil, Nil
- }
- return line, nil
-}
-
-// readLine that returns an error if:
-// - there is a pending read error;
-// - or line does not end with \r\n.
-func (r *Reader) readLine() ([]byte, error) {
- b, err := r.rd.ReadSlice('\n')
- if err != nil {
- if err != bufio.ErrBufferFull {
- return nil, err
- }
-
- full := make([]byte, len(b))
- copy(full, b)
-
- b, err = r.rd.ReadBytes('\n')
- if err != nil {
- return nil, err
- }
-
- full = append(full, b...) //nolint:makezero
- b = full
- }
- if len(b) <= 2 || b[len(b)-1] != '\n' || b[len(b)-2] != '\r' {
- return nil, fmt.Errorf("redis: invalid reply: %q", b)
- }
- return b[:len(b)-2], nil
-}
-
-func (r *Reader) ReadReply(m MultiBulkParse) (interface{}, error) {
- line, err := r.ReadLine()
- if err != nil {
- return nil, err
- }
-
- switch line[0] {
- case ErrorReply:
- return nil, ParseErrorReply(line)
- case StatusReply:
- return string(line[1:]), nil
- case IntReply:
- return util.ParseInt(line[1:], 10, 64)
- case StringReply:
- return r.readStringReply(line)
- case ArrayReply:
- n, err := parseArrayLen(line)
- if err != nil {
- return nil, err
- }
- if m == nil {
- err := fmt.Errorf("redis: got %.100q, but multi bulk parser is nil", line)
- return nil, err
- }
- return m(r, n)
- }
- return nil, fmt.Errorf("redis: can't parse %.100q", line)
-}
-
-func (r *Reader) ReadIntReply() (int64, error) {
- line, err := r.ReadLine()
- if err != nil {
- return 0, err
- }
- switch line[0] {
- case ErrorReply:
- return 0, ParseErrorReply(line)
- case IntReply:
- return util.ParseInt(line[1:], 10, 64)
- default:
- return 0, fmt.Errorf("redis: can't parse int reply: %.100q", line)
- }
-}
-
-func (r *Reader) ReadString() (string, error) {
- line, err := r.ReadLine()
- if err != nil {
- return "", err
- }
- switch line[0] {
- case ErrorReply:
- return "", ParseErrorReply(line)
- case StringReply:
- return r.readStringReply(line)
- case StatusReply:
- return string(line[1:]), nil
- case IntReply:
- return string(line[1:]), nil
- default:
- return "", fmt.Errorf("redis: can't parse reply=%.100q reading string", line)
- }
-}
-
-func (r *Reader) readStringReply(line []byte) (string, error) {
- if isNilReply(line) {
- return "", Nil
- }
-
- replyLen, err := util.Atoi(line[1:])
- if err != nil {
- return "", err
- }
-
- b := make([]byte, replyLen+2)
- _, err = io.ReadFull(r.rd, b)
- if err != nil {
- return "", err
- }
-
- return util.BytesToString(b[:replyLen]), nil
-}
-
-func (r *Reader) ReadArrayReply(m MultiBulkParse) (interface{}, error) {
- line, err := r.ReadLine()
- if err != nil {
- return nil, err
- }
- switch line[0] {
- case ErrorReply:
- return nil, ParseErrorReply(line)
- case ArrayReply:
- n, err := parseArrayLen(line)
- if err != nil {
- return nil, err
- }
- return m(r, n)
- default:
- return nil, fmt.Errorf("redis: can't parse array reply: %.100q", line)
- }
-}
-
-func (r *Reader) ReadArrayLen() (int, error) {
- line, err := r.ReadLine()
- if err != nil {
- return 0, err
- }
- switch line[0] {
- case ErrorReply:
- return 0, ParseErrorReply(line)
- case ArrayReply:
- n, err := parseArrayLen(line)
- if err != nil {
- return 0, err
- }
- return int(n), nil
- default:
- return 0, fmt.Errorf("redis: can't parse array reply: %.100q", line)
- }
-}
-
-func (r *Reader) ReadScanReply() ([]string, uint64, error) {
- n, err := r.ReadArrayLen()
- if err != nil {
- return nil, 0, err
- }
- if n != 2 {
- return nil, 0, fmt.Errorf("redis: got %d elements in scan reply, expected 2", n)
- }
-
- cursor, err := r.ReadUint()
- if err != nil {
- return nil, 0, err
- }
-
- n, err = r.ReadArrayLen()
- if err != nil {
- return nil, 0, err
- }
-
- keys := make([]string, n)
-
- for i := 0; i < n; i++ {
- key, err := r.ReadString()
- if err != nil {
- return nil, 0, err
- }
- keys[i] = key
- }
-
- return keys, cursor, err
-}
-
-func (r *Reader) ReadInt() (int64, error) {
- b, err := r.readTmpBytesReply()
- if err != nil {
- return 0, err
- }
- return util.ParseInt(b, 10, 64)
-}
-
-func (r *Reader) ReadUint() (uint64, error) {
- b, err := r.readTmpBytesReply()
- if err != nil {
- return 0, err
- }
- return util.ParseUint(b, 10, 64)
-}
-
-func (r *Reader) ReadFloatReply() (float64, error) {
- b, err := r.readTmpBytesReply()
- if err != nil {
- return 0, err
- }
- return util.ParseFloat(b, 64)
-}
-
-func (r *Reader) readTmpBytesReply() ([]byte, error) {
- line, err := r.ReadLine()
- if err != nil {
- return nil, err
- }
- switch line[0] {
- case ErrorReply:
- return nil, ParseErrorReply(line)
- case StringReply:
- return r._readTmpBytesReply(line)
- case StatusReply:
- return line[1:], nil
- default:
- return nil, fmt.Errorf("redis: can't parse string reply: %.100q", line)
- }
-}
-
-func (r *Reader) _readTmpBytesReply(line []byte) ([]byte, error) {
- if isNilReply(line) {
- return nil, Nil
- }
-
- replyLen, err := util.Atoi(line[1:])
- if err != nil {
- return nil, err
- }
-
- buf := r.buf(replyLen + 2)
- _, err = io.ReadFull(r.rd, buf)
- if err != nil {
- return nil, err
- }
-
- return buf[:replyLen], nil
-}
-
-func (r *Reader) buf(n int) []byte {
- if n <= cap(r._buf) {
- return r._buf[:n]
- }
- d := n - cap(r._buf)
- r._buf = append(r._buf, make([]byte, d)...)
- return r._buf
-}
-
-func isNilReply(b []byte) bool {
- return len(b) == 3 &&
- (b[0] == StringReply || b[0] == ArrayReply) &&
- b[1] == '-' && b[2] == '1'
-}
-
-func ParseErrorReply(line []byte) error {
- return RedisError(string(line[1:]))
-}
-
-func parseArrayLen(line []byte) (int64, error) {
- if isNilReply(line) {
- return 0, Nil
- }
- return util.ParseInt(line[1:], 10, 64)
-}
diff --git a/dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/internal/proto/reader_test.go b/dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/internal/proto/reader_test.go
deleted file mode 100644
index b8c99dd..0000000
--- a/dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/internal/proto/reader_test.go
+++ /dev/null
@@ -1,72 +0,0 @@
-package proto_test
-
-import (
- "bytes"
- "io"
- "testing"
-
- "github.com/go-redis/redis/v8/internal/proto"
-)
-
-func BenchmarkReader_ParseReply_Status(b *testing.B) {
- benchmarkParseReply(b, "+OK\r\n", nil, false)
-}
-
-func BenchmarkReader_ParseReply_Int(b *testing.B) {
- benchmarkParseReply(b, ":1\r\n", nil, false)
-}
-
-func BenchmarkReader_ParseReply_Error(b *testing.B) {
- benchmarkParseReply(b, "-Error message\r\n", nil, true)
-}
-
-func BenchmarkReader_ParseReply_String(b *testing.B) {
- benchmarkParseReply(b, "$5\r\nhello\r\n", nil, false)
-}
-
-func BenchmarkReader_ParseReply_Slice(b *testing.B) {
- benchmarkParseReply(b, "*2\r\n$5\r\nhello\r\n$5\r\nworld\r\n", multiBulkParse, false)
-}
-
-func TestReader_ReadLine(t *testing.T) {
- original := bytes.Repeat([]byte("a"), 8192)
- original[len(original)-2] = '\r'
- original[len(original)-1] = '\n'
- r := proto.NewReader(bytes.NewReader(original))
- read, err := r.ReadLine()
- if err != nil && err != io.EOF {
- t.Errorf("Should be able to read the full buffer: %v", err)
- }
-
- if bytes.Compare(read, original[:len(original)-2]) != 0 {
- t.Errorf("Values must be equal: %d expected %d", len(read), len(original[:len(original)-2]))
- }
-}
-
-func benchmarkParseReply(b *testing.B, reply string, m proto.MultiBulkParse, wanterr bool) {
- buf := new(bytes.Buffer)
- for i := 0; i < b.N; i++ {
- buf.WriteString(reply)
- }
- p := proto.NewReader(buf)
- b.ResetTimer()
-
- for i := 0; i < b.N; i++ {
- _, err := p.ReadReply(m)
- if !wanterr && err != nil {
- b.Fatal(err)
- }
- }
-}
-
-func multiBulkParse(p *proto.Reader, n int64) (interface{}, error) {
- vv := make([]interface{}, 0, n)
- for i := int64(0); i < n; i++ {
- v, err := p.ReadReply(multiBulkParse)
- if err != nil {
- return nil, err
- }
- vv = append(vv, v)
- }
- return vv, nil
-}
diff --git a/dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/internal/proto/writer_test.go b/dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/internal/proto/writer_test.go
deleted file mode 100644
index ebae569..0000000
--- a/dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/internal/proto/writer_test.go
+++ /dev/null
@@ -1,93 +0,0 @@
-package proto_test
-
-import (
- "bytes"
- "encoding"
- "testing"
- "time"
-
- . "github.com/onsi/ginkgo"
- . "github.com/onsi/gomega"
-
- "github.com/go-redis/redis/v8/internal/proto"
-)
-
-type MyType struct{}
-
-var _ encoding.BinaryMarshaler = (*MyType)(nil)
-
-func (t *MyType) MarshalBinary() ([]byte, error) {
- return []byte("hello"), nil
-}
-
-var _ = Describe("WriteBuffer", func() {
- var buf *bytes.Buffer
- var wr *proto.Writer
-
- BeforeEach(func() {
- buf = new(bytes.Buffer)
- wr = proto.NewWriter(buf)
- })
-
- It("should write args", func() {
- err := wr.WriteArgs([]interface{}{
- "string",
- 12,
- 34.56,
- []byte{'b', 'y', 't', 'e', 's'},
- true,
- nil,
- })
- Expect(err).NotTo(HaveOccurred())
-
- Expect(buf.Bytes()).To(Equal([]byte("*6\r\n" +
- "$6\r\nstring\r\n" +
- "$2\r\n12\r\n" +
- "$5\r\n34.56\r\n" +
- "$5\r\nbytes\r\n" +
- "$1\r\n1\r\n" +
- "$0\r\n" +
- "\r\n")))
- })
-
- It("should append time", func() {
- tm := time.Date(2019, 1, 1, 9, 45, 10, 222125, time.UTC)
- err := wr.WriteArgs([]interface{}{tm})
- Expect(err).NotTo(HaveOccurred())
-
- Expect(buf.Len()).To(Equal(41))
- })
-
- It("should append marshalable args", func() {
- err := wr.WriteArgs([]interface{}{&MyType{}})
- Expect(err).NotTo(HaveOccurred())
-
- Expect(buf.Len()).To(Equal(15))
- })
-})
-
-type discard struct{}
-
-func (discard) Write(b []byte) (int, error) {
- return len(b), nil
-}
-
-func (discard) WriteString(s string) (int, error) {
- return len(s), nil
-}
-
-func (discard) WriteByte(c byte) error {
- return nil
-}
-
-func BenchmarkWriteBuffer_Append(b *testing.B) {
- buf := proto.NewWriter(discard{})
- args := []interface{}{"hello", "world", "foo", "bar"}
-
- for i := 0; i < b.N; i++ {
- err := buf.WriteArgs(args)
- if err != nil {
- b.Fatal(err)
- }
- }
-}
diff --git a/dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/internal/safe.go b/dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/internal/safe.go
deleted file mode 100644
index fd2f434..0000000
--- a/dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/internal/safe.go
+++ /dev/null
@@ -1,12 +0,0 @@
-//go:build appengine
-// +build appengine
-
-package internal
-
-func String(b []byte) string {
- return string(b)
-}
-
-func Bytes(s string) []byte {
- return []byte(s)
-}
diff --git a/dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/internal/unsafe.go b/dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/internal/unsafe.go
deleted file mode 100644
index 9f2e418..0000000
--- a/dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/internal/unsafe.go
+++ /dev/null
@@ -1,21 +0,0 @@
-//go:build !appengine
-// +build !appengine
-
-package internal
-
-import "unsafe"
-
-// String converts byte slice to string.
-func String(b []byte) string {
- return *(*string)(unsafe.Pointer(&b))
-}
-
-// Bytes converts string to byte slice.
-func Bytes(s string) []byte {
- return *(*[]byte)(unsafe.Pointer(
- &struct {
- string
- Cap int
- }{s, len(s)},
- ))
-}
diff --git a/dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/internal_test.go b/dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/internal_test.go
deleted file mode 100644
index b1dd0bd..0000000
--- a/dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/internal_test.go
+++ /dev/null
@@ -1,67 +0,0 @@
-package redis
-
-import (
- . "github.com/onsi/ginkgo"
- . "github.com/onsi/gomega"
-)
-
-var _ = Describe("newClusterState", func() {
- var state *clusterState
-
- createClusterState := func(slots []ClusterSlot) *clusterState {
- opt := &ClusterOptions{}
- opt.init()
- nodes := newClusterNodes(opt)
- state, err := newClusterState(nodes, slots, "10.10.10.10:1234")
- Expect(err).NotTo(HaveOccurred())
- return state
- }
-
- Describe("sorting", func() {
- BeforeEach(func() {
- state = createClusterState([]ClusterSlot{{
- Start: 1000,
- End: 1999,
- }, {
- Start: 0,
- End: 999,
- }, {
- Start: 2000,
- End: 2999,
- }})
- })
-
- It("sorts slots", func() {
- Expect(state.slots).To(Equal([]*clusterSlot{
- {start: 0, end: 999, nodes: nil},
- {start: 1000, end: 1999, nodes: nil},
- {start: 2000, end: 2999, nodes: nil},
- }))
- })
- })
-
- Describe("loopback", func() {
- BeforeEach(func() {
- state = createClusterState([]ClusterSlot{{
- Nodes: []ClusterNode{{Addr: "127.0.0.1:7001"}},
- }, {
- Nodes: []ClusterNode{{Addr: "127.0.0.1:7002"}},
- }, {
- Nodes: []ClusterNode{{Addr: "1.2.3.4:1234"}},
- }, {
- Nodes: []ClusterNode{{Addr: ":1234"}},
- }})
- })
-
- It("replaces loopback hosts in addresses", func() {
- slotAddr := func(slot *clusterSlot) string {
- return slot.nodes[0].Client.Options().Addr
- }
-
- Expect(slotAddr(state.slots[0])).To(Equal("10.10.10.10:7001"))
- Expect(slotAddr(state.slots[1])).To(Equal("10.10.10.10:7002"))
- Expect(slotAddr(state.slots[2])).To(Equal("1.2.3.4:1234"))
- Expect(slotAddr(state.slots[3])).To(Equal(":1234"))
- })
- })
-})
diff --git a/dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/package.json b/dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/package.json
deleted file mode 100644
index e4ea4bb..0000000
--- a/dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/package.json
+++ /dev/null
@@ -1,8 +0,0 @@
-{
- "name": "redis",
- "version": "8.11.5",
- "main": "index.js",
- "repository": "git@github.com:go-redis/redis.git",
- "author": "Vladimir Mihailenco <vladimir.webdev@gmail.com>",
- "license": "BSD-2-clause"
-}
diff --git a/dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/redis.go b/dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/redis.go
deleted file mode 100644
index bcf8a2a..0000000
--- a/dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/redis.go
+++ /dev/null
@@ -1,773 +0,0 @@
-package redis
-
-import (
- "context"
- "errors"
- "fmt"
- "sync/atomic"
- "time"
-
- "github.com/go-redis/redis/v8/internal"
- "github.com/go-redis/redis/v8/internal/pool"
- "github.com/go-redis/redis/v8/internal/proto"
-)
-
-// Nil reply returned by Redis when key does not exist.
-const Nil = proto.Nil
-
-func SetLogger(logger internal.Logging) {
- internal.Logger = logger
-}
-
-//------------------------------------------------------------------------------
-
-type Hook interface {
- BeforeProcess(ctx context.Context, cmd Cmder) (context.Context, error)
- AfterProcess(ctx context.Context, cmd Cmder) error
-
- BeforeProcessPipeline(ctx context.Context, cmds []Cmder) (context.Context, error)
- AfterProcessPipeline(ctx context.Context, cmds []Cmder) error
-}
-
-type hooks struct {
- hooks []Hook
-}
-
-func (hs *hooks) lock() {
- hs.hooks = hs.hooks[:len(hs.hooks):len(hs.hooks)]
-}
-
-func (hs hooks) clone() hooks {
- clone := hs
- clone.lock()
- return clone
-}
-
-func (hs *hooks) AddHook(hook Hook) {
- hs.hooks = append(hs.hooks, hook)
-}
-
-func (hs hooks) process(
- ctx context.Context, cmd Cmder, fn func(context.Context, Cmder) error,
-) error {
- if len(hs.hooks) == 0 {
- err := fn(ctx, cmd)
- cmd.SetErr(err)
- return err
- }
-
- var hookIndex int
- var retErr error
-
- for ; hookIndex < len(hs.hooks) && retErr == nil; hookIndex++ {
- ctx, retErr = hs.hooks[hookIndex].BeforeProcess(ctx, cmd)
- if retErr != nil {
- cmd.SetErr(retErr)
- }
- }
-
- if retErr == nil {
- retErr = fn(ctx, cmd)
- cmd.SetErr(retErr)
- }
-
- for hookIndex--; hookIndex >= 0; hookIndex-- {
- if err := hs.hooks[hookIndex].AfterProcess(ctx, cmd); err != nil {
- retErr = err
- cmd.SetErr(retErr)
- }
- }
-
- return retErr
-}
-
-func (hs hooks) processPipeline(
- ctx context.Context, cmds []Cmder, fn func(context.Context, []Cmder) error,
-) error {
- if len(hs.hooks) == 0 {
- err := fn(ctx, cmds)
- return err
- }
-
- var hookIndex int
- var retErr error
-
- for ; hookIndex < len(hs.hooks) && retErr == nil; hookIndex++ {
- ctx, retErr = hs.hooks[hookIndex].BeforeProcessPipeline(ctx, cmds)
- if retErr != nil {
- setCmdsErr(cmds, retErr)
- }
- }
-
- if retErr == nil {
- retErr = fn(ctx, cmds)
- }
-
- for hookIndex--; hookIndex >= 0; hookIndex-- {
- if err := hs.hooks[hookIndex].AfterProcessPipeline(ctx, cmds); err != nil {
- retErr = err
- setCmdsErr(cmds, retErr)
- }
- }
-
- return retErr
-}
-
-func (hs hooks) processTxPipeline(
- ctx context.Context, cmds []Cmder, fn func(context.Context, []Cmder) error,
-) error {
- cmds = wrapMultiExec(ctx, cmds)
- return hs.processPipeline(ctx, cmds, fn)
-}
-
-//------------------------------------------------------------------------------
-
-type baseClient struct {
- opt *Options
- connPool pool.Pooler
-
- onClose func() error // hook called when client is closed
-}
-
-func newBaseClient(opt *Options, connPool pool.Pooler) *baseClient {
- return &baseClient{
- opt: opt,
- connPool: connPool,
- }
-}
-
-func (c *baseClient) clone() *baseClient {
- clone := *c
- return &clone
-}
-
-func (c *baseClient) withTimeout(timeout time.Duration) *baseClient {
- opt := c.opt.clone()
- opt.ReadTimeout = timeout
- opt.WriteTimeout = timeout
-
- clone := c.clone()
- clone.opt = opt
-
- return clone
-}
-
-func (c *baseClient) String() string {
- return fmt.Sprintf("Redis<%s db:%d>", c.getAddr(), c.opt.DB)
-}
-
-func (c *baseClient) newConn(ctx context.Context) (*pool.Conn, error) {
- cn, err := c.connPool.NewConn(ctx)
- if err != nil {
- return nil, err
- }
-
- err = c.initConn(ctx, cn)
- if err != nil {
- _ = c.connPool.CloseConn(cn)
- return nil, err
- }
-
- return cn, nil
-}
-
-func (c *baseClient) getConn(ctx context.Context) (*pool.Conn, error) {
- if c.opt.Limiter != nil {
- err := c.opt.Limiter.Allow()
- if err != nil {
- return nil, err
- }
- }
-
- cn, err := c._getConn(ctx)
- if err != nil {
- if c.opt.Limiter != nil {
- c.opt.Limiter.ReportResult(err)
- }
- return nil, err
- }
-
- return cn, nil
-}
-
-func (c *baseClient) _getConn(ctx context.Context) (*pool.Conn, error) {
- cn, err := c.connPool.Get(ctx)
- if err != nil {
- return nil, err
- }
-
- if cn.Inited {
- return cn, nil
- }
-
- if err := c.initConn(ctx, cn); err != nil {
- c.connPool.Remove(ctx, cn, err)
- if err := errors.Unwrap(err); err != nil {
- return nil, err
- }
- return nil, err
- }
-
- return cn, nil
-}
-
-func (c *baseClient) initConn(ctx context.Context, cn *pool.Conn) error {
- if cn.Inited {
- return nil
- }
- cn.Inited = true
-
- if c.opt.Password == "" &&
- c.opt.DB == 0 &&
- !c.opt.readOnly &&
- c.opt.OnConnect == nil {
- return nil
- }
-
- connPool := pool.NewSingleConnPool(c.connPool, cn)
- conn := newConn(ctx, c.opt, connPool)
-
- _, err := conn.Pipelined(ctx, func(pipe Pipeliner) error {
- if c.opt.Password != "" {
- if c.opt.Username != "" {
- pipe.AuthACL(ctx, c.opt.Username, c.opt.Password)
- } else {
- pipe.Auth(ctx, c.opt.Password)
- }
- }
-
- if c.opt.DB > 0 {
- pipe.Select(ctx, c.opt.DB)
- }
-
- if c.opt.readOnly {
- pipe.ReadOnly(ctx)
- }
-
- return nil
- })
- if err != nil {
- return err
- }
-
- if c.opt.OnConnect != nil {
- return c.opt.OnConnect(ctx, conn)
- }
- return nil
-}
-
-func (c *baseClient) releaseConn(ctx context.Context, cn *pool.Conn, err error) {
- if c.opt.Limiter != nil {
- c.opt.Limiter.ReportResult(err)
- }
-
- if isBadConn(err, false, c.opt.Addr) {
- c.connPool.Remove(ctx, cn, err)
- } else {
- c.connPool.Put(ctx, cn)
- }
-}
-
-func (c *baseClient) withConn(
- ctx context.Context, fn func(context.Context, *pool.Conn) error,
-) error {
- cn, err := c.getConn(ctx)
- if err != nil {
- return err
- }
-
- defer func() {
- c.releaseConn(ctx, cn, err)
- }()
-
- done := ctx.Done() //nolint:ifshort
-
- if done == nil {
- err = fn(ctx, cn)
- return err
- }
-
- errc := make(chan error, 1)
- go func() { errc <- fn(ctx, cn) }()
-
- select {
- case <-done:
- _ = cn.Close()
- // Wait for the goroutine to finish and send something.
- <-errc
-
- err = ctx.Err()
- return err
- case err = <-errc:
- return err
- }
-}
-
-func (c *baseClient) process(ctx context.Context, cmd Cmder) error {
- var lastErr error
- for attempt := 0; attempt <= c.opt.MaxRetries; attempt++ {
- attempt := attempt
-
- retry, err := c._process(ctx, cmd, attempt)
- if err == nil || !retry {
- return err
- }
-
- lastErr = err
- }
- return lastErr
-}
-
-func (c *baseClient) _process(ctx context.Context, cmd Cmder, attempt int) (bool, error) {
- if attempt > 0 {
- if err := internal.Sleep(ctx, c.retryBackoff(attempt)); err != nil {
- return false, err
- }
- }
-
- retryTimeout := uint32(1)
- err := c.withConn(ctx, func(ctx context.Context, cn *pool.Conn) error {
- err := cn.WithWriter(ctx, c.opt.WriteTimeout, func(wr *proto.Writer) error {
- return writeCmd(wr, cmd)
- })
- if err != nil {
- return err
- }
-
- err = cn.WithReader(ctx, c.cmdTimeout(cmd), cmd.readReply)
- if err != nil {
- if cmd.readTimeout() == nil {
- atomic.StoreUint32(&retryTimeout, 1)
- }
- return err
- }
-
- return nil
- })
- if err == nil {
- return false, nil
- }
-
- retry := shouldRetry(err, atomic.LoadUint32(&retryTimeout) == 1)
- return retry, err
-}
-
-func (c *baseClient) retryBackoff(attempt int) time.Duration {
- return internal.RetryBackoff(attempt, c.opt.MinRetryBackoff, c.opt.MaxRetryBackoff)
-}
-
-func (c *baseClient) cmdTimeout(cmd Cmder) time.Duration {
- if timeout := cmd.readTimeout(); timeout != nil {
- t := *timeout
- if t == 0 {
- return 0
- }
- return t + 10*time.Second
- }
- return c.opt.ReadTimeout
-}
-
-// Close closes the client, releasing any open resources.
-//
-// It is rare to Close a Client, as the Client is meant to be
-// long-lived and shared between many goroutines.
-func (c *baseClient) Close() error {
- var firstErr error
- if c.onClose != nil {
- if err := c.onClose(); err != nil {
- firstErr = err
- }
- }
- if err := c.connPool.Close(); err != nil && firstErr == nil {
- firstErr = err
- }
- return firstErr
-}
-
-func (c *baseClient) getAddr() string {
- return c.opt.Addr
-}
-
-func (c *baseClient) processPipeline(ctx context.Context, cmds []Cmder) error {
- return c.generalProcessPipeline(ctx, cmds, c.pipelineProcessCmds)
-}
-
-func (c *baseClient) processTxPipeline(ctx context.Context, cmds []Cmder) error {
- return c.generalProcessPipeline(ctx, cmds, c.txPipelineProcessCmds)
-}
-
-type pipelineProcessor func(context.Context, *pool.Conn, []Cmder) (bool, error)
-
-func (c *baseClient) generalProcessPipeline(
- ctx context.Context, cmds []Cmder, p pipelineProcessor,
-) error {
- err := c._generalProcessPipeline(ctx, cmds, p)
- if err != nil {
- setCmdsErr(cmds, err)
- return err
- }
- return cmdsFirstErr(cmds)
-}
-
-func (c *baseClient) _generalProcessPipeline(
- ctx context.Context, cmds []Cmder, p pipelineProcessor,
-) error {
- var lastErr error
- for attempt := 0; attempt <= c.opt.MaxRetries; attempt++ {
- if attempt > 0 {
- if err := internal.Sleep(ctx, c.retryBackoff(attempt)); err != nil {
- return err
- }
- }
-
- var canRetry bool
- lastErr = c.withConn(ctx, func(ctx context.Context, cn *pool.Conn) error {
- var err error
- canRetry, err = p(ctx, cn, cmds)
- return err
- })
- if lastErr == nil || !canRetry || !shouldRetry(lastErr, true) {
- return lastErr
- }
- }
- return lastErr
-}
-
-func (c *baseClient) pipelineProcessCmds(
- ctx context.Context, cn *pool.Conn, cmds []Cmder,
-) (bool, error) {
- err := cn.WithWriter(ctx, c.opt.WriteTimeout, func(wr *proto.Writer) error {
- return writeCmds(wr, cmds)
- })
- if err != nil {
- return true, err
- }
-
- err = cn.WithReader(ctx, c.opt.ReadTimeout, func(rd *proto.Reader) error {
- return pipelineReadCmds(rd, cmds)
- })
- return true, err
-}
-
-func pipelineReadCmds(rd *proto.Reader, cmds []Cmder) error {
- for _, cmd := range cmds {
- err := cmd.readReply(rd)
- cmd.SetErr(err)
- if err != nil && !isRedisError(err) {
- return err
- }
- }
- return nil
-}
-
-func (c *baseClient) txPipelineProcessCmds(
- ctx context.Context, cn *pool.Conn, cmds []Cmder,
-) (bool, error) {
- err := cn.WithWriter(ctx, c.opt.WriteTimeout, func(wr *proto.Writer) error {
- return writeCmds(wr, cmds)
- })
- if err != nil {
- return true, err
- }
-
- err = cn.WithReader(ctx, c.opt.ReadTimeout, func(rd *proto.Reader) error {
- statusCmd := cmds[0].(*StatusCmd)
- // Trim multi and exec.
- cmds = cmds[1 : len(cmds)-1]
-
- err := txPipelineReadQueued(rd, statusCmd, cmds)
- if err != nil {
- return err
- }
-
- return pipelineReadCmds(rd, cmds)
- })
- return false, err
-}
-
-func wrapMultiExec(ctx context.Context, cmds []Cmder) []Cmder {
- if len(cmds) == 0 {
- panic("not reached")
- }
- cmdCopy := make([]Cmder, len(cmds)+2)
- cmdCopy[0] = NewStatusCmd(ctx, "multi")
- copy(cmdCopy[1:], cmds)
- cmdCopy[len(cmdCopy)-1] = NewSliceCmd(ctx, "exec")
- return cmdCopy
-}
-
-func txPipelineReadQueued(rd *proto.Reader, statusCmd *StatusCmd, cmds []Cmder) error {
- // Parse queued replies.
- if err := statusCmd.readReply(rd); err != nil {
- return err
- }
-
- for range cmds {
- if err := statusCmd.readReply(rd); err != nil && !isRedisError(err) {
- return err
- }
- }
-
- // Parse number of replies.
- line, err := rd.ReadLine()
- if err != nil {
- if err == Nil {
- err = TxFailedErr
- }
- return err
- }
-
- switch line[0] {
- case proto.ErrorReply:
- return proto.ParseErrorReply(line)
- case proto.ArrayReply:
- // ok
- default:
- err := fmt.Errorf("redis: expected '*', but got line %q", line)
- return err
- }
-
- return nil
-}
-
-//------------------------------------------------------------------------------
-
-// Client is a Redis client representing a pool of zero or more
-// underlying connections. It's safe for concurrent use by multiple
-// goroutines.
-type Client struct {
- *baseClient
- cmdable
- hooks
- ctx context.Context
-}
-
-// NewClient returns a client to the Redis Server specified by Options.
-func NewClient(opt *Options) *Client {
- opt.init()
-
- c := Client{
- baseClient: newBaseClient(opt, newConnPool(opt)),
- ctx: context.Background(),
- }
- c.cmdable = c.Process
-
- return &c
-}
-
-func (c *Client) clone() *Client {
- clone := *c
- clone.cmdable = clone.Process
- clone.hooks.lock()
- return &clone
-}
-
-func (c *Client) WithTimeout(timeout time.Duration) *Client {
- clone := c.clone()
- clone.baseClient = c.baseClient.withTimeout(timeout)
- return clone
-}
-
-func (c *Client) Context() context.Context {
- return c.ctx
-}
-
-func (c *Client) WithContext(ctx context.Context) *Client {
- if ctx == nil {
- panic("nil context")
- }
- clone := c.clone()
- clone.ctx = ctx
- return clone
-}
-
-func (c *Client) Conn(ctx context.Context) *Conn {
- return newConn(ctx, c.opt, pool.NewStickyConnPool(c.connPool))
-}
-
-// Do creates a Cmd from the args and processes the cmd.
-func (c *Client) Do(ctx context.Context, args ...interface{}) *Cmd {
- cmd := NewCmd(ctx, args...)
- _ = c.Process(ctx, cmd)
- return cmd
-}
-
-func (c *Client) Process(ctx context.Context, cmd Cmder) error {
- return c.hooks.process(ctx, cmd, c.baseClient.process)
-}
-
-func (c *Client) processPipeline(ctx context.Context, cmds []Cmder) error {
- return c.hooks.processPipeline(ctx, cmds, c.baseClient.processPipeline)
-}
-
-func (c *Client) processTxPipeline(ctx context.Context, cmds []Cmder) error {
- return c.hooks.processTxPipeline(ctx, cmds, c.baseClient.processTxPipeline)
-}
-
-// Options returns read-only Options that were used to create the client.
-func (c *Client) Options() *Options {
- return c.opt
-}
-
-type PoolStats pool.Stats
-
-// PoolStats returns connection pool stats.
-func (c *Client) PoolStats() *PoolStats {
- stats := c.connPool.Stats()
- return (*PoolStats)(stats)
-}
-
-func (c *Client) Pipelined(ctx context.Context, fn func(Pipeliner) error) ([]Cmder, error) {
- return c.Pipeline().Pipelined(ctx, fn)
-}
-
-func (c *Client) Pipeline() Pipeliner {
- pipe := Pipeline{
- ctx: c.ctx,
- exec: c.processPipeline,
- }
- pipe.init()
- return &pipe
-}
-
-func (c *Client) TxPipelined(ctx context.Context, fn func(Pipeliner) error) ([]Cmder, error) {
- return c.TxPipeline().Pipelined(ctx, fn)
-}
-
-// TxPipeline acts like Pipeline, but wraps queued commands with MULTI/EXEC.
-func (c *Client) TxPipeline() Pipeliner {
- pipe := Pipeline{
- ctx: c.ctx,
- exec: c.processTxPipeline,
- }
- pipe.init()
- return &pipe
-}
-
-func (c *Client) pubSub() *PubSub {
- pubsub := &PubSub{
- opt: c.opt,
-
- newConn: func(ctx context.Context, channels []string) (*pool.Conn, error) {
- return c.newConn(ctx)
- },
- closeConn: c.connPool.CloseConn,
- }
- pubsub.init()
- return pubsub
-}
-
-// Subscribe subscribes the client to the specified channels.
-// Channels can be omitted to create empty subscription.
-// Note that this method does not wait on a response from Redis, so the
-// subscription may not be active immediately. To force the connection to wait,
-// you may call the Receive() method on the returned *PubSub like so:
-//
-// sub := client.Subscribe(queryResp)
-// iface, err := sub.Receive()
-// if err != nil {
-// // handle error
-// }
-//
-// // Should be *Subscription, but others are possible if other actions have been
-// // taken on sub since it was created.
-// switch iface.(type) {
-// case *Subscription:
-// // subscribe succeeded
-// case *Message:
-// // received first message
-// case *Pong:
-// // pong received
-// default:
-// // handle error
-// }
-//
-// ch := sub.Channel()
-func (c *Client) Subscribe(ctx context.Context, channels ...string) *PubSub {
- pubsub := c.pubSub()
- if len(channels) > 0 {
- _ = pubsub.Subscribe(ctx, channels...)
- }
- return pubsub
-}
-
-// PSubscribe subscribes the client to the given patterns.
-// Patterns can be omitted to create empty subscription.
-func (c *Client) PSubscribe(ctx context.Context, channels ...string) *PubSub {
- pubsub := c.pubSub()
- if len(channels) > 0 {
- _ = pubsub.PSubscribe(ctx, channels...)
- }
- return pubsub
-}
-
-//------------------------------------------------------------------------------
-
-type conn struct {
- baseClient
- cmdable
- statefulCmdable
- hooks // TODO: inherit hooks
-}
-
-// Conn represents a single Redis connection rather than a pool of connections.
-// Prefer running commands from Client unless there is a specific need
-// for a continuous single Redis connection.
-type Conn struct {
- *conn
- ctx context.Context
-}
-
-func newConn(ctx context.Context, opt *Options, connPool pool.Pooler) *Conn {
- c := Conn{
- conn: &conn{
- baseClient: baseClient{
- opt: opt,
- connPool: connPool,
- },
- },
- ctx: ctx,
- }
- c.cmdable = c.Process
- c.statefulCmdable = c.Process
- return &c
-}
-
-func (c *Conn) Process(ctx context.Context, cmd Cmder) error {
- return c.hooks.process(ctx, cmd, c.baseClient.process)
-}
-
-func (c *Conn) processPipeline(ctx context.Context, cmds []Cmder) error {
- return c.hooks.processPipeline(ctx, cmds, c.baseClient.processPipeline)
-}
-
-func (c *Conn) processTxPipeline(ctx context.Context, cmds []Cmder) error {
- return c.hooks.processTxPipeline(ctx, cmds, c.baseClient.processTxPipeline)
-}
-
-func (c *Conn) Pipelined(ctx context.Context, fn func(Pipeliner) error) ([]Cmder, error) {
- return c.Pipeline().Pipelined(ctx, fn)
-}
-
-func (c *Conn) Pipeline() Pipeliner {
- pipe := Pipeline{
- ctx: c.ctx,
- exec: c.processPipeline,
- }
- pipe.init()
- return &pipe
-}
-
-func (c *Conn) TxPipelined(ctx context.Context, fn func(Pipeliner) error) ([]Cmder, error) {
- return c.TxPipeline().Pipelined(ctx, fn)
-}
-
-// TxPipeline acts like Pipeline, but wraps queued commands with MULTI/EXEC.
-func (c *Conn) TxPipeline() Pipeliner {
- pipe := Pipeline{
- ctx: c.ctx,
- exec: c.processTxPipeline,
- }
- pipe.init()
- return &pipe
-}
diff --git a/dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/testdata/redis.conf b/dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/testdata/redis.conf
deleted file mode 100644
index 235b295..0000000
--- a/dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/testdata/redis.conf
+++ /dev/null
@@ -1,10 +0,0 @@
-# Minimal redis.conf
-
-port 6379
-daemonize no
-dir .
-save ""
-appendonly yes
-cluster-config-file nodes.conf
-cluster-node-timeout 30000
-maxclients 1001 \ No newline at end of file
diff --git a/dependencies/pkg/mod/github.com/go-sql-driver/mysql@v1.7.1/fuzz.go b/dependencies/pkg/mod/github.com/go-sql-driver/mysql@v1.7.1/fuzz.go
deleted file mode 100644
index 3a4ec25..0000000
--- a/dependencies/pkg/mod/github.com/go-sql-driver/mysql@v1.7.1/fuzz.go
+++ /dev/null
@@ -1,25 +0,0 @@
-// Go MySQL Driver - A MySQL-Driver for Go's database/sql package.
-//
-// Copyright 2020 The Go-MySQL-Driver Authors. All rights reserved.
-//
-// This Source Code Form is subject to the terms of the Mozilla Public
-// License, v. 2.0. If a copy of the MPL was not distributed with this file,
-// You can obtain one at http://mozilla.org/MPL/2.0/.
-
-//go:build gofuzz
-// +build gofuzz
-
-package mysql
-
-import (
- "database/sql"
-)
-
-func Fuzz(data []byte) int {
- db, err := sql.Open("mysql", string(data))
- if err != nil {
- return 0
- }
- db.Close()
- return 1
-}
diff --git a/dependencies/pkg/mod/github.com/go-sql-driver/mysql@v1.7.1/go.mod b/dependencies/pkg/mod/github.com/go-sql-driver/mysql@v1.7.1/go.mod
deleted file mode 100644
index 2511104..0000000
--- a/dependencies/pkg/mod/github.com/go-sql-driver/mysql@v1.7.1/go.mod
+++ /dev/null
@@ -1,3 +0,0 @@
-module github.com/go-sql-driver/mysql
-
-go 1.13
diff --git a/dependencies/pkg/mod/github.com/go-sql-driver/mysql@v1.7.1/result.go b/dependencies/pkg/mod/github.com/go-sql-driver/mysql@v1.7.1/result.go
deleted file mode 100644
index c6438d0..0000000
--- a/dependencies/pkg/mod/github.com/go-sql-driver/mysql@v1.7.1/result.go
+++ /dev/null
@@ -1,22 +0,0 @@
-// Go MySQL Driver - A MySQL-Driver for Go's database/sql package
-//
-// Copyright 2012 The Go-MySQL-Driver Authors. All rights reserved.
-//
-// This Source Code Form is subject to the terms of the Mozilla Public
-// License, v. 2.0. If a copy of the MPL was not distributed with this file,
-// You can obtain one at http://mozilla.org/MPL/2.0/.
-
-package mysql
-
-type mysqlResult struct {
- affectedRows int64
- insertId int64
-}
-
-func (res *mysqlResult) LastInsertId() (int64, error) {
- return res.insertId, nil
-}
-
-func (res *mysqlResult) RowsAffected() (int64, error) {
- return res.affectedRows, nil
-}
diff --git a/dependencies/pkg/mod/github.com/go-sql-driver/mysql@v1.7.1/.github/CONTRIBUTING.md b/dependencies/pkg/mod/github.com/go-sql-driver/mysql@v1.8.1/.github/CONTRIBUTING.md
index 8fe16bc..8fe16bc 100644
--- a/dependencies/pkg/mod/github.com/go-sql-driver/mysql@v1.7.1/.github/CONTRIBUTING.md
+++ b/dependencies/pkg/mod/github.com/go-sql-driver/mysql@v1.8.1/.github/CONTRIBUTING.md
diff --git a/dependencies/pkg/mod/github.com/go-sql-driver/mysql@v1.7.1/.github/ISSUE_TEMPLATE.md b/dependencies/pkg/mod/github.com/go-sql-driver/mysql@v1.8.1/.github/ISSUE_TEMPLATE.md
index d9771f1..d9771f1 100644
--- a/dependencies/pkg/mod/github.com/go-sql-driver/mysql@v1.7.1/.github/ISSUE_TEMPLATE.md
+++ b/dependencies/pkg/mod/github.com/go-sql-driver/mysql@v1.8.1/.github/ISSUE_TEMPLATE.md
diff --git a/dependencies/pkg/mod/github.com/go-sql-driver/mysql@v1.7.1/.github/PULL_REQUEST_TEMPLATE.md b/dependencies/pkg/mod/github.com/go-sql-driver/mysql@v1.8.1/.github/PULL_REQUEST_TEMPLATE.md
index 6f5c7eb..6f5c7eb 100644
--- a/dependencies/pkg/mod/github.com/go-sql-driver/mysql@v1.7.1/.github/PULL_REQUEST_TEMPLATE.md
+++ b/dependencies/pkg/mod/github.com/go-sql-driver/mysql@v1.8.1/.github/PULL_REQUEST_TEMPLATE.md
diff --git a/dependencies/pkg/mod/github.com/go-sql-driver/mysql@v1.7.1/.github/workflows/codeql.yml b/dependencies/pkg/mod/github.com/go-sql-driver/mysql@v1.8.1/.github/workflows/codeql.yml
index d9d29a8..83a3d6e 100644
--- a/dependencies/pkg/mod/github.com/go-sql-driver/mysql@v1.7.1/.github/workflows/codeql.yml
+++ b/dependencies/pkg/mod/github.com/go-sql-driver/mysql@v1.8.1/.github/workflows/codeql.yml
@@ -24,18 +24,18 @@ jobs:
steps:
- name: Checkout
- uses: actions/checkout@v3
+ uses: actions/checkout@v4
- name: Initialize CodeQL
- uses: github/codeql-action/init@v2
+ uses: github/codeql-action/init@v3
with:
languages: ${{ matrix.language }}
queries: +security-and-quality
- name: Autobuild
- uses: github/codeql-action/autobuild@v2
+ uses: github/codeql-action/autobuild@v3
- name: Perform CodeQL Analysis
- uses: github/codeql-action/analyze@v2
+ uses: github/codeql-action/analyze@v3
with:
category: "/language:${{ matrix.language }}"
diff --git a/dependencies/pkg/mod/github.com/go-sql-driver/mysql@v1.7.1/.github/workflows/test.yml b/dependencies/pkg/mod/github.com/go-sql-driver/mysql@v1.8.1/.github/workflows/test.yml
index d45ed0f..f5a1158 100644
--- a/dependencies/pkg/mod/github.com/go-sql-driver/mysql@v1.7.1/.github/workflows/test.yml
+++ b/dependencies/pkg/mod/github.com/go-sql-driver/mysql@v1.8.1/.github/workflows/test.yml
@@ -11,6 +11,14 @@ env:
MYSQL_TEST_CONCURRENT: 1
jobs:
+ lint:
+ runs-on: ubuntu-latest
+ steps:
+ - uses: actions/checkout@v4
+ - uses: dominikh/staticcheck-action@v1.3.0
+ with:
+ version: "2023.1.6"
+
list:
runs-on: ubuntu-latest
outputs:
@@ -23,17 +31,14 @@ jobs:
import os
go = [
# Keep the most recent production release at the top
- '1.20',
+ '1.21',
# Older production releases
+ '1.20',
'1.19',
'1.18',
- '1.17',
- '1.16',
- '1.15',
- '1.14',
- '1.13',
]
mysql = [
+ '8.1',
'8.0',
'5.7',
'5.6',
@@ -47,7 +52,7 @@ jobs:
includes = []
# Go versions compatibility check
for v in go[1:]:
- includes.append({'os': 'ubuntu-latest', 'go': v, 'mysql': mysql[0]})
+ includes.append({'os': 'ubuntu-latest', 'go': v, 'mysql': mysql[0]})
matrix = {
# OS vs MySQL versions
@@ -68,11 +73,11 @@ jobs:
fail-fast: false
matrix: ${{ fromJSON(needs.list.outputs.matrix) }}
steps:
- - uses: actions/checkout@v3
- - uses: actions/setup-go@v3
+ - uses: actions/checkout@v4
+ - uses: actions/setup-go@v5
with:
go-version: ${{ matrix.go }}
- - uses: shogo82148/actions-setup-mysql@v1.15.0
+ - uses: shogo82148/actions-setup-mysql@v1
with:
mysql-version: ${{ matrix.mysql }}
user: ${{ env.MYSQL_TEST_USER }}
@@ -84,13 +89,14 @@ jobs:
; TestConcurrent fails if max_connections is too large
max_connections=50
local_infile=1
+ performance_schema=on
- name: setup database
run: |
mysql --user 'root' --host '127.0.0.1' -e 'create database gotest;'
- name: test
run: |
- go test -v '-covermode=count' '-coverprofile=coverage.out'
+ go test -v '-race' '-covermode=atomic' '-coverprofile=coverage.out' -parallel 10
- name: Send coverage
uses: shogo82148/actions-goveralls@v1
diff --git a/dependencies/pkg/mod/github.com/go-sql-driver/mysql@v1.7.1/.gitignore b/dependencies/pkg/mod/github.com/go-sql-driver/mysql@v1.8.1/.gitignore
index 2de28da..2de28da 100644
--- a/dependencies/pkg/mod/github.com/go-sql-driver/mysql@v1.7.1/.gitignore
+++ b/dependencies/pkg/mod/github.com/go-sql-driver/mysql@v1.8.1/.gitignore
diff --git a/dependencies/pkg/mod/github.com/go-sql-driver/mysql@v1.7.1/AUTHORS b/dependencies/pkg/mod/github.com/go-sql-driver/mysql@v1.8.1/AUTHORS
index fb1478c..4021b96 100644
--- a/dependencies/pkg/mod/github.com/go-sql-driver/mysql@v1.7.1/AUTHORS
+++ b/dependencies/pkg/mod/github.com/go-sql-driver/mysql@v1.8.1/AUTHORS
@@ -13,6 +13,7 @@
Aaron Hopkins <go-sql-driver at die.net>
Achille Roussel <achille.roussel at gmail.com>
+Aidan <aidan.liu at pingcap.com>
Alex Snast <alexsn at fb.com>
Alexey Palazhchenko <alexey.palazhchenko at gmail.com>
Andrew Reid <andrew.reid at tixtrack.com>
@@ -20,12 +21,14 @@ Animesh Ray <mail.rayanimesh at gmail.com>
Arne Hormann <arnehormann at gmail.com>
Ariel Mashraki <ariel at mashraki.co.il>
Asta Xie <xiemengjun at gmail.com>
+Brian Hendriks <brian at dolthub.com>
Bulat Gaifullin <gaifullinbf at gmail.com>
Caine Jette <jette at alum.mit.edu>
Carlos Nieto <jose.carlos at menteslibres.net>
Chris Kirkland <chriskirkland at github.com>
Chris Moos <chris at tech9computers.com>
Craig Wilson <craiggwilson at gmail.com>
+Daemonxiao <735462752 at qq.com>
Daniel Montoya <dsmontoyam at gmail.com>
Daniel Nichter <nil at codenode.com>
Daniël van Eeden <git at myname.nl>
@@ -33,9 +36,11 @@ Dave Protasowski <dprotaso at gmail.com>
DisposaBoy <disposaboy at dby.me>
Egor Smolyakov <egorsmkv at gmail.com>
Erwan Martin <hello at erwan.io>
+Evan Elias <evan at skeema.net>
Evan Shaw <evan at vendhq.com>
Frederick Mayle <frederickmayle at gmail.com>
Gustavo Kristic <gkristic at gmail.com>
+Gusted <postmaster at gusted.xyz>
Hajime Nakagami <nakagami at gmail.com>
Hanno Braun <mail at hannobraun.com>
Henri Yandell <flamefew at gmail.com>
@@ -47,8 +52,11 @@ INADA Naoki <songofacandy at gmail.com>
Jacek Szwec <szwec.jacek at gmail.com>
James Harr <james.harr at gmail.com>
Janek Vedock <janekvedock at comcast.net>
+Jason Ng <oblitorum at gmail.com>
+Jean-Yves Pellé <jy at pelle.link>
Jeff Hodges <jeff at somethingsimilar.com>
Jeffrey Charles <jeffreycharles at gmail.com>
+Jennifer Purevsuren <jennifer at dolthub.com>
Jerome Meyer <jxmeyer at gmail.com>
Jiajia Zhong <zhong2plus at gmail.com>
Jian Zhen <zhenjl at gmail.com>
@@ -74,9 +82,11 @@ Maciej Zimnoch <maciej.zimnoch at codilime.com>
Michael Woolnough <michael.woolnough at gmail.com>
Nathanial Murphy <nathanial.murphy at gmail.com>
Nicola Peduzzi <thenikso at gmail.com>
+Oliver Bone <owbone at github.com>
Olivier Mengué <dolmen at cpan.org>
oscarzhao <oscarzhaosl at gmail.com>
Paul Bonser <misterpib at gmail.com>
+Paulius Lozys <pauliuslozys at gmail.com>
Peter Schultz <peter.schultz at classmarkets.com>
Phil Porada <philporada at gmail.com>
Rebecca Chin <rchin at pivotal.io>
@@ -95,6 +105,7 @@ Stan Putrya <root.vagner at gmail.com>
Stanley Gunawan <gunawan.stanley at gmail.com>
Steven Hartland <steven.hartland at multiplay.co.uk>
Tan Jinhua <312841925 at qq.com>
+Tetsuro Aoki <t.aoki1130 at gmail.com>
Thomas Wodarek <wodarekwebpage at gmail.com>
Tim Ruffles <timruffles at gmail.com>
Tom Jenkinson <tom at tjenkinson.me>
@@ -104,6 +115,7 @@ Xiangyu Hu <xiangyu.hu at outlook.com>
Xiaobing Jiang <s7v7nislands at gmail.com>
Xiuming Chen <cc at cxm.cc>
Xuehong Chan <chanxuehong at gmail.com>
+Zhang Xiang <angwerzx at 126.com>
Zhenye Xie <xiezhenye at gmail.com>
Zhixin Wen <john.wenzhixin at gmail.com>
Ziheng Lyu <zihenglv at gmail.com>
@@ -113,14 +125,18 @@ Ziheng Lyu <zihenglv at gmail.com>
Barracuda Networks, Inc.
Counting Ltd.
DigitalOcean Inc.
+Dolthub Inc.
dyves labs AG
Facebook Inc.
GitHub Inc.
Google Inc.
InfoSum Ltd.
Keybase Inc.
+Microsoft Corp.
Multiplay Ltd.
Percona LLC
+PingCAP Inc.
Pivotal Inc.
+Shattered Silicon Ltd.
Stripe Inc.
Zendesk Inc.
diff --git a/dependencies/pkg/mod/github.com/go-sql-driver/mysql@v1.7.1/CHANGELOG.md b/dependencies/pkg/mod/github.com/go-sql-driver/mysql@v1.8.1/CHANGELOG.md
index 5166e4a..0c9bd9b 100644
--- a/dependencies/pkg/mod/github.com/go-sql-driver/mysql@v1.7.1/CHANGELOG.md
+++ b/dependencies/pkg/mod/github.com/go-sql-driver/mysql@v1.8.1/CHANGELOG.md
@@ -1,3 +1,45 @@
+## Version 1.8.1 (2024-03-26)
+
+Bugfixes:
+
+- fix race condition when context is canceled in [#1562](https://github.com/go-sql-driver/mysql/pull/1562) and [#1570](https://github.com/go-sql-driver/mysql/pull/1570)
+
+## Version 1.8.0 (2024-03-09)
+
+Major Changes:
+
+- Use `SET NAMES charset COLLATE collation`. by @methane in [#1437](https://github.com/go-sql-driver/mysql/pull/1437)
+ - Older go-mysql-driver used `collation_id` in the handshake packet. But it caused collation mismatch in some situation.
+ - If you don't specify charset nor collation, go-mysql-driver sends `SET NAMES utf8mb4` for new connection. This uses server's default collation for utf8mb4.
+ - If you specify charset, go-mysql-driver sends `SET NAMES <charset>`. This uses the server's default collation for `<charset>`.
+ - If you specify collation and/or charset, go-mysql-driver sends `SET NAMES charset COLLATE collation`.
+- PathEscape dbname in DSN. by @methane in [#1432](https://github.com/go-sql-driver/mysql/pull/1432)
+ - This is backward incompatible in rare case. Check your DSN.
+- Drop Go 1.13-17 support by @methane in [#1420](https://github.com/go-sql-driver/mysql/pull/1420)
+ - Use Go 1.18+
+- Parse numbers on text protocol too by @methane in [#1452](https://github.com/go-sql-driver/mysql/pull/1452)
+ - When text protocol is used, go-mysql-driver passed bare `[]byte` to database/sql for avoid unnecessary allocation and conversion.
+ - If user specified `*any` to `Scan()`, database/sql passed the `[]byte` into the target variable.
+ - This confused users because most user doesn't know when text/binary protocol used.
+ - go-mysql-driver 1.8 converts integer/float values into int64/double even in text protocol. This doesn't increase allocation compared to `[]byte` and conversion cost is negatable.
+- New options start using the Functional Option Pattern to avoid increasing technical debt in the Config object. Future version may introduce Functional Option for existing options, but not for now.
+ - Make TimeTruncate functional option by @methane in [1552](https://github.com/go-sql-driver/mysql/pull/1552)
+ - Add BeforeConnect callback to configuration object by @ItalyPaleAle in [#1469](https://github.com/go-sql-driver/mysql/pull/1469)
+
+
+Other changes:
+
+- Adding DeregisterDialContext to prevent memory leaks with dialers we don't need anymore by @jypelle in https://github.com/go-sql-driver/mysql/pull/1422
+- Make logger configurable per connection by @frozenbonito in https://github.com/go-sql-driver/mysql/pull/1408
+- Fix ColumnType.DatabaseTypeName for mediumint unsigned by @evanelias in https://github.com/go-sql-driver/mysql/pull/1428
+- Add connection attributes by @Daemonxiao in https://github.com/go-sql-driver/mysql/pull/1389
+- Stop `ColumnTypeScanType()` from returning `sql.RawBytes` by @methane in https://github.com/go-sql-driver/mysql/pull/1424
+- Exec() now provides access to status of multiple statements. by @mherr-google in https://github.com/go-sql-driver/mysql/pull/1309
+- Allow to change (or disable) the default driver name for registration by @dolmen in https://github.com/go-sql-driver/mysql/pull/1499
+- Add default connection attribute '_server_host' by @oblitorum in https://github.com/go-sql-driver/mysql/pull/1506
+- QueryUnescape DSN ConnectionAttribute value by @zhangyangyu in https://github.com/go-sql-driver/mysql/pull/1470
+- Add client_ed25519 authentication by @Gusted in https://github.com/go-sql-driver/mysql/pull/1518
+
## Version 1.7.1 (2023-04-25)
Changes:
@@ -162,7 +204,7 @@ New Features:
- Enable microsecond resolution on TIME, DATETIME and TIMESTAMP (#249)
- Support for returning table alias on Columns() (#289, #359, #382)
- - Placeholder interpolation, can be actived with the DSN parameter `interpolateParams=true` (#309, #318, #490)
+ - Placeholder interpolation, can be activated with the DSN parameter `interpolateParams=true` (#309, #318, #490)
- Support for uint64 parameters with high bit set (#332, #345)
- Cleartext authentication plugin support (#327)
- Exported ParseDSN function and the Config struct (#403, #419, #429)
@@ -206,7 +248,7 @@ Changes:
- Also exported the MySQLWarning type
- mysqlConn.Close returns the first error encountered instead of ignoring all errors
- writePacket() automatically writes the packet size to the header
- - readPacket() uses an iterative approach instead of the recursive approach to merge splitted packets
+ - readPacket() uses an iterative approach instead of the recursive approach to merge split packets
New Features:
@@ -254,7 +296,7 @@ Bugfixes:
- Fixed MySQL 4.1 support: MySQL 4.1 sends packets with lengths which differ from the specification
- Convert to DB timezone when inserting `time.Time`
- - Splitted packets (more than 16MB) are now merged correctly
+ - Split packets (more than 16MB) are now merged correctly
- Fixed false positive `io.EOF` errors when the data was fully read
- Avoid panics on reuse of closed connections
- Fixed empty string producing false nil values
diff --git a/dependencies/pkg/mod/github.com/go-sql-driver/mysql@v1.7.1/LICENSE b/dependencies/pkg/mod/github.com/go-sql-driver/mysql@v1.8.1/LICENSE
index 14e2f77..14e2f77 100644
--- a/dependencies/pkg/mod/github.com/go-sql-driver/mysql@v1.7.1/LICENSE
+++ b/dependencies/pkg/mod/github.com/go-sql-driver/mysql@v1.8.1/LICENSE
diff --git a/dependencies/pkg/mod/github.com/go-sql-driver/mysql@v1.7.1/README.md b/dependencies/pkg/mod/github.com/go-sql-driver/mysql@v1.8.1/README.md
index 3b5d229..4968cb0 100644
--- a/dependencies/pkg/mod/github.com/go-sql-driver/mysql@v1.7.1/README.md
+++ b/dependencies/pkg/mod/github.com/go-sql-driver/mysql@v1.8.1/README.md
@@ -40,15 +40,23 @@ A MySQL-Driver for Go's [database/sql](https://golang.org/pkg/database/sql/) pac
* Optional placeholder interpolation
## Requirements
- * Go 1.13 or higher. We aim to support the 3 latest versions of Go.
- * MySQL (4.1+), MariaDB, Percona Server, Google CloudSQL or Sphinx (2.2.3+)
+
+* Go 1.19 or higher. We aim to support the 3 latest versions of Go.
+* MySQL (5.7+) and MariaDB (10.3+) are supported.
+* [TiDB](https://github.com/pingcap/tidb) is supported by PingCAP.
+ * Do not ask questions about TiDB in our issue tracker or forum.
+ * [Document](https://docs.pingcap.com/tidb/v6.1/dev-guide-sample-application-golang)
+ * [Forum](https://ask.pingcap.com/)
+* go-mysql would work with Percona Server, Google CloudSQL or Sphinx (2.2.3+).
+ * Maintainers won't support them. Do not expect issues are investigated and resolved by maintainers.
+ * Investigate issues yourself and please send a pull request to fix it.
---------------------------------------
## Installation
Simple install the package to your [$GOPATH](https://github.com/golang/go/wiki/GOPATH "GOPATH") with the [go tool](https://golang.org/cmd/go/ "go command") from shell:
```bash
-$ go get -u github.com/go-sql-driver/mysql
+go get -u github.com/go-sql-driver/mysql
```
Make sure [Git is installed](https://git-scm.com/downloads) on your machine and in your system's `PATH`.
@@ -114,6 +122,12 @@ This has the same effect as an empty DSN string:
```
+`dbname` is escaped by [PathEscape()](https://pkg.go.dev/net/url#PathEscape) since v1.8.0. If your database name is `dbname/withslash`, it becomes:
+
+```
+/dbname%2Fwithslash
+```
+
Alternatively, [Config.FormatDSN](https://godoc.org/github.com/go-sql-driver/mysql#Config.FormatDSN) can be used to create a DSN string by filling a struct.
#### Password
@@ -121,7 +135,7 @@ Passwords can consist of any character. Escaping is **not** necessary.
#### Protocol
See [net.Dial](https://golang.org/pkg/net/#Dial) for more information which networks are available.
-In general you should use an Unix domain socket if available and TCP otherwise for best performance.
+In general you should use a Unix domain socket if available and TCP otherwise for best performance.
#### Address
For TCP and UDP networks, addresses have the form `host[:port]`.
@@ -145,7 +159,7 @@ Default: false
```
`allowAllFiles=true` disables the file allowlist for `LOAD DATA LOCAL INFILE` and allows *all* files.
-[*Might be insecure!*](http://dev.mysql.com/doc/refman/5.7/en/load-data-local.html)
+[*Might be insecure!*](https://dev.mysql.com/doc/refman/8.0/en/load-data.html#load-data-local)
##### `allowCleartextPasswords`
@@ -194,10 +208,9 @@ Valid Values: <name>
Default: none
```
-Sets the charset used for client-server interaction (`"SET NAMES <value>"`). If multiple charsets are set (separated by a comma), the following charset is used if setting the charset failes. This enables for example support for `utf8mb4` ([introduced in MySQL 5.5.3](http://dev.mysql.com/doc/refman/5.5/en/charset-unicode-utf8mb4.html)) with fallback to `utf8` for older servers (`charset=utf8mb4,utf8`).
+Sets the charset used for client-server interaction (`"SET NAMES <value>"`). If multiple charsets are set (separated by a comma), the following charset is used if setting the charset fails. This enables for example support for `utf8mb4` ([introduced in MySQL 5.5.3](http://dev.mysql.com/doc/refman/5.5/en/charset-unicode-utf8mb4.html)) with fallback to `utf8` for older servers (`charset=utf8mb4,utf8`).
-Usage of the `charset` parameter is discouraged because it issues additional queries to the server.
-Unless you need the fallback behavior, please use `collation` instead.
+See also [Unicode Support](#unicode-support).
##### `checkConnLiveness`
@@ -226,6 +239,7 @@ The default collation (`utf8mb4_general_ci`) is supported from MySQL 5.5. You s
Collations for charset "ucs2", "utf16", "utf16le", and "utf32" can not be used ([ref](https://dev.mysql.com/doc/refman/5.7/en/charset-connection.html#charset-connection-impermissible-client-charset)).
+See also [Unicode Support](#unicode-support).
##### `clientFoundRows`
@@ -279,6 +293,15 @@ Note that this sets the location for time.Time values but does not change MySQL'
Please keep in mind, that param values must be [url.QueryEscape](https://golang.org/pkg/net/url/#QueryEscape)'ed. Alternatively you can manually replace the `/` with `%2F`. For example `US/Pacific` would be `loc=US%2FPacific`.
+##### `timeTruncate`
+
+```
+Type: duration
+Default: 0
+```
+
+[Truncate time values](https://pkg.go.dev/time#Duration.Truncate) to the specified duration. The value must be a decimal number with a unit suffix (*"ms"*, *"s"*, *"m"*, *"h"*), such as *"30s"*, *"0.5m"* or *"1m30s"*.
+
##### `maxAllowedPacket`
```
Type: decimal number
@@ -295,9 +318,25 @@ Valid Values: true, false
Default: false
```
-Allow multiple statements in one query. While this allows batch queries, it also greatly increases the risk of SQL injections. Only the result of the first query is returned, all other results are silently discarded.
+Allow multiple statements in one query. This can be used to bach multiple queries. Use [Rows.NextResultSet()](https://pkg.go.dev/database/sql#Rows.NextResultSet) to get result of the second and subsequent queries.
+
+When `multiStatements` is used, `?` parameters must only be used in the first statement. [interpolateParams](#interpolateparams) can be used to avoid this limitation unless prepared statement is used explicitly.
+
+It's possible to access the last inserted ID and number of affected rows for multiple statements by using `sql.Conn.Raw()` and the `mysql.Result`. For example:
-When `multiStatements` is used, `?` parameters must only be used in the first statement.
+```go
+conn, _ := db.Conn(ctx)
+conn.Raw(func(conn any) error {
+ ex := conn.(driver.Execer)
+ res, err := ex.Exec(`
+ UPDATE point SET x = 1 WHERE y = 2;
+ UPDATE point SET x = 2 WHERE y = 3;
+ `, nil)
+ // Both slices have 2 elements.
+ log.Print(res.(mysql.Result).AllRowsAffected())
+ log.Print(res.(mysql.Result).AllLastInsertIds())
+})
+```
##### `parseTime`
@@ -393,6 +432,15 @@ Default: 0
I/O write timeout. The value must be a decimal number with a unit suffix (*"ms"*, *"s"*, *"m"*, *"h"*), such as *"30s"*, *"0.5m"* or *"1m30s"*.
+##### `connectionAttributes`
+
+```
+Type: comma-delimited string of user-defined "key:value" pairs
+Valid Values: (<name1>:<value1>,<name2>:<value2>,...)
+Default: none
+```
+
+[Connection attributes](https://dev.mysql.com/doc/refman/8.0/en/performance-schema-connection-attribute-tables.html) are key-value pairs that application programs can pass to the server at connect time.
##### System Variables
@@ -465,7 +513,7 @@ user:password@/
The connection pool is managed by Go's database/sql package. For details on how to configure the size of the pool and how long connections stay in the pool see `*DB.SetMaxOpenConns`, `*DB.SetMaxIdleConns`, and `*DB.SetConnMaxLifetime` in the [database/sql documentation](https://golang.org/pkg/database/sql/). The read, write, and dial timeouts for each individual connection are configured with the DSN parameters [`readTimeout`](#readtimeout), [`writeTimeout`](#writetimeout), and [`timeout`](#timeout), respectively.
## `ColumnType` Support
-This driver supports the [`ColumnType` interface](https://golang.org/pkg/database/sql/#ColumnType) introduced in Go 1.8, with the exception of [`ColumnType.Length()`](https://golang.org/pkg/database/sql/#ColumnType.Length), which is currently not supported. All Unsigned database type names will be returned `UNSIGNED ` with `INT`, `TINYINT`, `SMALLINT`, `BIGINT`.
+This driver supports the [`ColumnType` interface](https://golang.org/pkg/database/sql/#ColumnType) introduced in Go 1.8, with the exception of [`ColumnType.Length()`](https://golang.org/pkg/database/sql/#ColumnType.Length), which is currently not supported. All Unsigned database type names will be returned `UNSIGNED ` with `INT`, `TINYINT`, `SMALLINT`, `MEDIUMINT`, `BIGINT`.
## `context.Context` Support
Go 1.8 added `database/sql` support for `context.Context`. This driver supports query timeouts and cancellation via contexts.
@@ -478,7 +526,7 @@ For this feature you need direct access to the package. Therefore you must chang
import "github.com/go-sql-driver/mysql"
```
-Files must be explicitly allowed by registering them with `mysql.RegisterLocalFile(filepath)` (recommended) or the allowlist check must be deactivated by using the DSN parameter `allowAllFiles=true` ([*Might be insecure!*](http://dev.mysql.com/doc/refman/5.7/en/load-data-local.html)).
+Files must be explicitly allowed by registering them with `mysql.RegisterLocalFile(filepath)` (recommended) or the allowlist check must be deactivated by using the DSN parameter `allowAllFiles=true` ([*Might be insecure!*](https://dev.mysql.com/doc/refman/8.0/en/load-data.html#load-data-local)).
To use a `io.Reader` a handler function must be registered with `mysql.RegisterReaderHandler(name, handler)` which returns a `io.Reader` or `io.ReadCloser`. The Reader is available with the filepath `Reader::<name>` then. Choose different names for different handlers and `DeregisterReaderHandler` when you don't need it anymore.
@@ -496,9 +544,11 @@ However, many want to scan MySQL `DATE` and `DATETIME` values into `time.Time` v
### Unicode support
Since version 1.5 Go-MySQL-Driver automatically uses the collation ` utf8mb4_general_ci` by default.
-Other collations / charsets can be set using the [`collation`](#collation) DSN parameter.
+Other charsets / collations can be set using the [`charset`](#charset) or [`collation`](#collation) DSN parameter.
-Version 1.0 of the driver recommended adding `&charset=utf8` (alias for `SET NAMES utf8`) to the DSN to enable proper UTF-8 support. This is not necessary anymore. The [`collation`](#collation) parameter should be preferred to set another collation / charset than the default.
+- When only the `charset` is specified, the `SET NAMES <charset>` query is sent and the server's default collation is used.
+- When both the `charset` and `collation` are specified, the `SET NAMES <charset> COLLATE <collation>` query is sent.
+- When only the `collation` is specified, the collation is specified in the protocol handshake and the `SET NAMES` query is not sent. This can save one roundtrip, but note that the server may ignore the specified collation silently and use the server's default charset/collation instead.
See http://dev.mysql.com/doc/refman/8.0/en/charset-unicode.html for more details on MySQL's Unicode support.
diff --git a/dependencies/pkg/mod/github.com/go-sql-driver/mysql@v1.7.1/atomic_bool.go b/dependencies/pkg/mod/github.com/go-sql-driver/mysql@v1.8.1/atomic_bool.go
index 1b7e19f..1b7e19f 100644
--- a/dependencies/pkg/mod/github.com/go-sql-driver/mysql@v1.7.1/atomic_bool.go
+++ b/dependencies/pkg/mod/github.com/go-sql-driver/mysql@v1.8.1/atomic_bool.go
diff --git a/dependencies/pkg/mod/github.com/go-sql-driver/mysql@v1.7.1/atomic_bool_go118.go b/dependencies/pkg/mod/github.com/go-sql-driver/mysql@v1.8.1/atomic_bool_go118.go
index 2e9a7f0..2e9a7f0 100644
--- a/dependencies/pkg/mod/github.com/go-sql-driver/mysql@v1.7.1/atomic_bool_go118.go
+++ b/dependencies/pkg/mod/github.com/go-sql-driver/mysql@v1.8.1/atomic_bool_go118.go
diff --git a/dependencies/pkg/mod/github.com/go-sql-driver/mysql@v1.7.1/atomic_bool_test.go b/dependencies/pkg/mod/github.com/go-sql-driver/mysql@v1.8.1/atomic_bool_test.go
index a3b4ea0..a3b4ea0 100644
--- a/dependencies/pkg/mod/github.com/go-sql-driver/mysql@v1.7.1/atomic_bool_test.go
+++ b/dependencies/pkg/mod/github.com/go-sql-driver/mysql@v1.8.1/atomic_bool_test.go
diff --git a/dependencies/pkg/mod/github.com/go-sql-driver/mysql@v1.7.1/auth.go b/dependencies/pkg/mod/github.com/go-sql-driver/mysql@v1.8.1/auth.go
index 1ff203e..74e1bd0 100644
--- a/dependencies/pkg/mod/github.com/go-sql-driver/mysql@v1.7.1/auth.go
+++ b/dependencies/pkg/mod/github.com/go-sql-driver/mysql@v1.8.1/auth.go
@@ -13,10 +13,13 @@ import (
"crypto/rsa"
"crypto/sha1"
"crypto/sha256"
+ "crypto/sha512"
"crypto/x509"
"encoding/pem"
"fmt"
"sync"
+
+ "filippo.io/edwards25519"
)
// server pub keys registry
@@ -33,7 +36,7 @@ var (
// Note: The provided rsa.PublicKey instance is exclusively owned by the driver
// after registering it and may not be modified.
//
-// data, err := ioutil.ReadFile("mykey.pem")
+// data, err := os.ReadFile("mykey.pem")
// if err != nil {
// log.Fatal(err)
// }
@@ -225,6 +228,44 @@ func encryptPassword(password string, seed []byte, pub *rsa.PublicKey) ([]byte,
return rsa.EncryptOAEP(sha1, rand.Reader, pub, plain, nil)
}
+// authEd25519 does ed25519 authentication used by MariaDB.
+func authEd25519(scramble []byte, password string) ([]byte, error) {
+ // Derived from https://github.com/MariaDB/server/blob/d8e6bb00888b1f82c031938f4c8ac5d97f6874c3/plugin/auth_ed25519/ref10/sign.c
+ // Code style is from https://cs.opensource.google/go/go/+/refs/tags/go1.21.5:src/crypto/ed25519/ed25519.go;l=207
+ h := sha512.Sum512([]byte(password))
+
+ s, err := edwards25519.NewScalar().SetBytesWithClamping(h[:32])
+ if err != nil {
+ return nil, err
+ }
+ A := (&edwards25519.Point{}).ScalarBaseMult(s)
+
+ mh := sha512.New()
+ mh.Write(h[32:])
+ mh.Write(scramble)
+ messageDigest := mh.Sum(nil)
+ r, err := edwards25519.NewScalar().SetUniformBytes(messageDigest)
+ if err != nil {
+ return nil, err
+ }
+
+ R := (&edwards25519.Point{}).ScalarBaseMult(r)
+
+ kh := sha512.New()
+ kh.Write(R.Bytes())
+ kh.Write(A.Bytes())
+ kh.Write(scramble)
+ hramDigest := kh.Sum(nil)
+ k, err := edwards25519.NewScalar().SetUniformBytes(hramDigest)
+ if err != nil {
+ return nil, err
+ }
+
+ S := k.MultiplyAdd(k, s, r)
+
+ return append(R.Bytes(), S.Bytes()...), nil
+}
+
func (mc *mysqlConn) sendEncryptedPassword(seed []byte, pub *rsa.PublicKey) error {
enc, err := encryptPassword(mc.cfg.Passwd, seed, pub)
if err != nil {
@@ -290,8 +331,14 @@ func (mc *mysqlConn) auth(authData []byte, plugin string) ([]byte, error) {
enc, err := encryptPassword(mc.cfg.Passwd, authData, pubKey)
return enc, err
+ case "client_ed25519":
+ if len(authData) != 32 {
+ return nil, ErrMalformPkt
+ }
+ return authEd25519(authData, mc.cfg.Passwd)
+
default:
- errLog.Print("unknown auth plugin:", plugin)
+ mc.log("unknown auth plugin:", plugin)
return nil, ErrUnknownPlugin
}
}
@@ -338,7 +385,7 @@ func (mc *mysqlConn) handleAuthResult(oldAuthData []byte, plugin string) error {
switch plugin {
- // https://insidemysql.com/preparing-your-community-connector-for-mysql-8-part-2-sha256/
+ // https://dev.mysql.com/blog-archive/preparing-your-community-connector-for-mysql-8-part-2-sha256/
case "caching_sha2_password":
switch len(authData) {
case 0:
@@ -346,7 +393,7 @@ func (mc *mysqlConn) handleAuthResult(oldAuthData []byte, plugin string) error {
case 1:
switch authData[0] {
case cachingSha2PasswordFastAuthSuccess:
- if err = mc.readResultOK(); err == nil {
+ if err = mc.resultUnchanged().readResultOK(); err == nil {
return nil // auth successful
}
@@ -376,13 +423,13 @@ func (mc *mysqlConn) handleAuthResult(oldAuthData []byte, plugin string) error {
}
if data[0] != iAuthMoreData {
- return fmt.Errorf("unexpect resp from server for caching_sha2_password perform full authentication")
+ return fmt.Errorf("unexpected resp from server for caching_sha2_password, perform full authentication")
}
// parse public key
block, rest := pem.Decode(data[1:])
if block == nil {
- return fmt.Errorf("No Pem data found, data: %s", rest)
+ return fmt.Errorf("no pem data found, data: %s", rest)
}
pkix, err := x509.ParsePKIXPublicKey(block.Bytes)
if err != nil {
@@ -397,7 +444,7 @@ func (mc *mysqlConn) handleAuthResult(oldAuthData []byte, plugin string) error {
return err
}
}
- return mc.readResultOK()
+ return mc.resultUnchanged().readResultOK()
default:
return ErrMalformPkt
@@ -426,7 +473,7 @@ func (mc *mysqlConn) handleAuthResult(oldAuthData []byte, plugin string) error {
if err != nil {
return err
}
- return mc.readResultOK()
+ return mc.resultUnchanged().readResultOK()
}
default:
diff --git a/dependencies/pkg/mod/github.com/go-sql-driver/mysql@v1.7.1/auth_test.go b/dependencies/pkg/mod/github.com/go-sql-driver/mysql@v1.8.1/auth_test.go
index 3ce0ea6..8caed1f 100644
--- a/dependencies/pkg/mod/github.com/go-sql-driver/mysql@v1.7.1/auth_test.go
+++ b/dependencies/pkg/mod/github.com/go-sql-driver/mysql@v1.8.1/auth_test.go
@@ -1328,3 +1328,54 @@ func TestAuthSwitchSHA256PasswordSecure(t *testing.T) {
t.Errorf("got unexpected data: %v", conn.written)
}
}
+
+// Derived from https://github.com/MariaDB/server/blob/6b2287fff23fbdc362499501c562f01d0d2db52e/plugin/auth_ed25519/ed25519-t.c
+func TestEd25519Auth(t *testing.T) {
+ conn, mc := newRWMockConn(1)
+ mc.cfg.User = "root"
+ mc.cfg.Passwd = "foobar"
+
+ authData := []byte("AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA")
+ plugin := "client_ed25519"
+
+ // Send Client Authentication Packet
+ authResp, err := mc.auth(authData, plugin)
+ if err != nil {
+ t.Fatal(err)
+ }
+ err = mc.writeHandshakeResponsePacket(authResp, plugin)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ // check written auth response
+ authRespStart := 4 + 4 + 4 + 1 + 23 + len(mc.cfg.User) + 1
+ authRespEnd := authRespStart + 1 + len(authResp)
+ writtenAuthRespLen := conn.written[authRespStart]
+ writtenAuthResp := conn.written[authRespStart+1 : authRespEnd]
+ expectedAuthResp := []byte{
+ 232, 61, 201, 63, 67, 63, 51, 53, 86, 73, 238, 35, 170, 117, 146,
+ 214, 26, 17, 35, 9, 8, 132, 245, 141, 48, 99, 66, 58, 36, 228, 48,
+ 84, 115, 254, 187, 168, 88, 162, 249, 57, 35, 85, 79, 238, 167, 106,
+ 68, 117, 56, 135, 171, 47, 20, 14, 133, 79, 15, 229, 124, 160, 176,
+ 100, 138, 14,
+ }
+ if writtenAuthRespLen != 64 {
+ t.Fatalf("expected 64 bytes from client, got %d", writtenAuthRespLen)
+ }
+ if !bytes.Equal(writtenAuthResp, expectedAuthResp) {
+ t.Fatalf("auth response did not match expected value:\n%v\n%v", writtenAuthResp, expectedAuthResp)
+ }
+ conn.written = nil
+
+ // auth response
+ conn.data = []byte{
+ 7, 0, 0, 2, 0, 0, 0, 2, 0, 0, 0, // OK
+ }
+ conn.maxReads = 1
+
+ // Handle response to auth packet
+ if err := mc.handleAuthResult(authData, plugin); err != nil {
+ t.Errorf("got error: %v", err)
+ }
+}
diff --git a/dependencies/pkg/mod/github.com/go-sql-driver/mysql@v1.7.1/benchmark_test.go b/dependencies/pkg/mod/github.com/go-sql-driver/mysql@v1.8.1/benchmark_test.go
index 97ed781..a4ecc0a 100644
--- a/dependencies/pkg/mod/github.com/go-sql-driver/mysql@v1.7.1/benchmark_test.go
+++ b/dependencies/pkg/mod/github.com/go-sql-driver/mysql@v1.8.1/benchmark_test.go
@@ -48,7 +48,7 @@ func (tb *TB) checkStmt(stmt *sql.Stmt, err error) *sql.Stmt {
func initDB(b *testing.B, queries ...string) *sql.DB {
tb := (*TB)(b)
- db := tb.checkDB(sql.Open("mysql", dsn))
+ db := tb.checkDB(sql.Open(driverNameTest, dsn))
for _, query := range queries {
if _, err := db.Exec(query); err != nil {
b.Fatalf("error on %q: %v", query, err)
@@ -105,7 +105,7 @@ func BenchmarkExec(b *testing.B) {
tb := (*TB)(b)
b.StopTimer()
b.ReportAllocs()
- db := tb.checkDB(sql.Open("mysql", dsn))
+ db := tb.checkDB(sql.Open(driverNameTest, dsn))
db.SetMaxIdleConns(concurrencyLevel)
defer db.Close()
@@ -151,7 +151,7 @@ func BenchmarkRoundtripTxt(b *testing.B) {
sampleString := string(sample)
b.ReportAllocs()
tb := (*TB)(b)
- db := tb.checkDB(sql.Open("mysql", dsn))
+ db := tb.checkDB(sql.Open(driverNameTest, dsn))
defer db.Close()
b.StartTimer()
var result string
@@ -184,7 +184,7 @@ func BenchmarkRoundtripBin(b *testing.B) {
sample, min, max := initRoundtripBenchmarks()
b.ReportAllocs()
tb := (*TB)(b)
- db := tb.checkDB(sql.Open("mysql", dsn))
+ db := tb.checkDB(sql.Open(driverNameTest, dsn))
defer db.Close()
stmt := tb.checkStmt(db.Prepare("SELECT ?"))
defer stmt.Close()
@@ -372,3 +372,59 @@ func BenchmarkQueryRawBytes(b *testing.B) {
})
}
}
+
+// BenchmarkReceiveMassiveRows measures performance of receiving large number of rows.
+func BenchmarkReceiveMassiveRows(b *testing.B) {
+ // Setup -- prepare 10000 rows.
+ db := initDB(b,
+ "DROP TABLE IF EXISTS foo",
+ "CREATE TABLE foo (id INT PRIMARY KEY, val TEXT)")
+ defer db.Close()
+
+ sval := strings.Repeat("x", 50)
+ stmt, err := db.Prepare(`INSERT INTO foo (id, val) VALUES (?, ?)` + strings.Repeat(",(?,?)", 99))
+ if err != nil {
+ b.Errorf("failed to prepare query: %v", err)
+ return
+ }
+ for i := 0; i < 10000; i += 100 {
+ args := make([]any, 200)
+ for j := 0; j < 100; j++ {
+ args[j*2] = i + j
+ args[j*2+1] = sval
+ }
+ _, err := stmt.Exec(args...)
+ if err != nil {
+ b.Error(err)
+ return
+ }
+ }
+ stmt.Close()
+
+ // Use b.Run() to skip expensive setup.
+ b.Run("query", func(b *testing.B) {
+ b.ReportAllocs()
+
+ for i := 0; i < b.N; i++ {
+ rows, err := db.Query(`SELECT id, val FROM foo`)
+ if err != nil {
+ b.Errorf("failed to select: %v", err)
+ return
+ }
+ for rows.Next() {
+ var i int
+ var s sql.RawBytes
+ err = rows.Scan(&i, &s)
+ if err != nil {
+ b.Errorf("failed to scan: %v", err)
+ _ = rows.Close()
+ return
+ }
+ }
+ if err = rows.Err(); err != nil {
+ b.Errorf("failed to read rows: %v", err)
+ }
+ _ = rows.Close()
+ }
+ })
+}
diff --git a/dependencies/pkg/mod/github.com/go-sql-driver/mysql@v1.7.1/buffer.go b/dependencies/pkg/mod/github.com/go-sql-driver/mysql@v1.8.1/buffer.go
index 0774c5c..0774c5c 100644
--- a/dependencies/pkg/mod/github.com/go-sql-driver/mysql@v1.7.1/buffer.go
+++ b/dependencies/pkg/mod/github.com/go-sql-driver/mysql@v1.8.1/buffer.go
diff --git a/dependencies/pkg/mod/github.com/go-sql-driver/mysql@v1.7.1/collations.go b/dependencies/pkg/mod/github.com/go-sql-driver/mysql@v1.8.1/collations.go
index 295bfbe..1cdf97b 100644
--- a/dependencies/pkg/mod/github.com/go-sql-driver/mysql@v1.7.1/collations.go
+++ b/dependencies/pkg/mod/github.com/go-sql-driver/mysql@v1.8.1/collations.go
@@ -9,7 +9,7 @@
package mysql
const defaultCollation = "utf8mb4_general_ci"
-const binaryCollation = "binary"
+const binaryCollationID = 63
// A list of available collations mapped to the internal ID.
// To update this map use the following MySQL query:
diff --git a/dependencies/pkg/mod/github.com/go-sql-driver/mysql@v1.7.1/conncheck.go b/dependencies/pkg/mod/github.com/go-sql-driver/mysql@v1.8.1/conncheck.go
index 0ea7217..0ea7217 100644
--- a/dependencies/pkg/mod/github.com/go-sql-driver/mysql@v1.7.1/conncheck.go
+++ b/dependencies/pkg/mod/github.com/go-sql-driver/mysql@v1.8.1/conncheck.go
diff --git a/dependencies/pkg/mod/github.com/go-sql-driver/mysql@v1.7.1/conncheck_dummy.go b/dependencies/pkg/mod/github.com/go-sql-driver/mysql@v1.8.1/conncheck_dummy.go
index a56c138..a56c138 100644
--- a/dependencies/pkg/mod/github.com/go-sql-driver/mysql@v1.7.1/conncheck_dummy.go
+++ b/dependencies/pkg/mod/github.com/go-sql-driver/mysql@v1.8.1/conncheck_dummy.go
diff --git a/dependencies/pkg/mod/github.com/go-sql-driver/mysql@v1.7.1/conncheck_test.go b/dependencies/pkg/mod/github.com/go-sql-driver/mysql@v1.8.1/conncheck_test.go
index f7e0256..6b60cb7 100644
--- a/dependencies/pkg/mod/github.com/go-sql-driver/mysql@v1.7.1/conncheck_test.go
+++ b/dependencies/pkg/mod/github.com/go-sql-driver/mysql@v1.8.1/conncheck_test.go
@@ -17,7 +17,7 @@ import (
)
func TestStaleConnectionChecks(t *testing.T) {
- runTests(t, dsn, func(dbt *DBTest) {
+ runTestsParallel(t, dsn, func(dbt *DBTest, _ string) {
dbt.mustExec("SET @@SESSION.wait_timeout = 2")
if err := dbt.db.Ping(); err != nil {
diff --git a/dependencies/pkg/mod/github.com/go-sql-driver/mysql@v1.7.1/connection.go b/dependencies/pkg/mod/github.com/go-sql-driver/mysql@v1.8.1/connection.go
index 947a883..eff978d 100644
--- a/dependencies/pkg/mod/github.com/go-sql-driver/mysql@v1.7.1/connection.go
+++ b/dependencies/pkg/mod/github.com/go-sql-driver/mysql@v1.8.1/connection.go
@@ -23,10 +23,10 @@ import (
type mysqlConn struct {
buf buffer
netConn net.Conn
- rawConn net.Conn // underlying connection when netConn is TLS connection.
- affectedRows uint64
- insertId uint64
+ rawConn net.Conn // underlying connection when netConn is TLS connection.
+ result mysqlResult // managed by clearResult() and handleOkPacket().
cfg *Config
+ connector *connector
maxAllowedPacket int
maxWriteSize int
writeTimeout time.Duration
@@ -34,7 +34,6 @@ type mysqlConn struct {
status statusFlag
sequence uint8
parseTime bool
- reset bool // set when the Go SQL package calls ResetSession
// for context support (Go 1.8+)
watching bool
@@ -45,17 +44,27 @@ type mysqlConn struct {
closed atomicBool // set when conn is closed, before closech is closed
}
+// Helper function to call per-connection logger.
+func (mc *mysqlConn) log(v ...any) {
+ mc.cfg.Logger.Print(v...)
+}
+
// Handles parameters set in DSN after the connection is established
func (mc *mysqlConn) handleParams() (err error) {
var cmdSet strings.Builder
+
for param, val := range mc.cfg.Params {
switch param {
// Charset: character_set_connection, character_set_client, character_set_results
case "charset":
charsets := strings.Split(val, ",")
- for i := range charsets {
+ for _, cs := range charsets {
// ignore errors here - a charset may not exist
- err = mc.exec("SET NAMES " + charsets[i])
+ if mc.cfg.Collation != "" {
+ err = mc.exec("SET NAMES " + cs + " COLLATE " + mc.cfg.Collation)
+ } else {
+ err = mc.exec("SET NAMES " + cs)
+ }
if err == nil {
break
}
@@ -68,7 +77,7 @@ func (mc *mysqlConn) handleParams() (err error) {
default:
if cmdSet.Len() == 0 {
// Heuristic: 29 chars for each other key=value to reduce reallocations
- cmdSet.Grow(4 + len(param) + 1 + len(val) + 30*(len(mc.cfg.Params)-1))
+ cmdSet.Grow(4 + len(param) + 3 + len(val) + 30*(len(mc.cfg.Params)-1))
cmdSet.WriteString("SET ")
} else {
cmdSet.WriteString(", ")
@@ -105,7 +114,7 @@ func (mc *mysqlConn) Begin() (driver.Tx, error) {
func (mc *mysqlConn) begin(readOnly bool) (driver.Tx, error) {
if mc.closed.Load() {
- errLog.Print(ErrInvalidConn)
+ mc.log(ErrInvalidConn)
return nil, driver.ErrBadConn
}
var q string
@@ -128,7 +137,7 @@ func (mc *mysqlConn) Close() (err error) {
}
mc.cleanup()
-
+ mc.clearResult()
return
}
@@ -143,12 +152,16 @@ func (mc *mysqlConn) cleanup() {
// Makes cleanup idempotent
close(mc.closech)
- if mc.netConn == nil {
+ conn := mc.rawConn
+ if conn == nil {
return
}
- if err := mc.netConn.Close(); err != nil {
- errLog.Print(err)
+ if err := conn.Close(); err != nil {
+ mc.log(err)
}
+ // This function can be called from multiple goroutines.
+ // So we can not mc.clearResult() here.
+ // Caller should do it if they are in safe goroutine.
}
func (mc *mysqlConn) error() error {
@@ -163,14 +176,14 @@ func (mc *mysqlConn) error() error {
func (mc *mysqlConn) Prepare(query string) (driver.Stmt, error) {
if mc.closed.Load() {
- errLog.Print(ErrInvalidConn)
+ mc.log(ErrInvalidConn)
return nil, driver.ErrBadConn
}
// Send command
err := mc.writeCommandPacketStr(comStmtPrepare, query)
if err != nil {
// STMT_PREPARE is safe to retry. So we can return ErrBadConn here.
- errLog.Print(err)
+ mc.log(err)
return nil, driver.ErrBadConn
}
@@ -204,7 +217,7 @@ func (mc *mysqlConn) interpolateParams(query string, args []driver.Value) (strin
buf, err := mc.buf.takeCompleteBuffer()
if err != nil {
// can not take the buffer. Something must be wrong with the connection
- errLog.Print(err)
+ mc.log(err)
return "", ErrInvalidConn
}
buf = buf[:0]
@@ -246,7 +259,7 @@ func (mc *mysqlConn) interpolateParams(query string, args []driver.Value) (strin
buf = append(buf, "'0000-00-00'"...)
} else {
buf = append(buf, '\'')
- buf, err = appendDateTime(buf, v.In(mc.cfg.Loc))
+ buf, err = appendDateTime(buf, v.In(mc.cfg.Loc), mc.cfg.timeTruncate)
if err != nil {
return "", err
}
@@ -296,7 +309,7 @@ func (mc *mysqlConn) interpolateParams(query string, args []driver.Value) (strin
func (mc *mysqlConn) Exec(query string, args []driver.Value) (driver.Result, error) {
if mc.closed.Load() {
- errLog.Print(ErrInvalidConn)
+ mc.log(ErrInvalidConn)
return nil, driver.ErrBadConn
}
if len(args) != 0 {
@@ -310,28 +323,25 @@ func (mc *mysqlConn) Exec(query string, args []driver.Value) (driver.Result, err
}
query = prepared
}
- mc.affectedRows = 0
- mc.insertId = 0
err := mc.exec(query)
if err == nil {
- return &mysqlResult{
- affectedRows: int64(mc.affectedRows),
- insertId: int64(mc.insertId),
- }, err
+ copied := mc.result
+ return &copied, err
}
return nil, mc.markBadConn(err)
}
// Internal function to execute commands
func (mc *mysqlConn) exec(query string) error {
+ handleOk := mc.clearResult()
// Send command
if err := mc.writeCommandPacketStr(comQuery, query); err != nil {
return mc.markBadConn(err)
}
// Read Result
- resLen, err := mc.readResultSetHeaderPacket()
+ resLen, err := handleOk.readResultSetHeaderPacket()
if err != nil {
return err
}
@@ -348,7 +358,7 @@ func (mc *mysqlConn) exec(query string) error {
}
}
- return mc.discardResults()
+ return handleOk.discardResults()
}
func (mc *mysqlConn) Query(query string, args []driver.Value) (driver.Rows, error) {
@@ -356,8 +366,10 @@ func (mc *mysqlConn) Query(query string, args []driver.Value) (driver.Rows, erro
}
func (mc *mysqlConn) query(query string, args []driver.Value) (*textRows, error) {
+ handleOk := mc.clearResult()
+
if mc.closed.Load() {
- errLog.Print(ErrInvalidConn)
+ mc.log(ErrInvalidConn)
return nil, driver.ErrBadConn
}
if len(args) != 0 {
@@ -376,7 +388,7 @@ func (mc *mysqlConn) query(query string, args []driver.Value) (*textRows, error)
if err == nil {
// Read Result
var resLen int
- resLen, err = mc.readResultSetHeaderPacket()
+ resLen, err = handleOk.readResultSetHeaderPacket()
if err == nil {
rows := new(textRows)
rows.mc = mc
@@ -404,12 +416,13 @@ func (mc *mysqlConn) query(query string, args []driver.Value) (*textRows, error)
// The returned byte slice is only valid until the next read
func (mc *mysqlConn) getSystemVar(name string) ([]byte, error) {
// Send command
+ handleOk := mc.clearResult()
if err := mc.writeCommandPacketStr(comQuery, "SELECT @@"+name); err != nil {
return nil, err
}
// Read Result
- resLen, err := mc.readResultSetHeaderPacket()
+ resLen, err := handleOk.readResultSetHeaderPacket()
if err == nil {
rows := new(textRows)
rows.mc = mc
@@ -451,7 +464,7 @@ func (mc *mysqlConn) finish() {
// Ping implements driver.Pinger interface
func (mc *mysqlConn) Ping(ctx context.Context) (err error) {
if mc.closed.Load() {
- errLog.Print(ErrInvalidConn)
+ mc.log(ErrInvalidConn)
return driver.ErrBadConn
}
@@ -460,11 +473,12 @@ func (mc *mysqlConn) Ping(ctx context.Context) (err error) {
}
defer mc.finish()
+ handleOk := mc.clearResult()
if err = mc.writeCommandPacket(comPing); err != nil {
return mc.markBadConn(err)
}
- return mc.readResultOK()
+ return handleOk.readResultOK()
}
// BeginTx implements driver.ConnBeginTx interface
@@ -639,7 +653,31 @@ func (mc *mysqlConn) ResetSession(ctx context.Context) error {
if mc.closed.Load() {
return driver.ErrBadConn
}
- mc.reset = true
+
+ // Perform a stale connection check. We only perform this check for
+ // the first query on a connection that has been checked out of the
+ // connection pool: a fresh connection from the pool is more likely
+ // to be stale, and it has not performed any previous writes that
+ // could cause data corruption, so it's safe to return ErrBadConn
+ // if the check fails.
+ if mc.cfg.CheckConnLiveness {
+ conn := mc.netConn
+ if mc.rawConn != nil {
+ conn = mc.rawConn
+ }
+ var err error
+ if mc.cfg.ReadTimeout != 0 {
+ err = conn.SetReadDeadline(time.Now().Add(mc.cfg.ReadTimeout))
+ }
+ if err == nil {
+ err = connCheck(conn)
+ }
+ if err != nil {
+ mc.log("closing bad idle connection: ", err)
+ return driver.ErrBadConn
+ }
+ }
+
return nil
}
diff --git a/dependencies/pkg/mod/github.com/go-sql-driver/mysql@v1.7.1/connection_test.go b/dependencies/pkg/mod/github.com/go-sql-driver/mysql@v1.8.1/connection_test.go
index b6764a2..98c985a 100644
--- a/dependencies/pkg/mod/github.com/go-sql-driver/mysql@v1.7.1/connection_test.go
+++ b/dependencies/pkg/mod/github.com/go-sql-driver/mysql@v1.8.1/connection_test.go
@@ -179,6 +179,7 @@ func TestPingErrInvalidConn(t *testing.T) {
buf: newBuffer(nc),
maxAllowedPacket: defaultMaxAllowedPacket,
closech: make(chan struct{}),
+ cfg: NewConfig(),
}
err := ms.Ping(context.Background())
diff --git a/dependencies/pkg/mod/github.com/go-sql-driver/mysql@v1.7.1/connector.go b/dependencies/pkg/mod/github.com/go-sql-driver/mysql@v1.8.1/connector.go
index d567b4e..b670775 100644
--- a/dependencies/pkg/mod/github.com/go-sql-driver/mysql@v1.7.1/connector.go
+++ b/dependencies/pkg/mod/github.com/go-sql-driver/mysql@v1.8.1/connector.go
@@ -12,10 +12,53 @@ import (
"context"
"database/sql/driver"
"net"
+ "os"
+ "strconv"
+ "strings"
)
type connector struct {
- cfg *Config // immutable private copy.
+ cfg *Config // immutable private copy.
+ encodedAttributes string // Encoded connection attributes.
+}
+
+func encodeConnectionAttributes(cfg *Config) string {
+ connAttrsBuf := make([]byte, 0)
+
+ // default connection attributes
+ connAttrsBuf = appendLengthEncodedString(connAttrsBuf, connAttrClientName)
+ connAttrsBuf = appendLengthEncodedString(connAttrsBuf, connAttrClientNameValue)
+ connAttrsBuf = appendLengthEncodedString(connAttrsBuf, connAttrOS)
+ connAttrsBuf = appendLengthEncodedString(connAttrsBuf, connAttrOSValue)
+ connAttrsBuf = appendLengthEncodedString(connAttrsBuf, connAttrPlatform)
+ connAttrsBuf = appendLengthEncodedString(connAttrsBuf, connAttrPlatformValue)
+ connAttrsBuf = appendLengthEncodedString(connAttrsBuf, connAttrPid)
+ connAttrsBuf = appendLengthEncodedString(connAttrsBuf, strconv.Itoa(os.Getpid()))
+ serverHost, _, _ := net.SplitHostPort(cfg.Addr)
+ if serverHost != "" {
+ connAttrsBuf = appendLengthEncodedString(connAttrsBuf, connAttrServerHost)
+ connAttrsBuf = appendLengthEncodedString(connAttrsBuf, serverHost)
+ }
+
+ // user-defined connection attributes
+ for _, connAttr := range strings.Split(cfg.ConnectionAttributes, ",") {
+ k, v, found := strings.Cut(connAttr, ":")
+ if !found {
+ continue
+ }
+ connAttrsBuf = appendLengthEncodedString(connAttrsBuf, k)
+ connAttrsBuf = appendLengthEncodedString(connAttrsBuf, v)
+ }
+
+ return string(connAttrsBuf)
+}
+
+func newConnector(cfg *Config) *connector {
+ encodedAttributes := encodeConnectionAttributes(cfg)
+ return &connector{
+ cfg: cfg,
+ encodedAttributes: encodedAttributes,
+ }
}
// Connect implements driver.Connector interface.
@@ -23,12 +66,23 @@ type connector struct {
func (c *connector) Connect(ctx context.Context) (driver.Conn, error) {
var err error
+ // Invoke beforeConnect if present, with a copy of the configuration
+ cfg := c.cfg
+ if c.cfg.beforeConnect != nil {
+ cfg = c.cfg.Clone()
+ err = c.cfg.beforeConnect(ctx, cfg)
+ if err != nil {
+ return nil, err
+ }
+ }
+
// New mysqlConn
mc := &mysqlConn{
maxAllowedPacket: maxPacketSize,
maxWriteSize: maxPacketSize - 1,
closech: make(chan struct{}),
- cfg: c.cfg,
+ cfg: cfg,
+ connector: c,
}
mc.parseTime = mc.cfg.ParseTime
@@ -48,18 +102,15 @@ func (c *connector) Connect(ctx context.Context) (driver.Conn, error) {
nd := net.Dialer{Timeout: mc.cfg.Timeout}
mc.netConn, err = nd.DialContext(ctx, mc.cfg.Net, mc.cfg.Addr)
}
-
if err != nil {
return nil, err
}
+ mc.rawConn = mc.netConn
// Enable TCP Keepalives on TCP connections
if tc, ok := mc.netConn.(*net.TCPConn); ok {
if err := tc.SetKeepAlive(true); err != nil {
- // Don't send COM_QUIT before handshake.
- mc.netConn.Close()
- mc.netConn = nil
- return nil, err
+ c.cfg.Logger.Print(err)
}
}
@@ -92,7 +143,7 @@ func (c *connector) Connect(ctx context.Context) (driver.Conn, error) {
authResp, err := mc.auth(authData, plugin)
if err != nil {
// try the default auth plugin, if using the requested plugin failed
- errLog.Print("could not use requested auth plugin '"+plugin+"': ", err.Error())
+ c.cfg.Logger.Print("could not use requested auth plugin '"+plugin+"': ", err.Error())
plugin = defaultAuthPlugin
authResp, err = mc.auth(authData, plugin)
if err != nil {
diff --git a/dependencies/pkg/mod/github.com/go-sql-driver/mysql@v1.7.1/connector_test.go b/dependencies/pkg/mod/github.com/go-sql-driver/mysql@v1.8.1/connector_test.go
index 976903c..82d8c59 100644
--- a/dependencies/pkg/mod/github.com/go-sql-driver/mysql@v1.7.1/connector_test.go
+++ b/dependencies/pkg/mod/github.com/go-sql-driver/mysql@v1.8.1/connector_test.go
@@ -8,11 +8,11 @@ import (
)
func TestConnectorReturnsTimeout(t *testing.T) {
- connector := &connector{&Config{
+ connector := newConnector(&Config{
Net: "tcp",
Addr: "1.1.1.1:1234",
Timeout: 10 * time.Millisecond,
- }}
+ })
_, err := connector.Connect(context.Background())
if err == nil {
diff --git a/dependencies/pkg/mod/github.com/go-sql-driver/mysql@v1.7.1/const.go b/dependencies/pkg/mod/github.com/go-sql-driver/mysql@v1.8.1/const.go
index 64e2bce..22526e0 100644
--- a/dependencies/pkg/mod/github.com/go-sql-driver/mysql@v1.7.1/const.go
+++ b/dependencies/pkg/mod/github.com/go-sql-driver/mysql@v1.8.1/const.go
@@ -8,12 +8,25 @@
package mysql
+import "runtime"
+
const (
defaultAuthPlugin = "mysql_native_password"
defaultMaxAllowedPacket = 64 << 20 // 64 MiB. See https://github.com/go-sql-driver/mysql/issues/1355
minProtocolVersion = 10
maxPacketSize = 1<<24 - 1
timeFormat = "2006-01-02 15:04:05.999999"
+
+ // Connection attributes
+ // See https://dev.mysql.com/doc/refman/8.0/en/performance-schema-connection-attribute-tables.html#performance-schema-connection-attributes-available
+ connAttrClientName = "_client_name"
+ connAttrClientNameValue = "Go-MySQL-Driver"
+ connAttrOS = "_os"
+ connAttrOSValue = runtime.GOOS
+ connAttrPlatform = "_platform"
+ connAttrPlatformValue = runtime.GOARCH
+ connAttrPid = "_pid"
+ connAttrServerHost = "_server_host"
)
// MySQL constants documentation:
diff --git a/dependencies/pkg/mod/github.com/go-sql-driver/mysql@v1.7.1/driver.go b/dependencies/pkg/mod/github.com/go-sql-driver/mysql@v1.8.1/driver.go
index ad7aec2..105316b 100644
--- a/dependencies/pkg/mod/github.com/go-sql-driver/mysql@v1.7.1/driver.go
+++ b/dependencies/pkg/mod/github.com/go-sql-driver/mysql@v1.8.1/driver.go
@@ -55,6 +55,15 @@ func RegisterDialContext(net string, dial DialContextFunc) {
dials[net] = dial
}
+// DeregisterDialContext removes the custom dial function registered with the given net.
+func DeregisterDialContext(net string) {
+ dialsLock.Lock()
+ defer dialsLock.Unlock()
+ if dials != nil {
+ delete(dials, net)
+ }
+}
+
// RegisterDial registers a custom dial function. It can then be used by the
// network address mynet(addr), where mynet is the registered new network.
// addr is passed as a parameter to the dial function.
@@ -74,14 +83,18 @@ func (d MySQLDriver) Open(dsn string) (driver.Conn, error) {
if err != nil {
return nil, err
}
- c := &connector{
- cfg: cfg,
- }
+ c := newConnector(cfg)
return c.Connect(context.Background())
}
+// This variable can be replaced with -ldflags like below:
+// go build "-ldflags=-X github.com/go-sql-driver/mysql.driverName=custom"
+var driverName = "mysql"
+
func init() {
- sql.Register("mysql", &MySQLDriver{})
+ if driverName != "" {
+ sql.Register(driverName, &MySQLDriver{})
+ }
}
// NewConnector returns new driver.Connector.
@@ -92,7 +105,7 @@ func NewConnector(cfg *Config) (driver.Connector, error) {
if err := cfg.normalize(); err != nil {
return nil, err
}
- return &connector{cfg: cfg}, nil
+ return newConnector(cfg), nil
}
// OpenConnector implements driver.DriverContext.
@@ -101,7 +114,5 @@ func (d MySQLDriver) OpenConnector(dsn string) (driver.Connector, error) {
if err != nil {
return nil, err
}
- return &connector{
- cfg: cfg,
- }, nil
+ return newConnector(cfg), nil
}
diff --git a/dependencies/pkg/mod/github.com/go-sql-driver/mysql@v1.7.1/driver_test.go b/dependencies/pkg/mod/github.com/go-sql-driver/mysql@v1.8.1/driver_test.go
index a1c7767..4fd196d 100644
--- a/dependencies/pkg/mod/github.com/go-sql-driver/mysql@v1.7.1/driver_test.go
+++ b/dependencies/pkg/mod/github.com/go-sql-driver/mysql@v1.8.1/driver_test.go
@@ -11,20 +11,22 @@ package mysql
import (
"bytes"
"context"
+ "crypto/rand"
"crypto/tls"
"database/sql"
"database/sql/driver"
"encoding/json"
"fmt"
"io"
- "io/ioutil"
"log"
"math"
+ mrand "math/rand"
"net"
"net/url"
"os"
"reflect"
"runtime"
+ "strconv"
"strings"
"sync"
"sync/atomic"
@@ -32,6 +34,16 @@ import (
"time"
)
+// This variable can be replaced with -ldflags like below:
+// go test "-ldflags=-X github.com/go-sql-driver/mysql.driverNameTest=custom"
+var driverNameTest string
+
+func init() {
+ if driverNameTest == "" {
+ driverNameTest = driverName
+ }
+}
+
// Ensure that all the driver interfaces are implemented
var (
_ driver.Rows = &binaryRows{}
@@ -83,7 +95,7 @@ func init() {
}
type DBTest struct {
- *testing.T
+ testing.TB
db *sql.DB
}
@@ -112,12 +124,14 @@ func runTestsWithMultiStatement(t *testing.T, dsn string, tests ...func(dbt *DBT
dsn += "&multiStatements=true"
var db *sql.DB
if _, err := ParseDSN(dsn); err != errInvalidDSNUnsafeCollation {
- db, err = sql.Open("mysql", dsn)
+ db, err = sql.Open(driverNameTest, dsn)
if err != nil {
t.Fatalf("error connecting: %s", err.Error())
}
defer db.Close()
}
+ // Previous test may be skipped without dropping the test table
+ db.Exec("DROP TABLE IF EXISTS test")
dbt := &DBTest{t, db}
for _, test := range tests {
@@ -131,59 +145,111 @@ func runTests(t *testing.T, dsn string, tests ...func(dbt *DBTest)) {
t.Skipf("MySQL server not running on %s", netAddr)
}
- db, err := sql.Open("mysql", dsn)
+ db, err := sql.Open(driverNameTest, dsn)
if err != nil {
t.Fatalf("error connecting: %s", err.Error())
}
defer db.Close()
- db.Exec("DROP TABLE IF EXISTS test")
+ cleanup := func() {
+ db.Exec("DROP TABLE IF EXISTS test")
+ }
dsn2 := dsn + "&interpolateParams=true"
var db2 *sql.DB
if _, err := ParseDSN(dsn2); err != errInvalidDSNUnsafeCollation {
- db2, err = sql.Open("mysql", dsn2)
+ db2, err = sql.Open(driverNameTest, dsn2)
if err != nil {
t.Fatalf("error connecting: %s", err.Error())
}
defer db2.Close()
}
- dsn3 := dsn + "&multiStatements=true"
- var db3 *sql.DB
- if _, err := ParseDSN(dsn3); err != errInvalidDSNUnsafeCollation {
- db3, err = sql.Open("mysql", dsn3)
- if err != nil {
- t.Fatalf("error connecting: %s", err.Error())
+ for _, test := range tests {
+ test := test
+ t.Run("default", func(t *testing.T) {
+ dbt := &DBTest{t, db}
+ t.Cleanup(cleanup)
+ test(dbt)
+ })
+ if db2 != nil {
+ t.Run("interpolateParams", func(t *testing.T) {
+ dbt2 := &DBTest{t, db2}
+ t.Cleanup(cleanup)
+ test(dbt2)
+ })
}
- defer db3.Close()
}
+}
- dbt := &DBTest{t, db}
- dbt2 := &DBTest{t, db2}
- dbt3 := &DBTest{t, db3}
- for _, test := range tests {
- test(dbt)
- dbt.db.Exec("DROP TABLE IF EXISTS test")
- if db2 != nil {
- test(dbt2)
- dbt2.db.Exec("DROP TABLE IF EXISTS test")
+// runTestsParallel runs the tests in parallel with a separate database connection for each test.
+func runTestsParallel(t *testing.T, dsn string, tests ...func(dbt *DBTest, tableName string)) {
+ if !available {
+ t.Skipf("MySQL server not running on %s", netAddr)
+ }
+
+ newTableName := func(t *testing.T) string {
+ t.Helper()
+ var buf [8]byte
+ if _, err := rand.Read(buf[:]); err != nil {
+ t.Fatal(err)
}
- if db3 != nil {
- test(dbt3)
- dbt3.db.Exec("DROP TABLE IF EXISTS test")
+ return fmt.Sprintf("test_%x", buf[:])
+ }
+
+ t.Parallel()
+ for _, test := range tests {
+ test := test
+
+ t.Run("default", func(t *testing.T) {
+ t.Parallel()
+
+ tableName := newTableName(t)
+ db, err := sql.Open("mysql", dsn)
+ if err != nil {
+ t.Fatalf("error connecting: %s", err.Error())
+ }
+ t.Cleanup(func() {
+ db.Exec("DROP TABLE IF EXISTS " + tableName)
+ db.Close()
+ })
+
+ dbt := &DBTest{t, db}
+ test(dbt, tableName)
+ })
+
+ dsn2 := dsn + "&interpolateParams=true"
+ if _, err := ParseDSN(dsn2); err == errInvalidDSNUnsafeCollation {
+ t.Run("interpolateParams", func(t *testing.T) {
+ t.Parallel()
+
+ tableName := newTableName(t)
+ db, err := sql.Open("mysql", dsn2)
+ if err != nil {
+ t.Fatalf("error connecting: %s", err.Error())
+ }
+ t.Cleanup(func() {
+ db.Exec("DROP TABLE IF EXISTS " + tableName)
+ db.Close()
+ })
+
+ dbt := &DBTest{t, db}
+ test(dbt, tableName)
+ })
}
}
}
func (dbt *DBTest) fail(method, query string, err error) {
+ dbt.Helper()
if len(query) > 300 {
query = "[query too large to print]"
}
dbt.Fatalf("error on %s %s: %s", method, query, err.Error())
}
-func (dbt *DBTest) mustExec(query string, args ...interface{}) (res sql.Result) {
+func (dbt *DBTest) mustExec(query string, args ...any) (res sql.Result) {
+ dbt.Helper()
res, err := dbt.db.Exec(query, args...)
if err != nil {
dbt.fail("exec", query, err)
@@ -191,7 +257,8 @@ func (dbt *DBTest) mustExec(query string, args ...interface{}) (res sql.Result)
return res
}
-func (dbt *DBTest) mustQuery(query string, args ...interface{}) (rows *sql.Rows) {
+func (dbt *DBTest) mustQuery(query string, args ...any) (rows *sql.Rows) {
+ dbt.Helper()
rows, err := dbt.db.Query(query, args...)
if err != nil {
dbt.fail("query", query, err)
@@ -211,7 +278,7 @@ func maybeSkip(t *testing.T, err error, skipErrno uint16) {
}
func TestEmptyQuery(t *testing.T) {
- runTests(t, dsn, func(dbt *DBTest) {
+ runTestsParallel(t, dsn, func(dbt *DBTest, _ string) {
// just a comment, no query
rows := dbt.mustQuery("--")
defer rows.Close()
@@ -223,20 +290,20 @@ func TestEmptyQuery(t *testing.T) {
}
func TestCRUD(t *testing.T) {
- runTests(t, dsn, func(dbt *DBTest) {
+ runTestsParallel(t, dsn, func(dbt *DBTest, tbl string) {
// Create Table
- dbt.mustExec("CREATE TABLE test (value BOOL)")
+ dbt.mustExec("CREATE TABLE " + tbl + " (value BOOL)")
// Test for unexpected data
var out bool
- rows := dbt.mustQuery("SELECT * FROM test")
+ rows := dbt.mustQuery("SELECT * FROM " + tbl)
if rows.Next() {
dbt.Error("unexpected data in empty table")
}
rows.Close()
// Create Data
- res := dbt.mustExec("INSERT INTO test VALUES (1)")
+ res := dbt.mustExec("INSERT INTO " + tbl + " VALUES (1)")
count, err := res.RowsAffected()
if err != nil {
dbt.Fatalf("res.RowsAffected() returned error: %s", err.Error())
@@ -254,7 +321,7 @@ func TestCRUD(t *testing.T) {
}
// Read
- rows = dbt.mustQuery("SELECT value FROM test")
+ rows = dbt.mustQuery("SELECT value FROM " + tbl)
if rows.Next() {
rows.Scan(&out)
if true != out {
@@ -270,7 +337,7 @@ func TestCRUD(t *testing.T) {
rows.Close()
// Update
- res = dbt.mustExec("UPDATE test SET value = ? WHERE value = ?", false, true)
+ res = dbt.mustExec("UPDATE "+tbl+" SET value = ? WHERE value = ?", false, true)
count, err = res.RowsAffected()
if err != nil {
dbt.Fatalf("res.RowsAffected() returned error: %s", err.Error())
@@ -280,7 +347,7 @@ func TestCRUD(t *testing.T) {
}
// Check Update
- rows = dbt.mustQuery("SELECT value FROM test")
+ rows = dbt.mustQuery("SELECT value FROM " + tbl)
if rows.Next() {
rows.Scan(&out)
if false != out {
@@ -296,7 +363,7 @@ func TestCRUD(t *testing.T) {
rows.Close()
// Delete
- res = dbt.mustExec("DELETE FROM test WHERE value = ?", false)
+ res = dbt.mustExec("DELETE FROM "+tbl+" WHERE value = ?", false)
count, err = res.RowsAffected()
if err != nil {
dbt.Fatalf("res.RowsAffected() returned error: %s", err.Error())
@@ -306,7 +373,7 @@ func TestCRUD(t *testing.T) {
}
// Check for unexpected rows
- res = dbt.mustExec("DELETE FROM test")
+ res = dbt.mustExec("DELETE FROM " + tbl)
count, err = res.RowsAffected()
if err != nil {
dbt.Fatalf("res.RowsAffected() returned error: %s", err.Error())
@@ -317,6 +384,51 @@ func TestCRUD(t *testing.T) {
})
}
+// TestNumbers test that selecting numeric columns.
+// Both of textRows and binaryRows should return same type and value.
+func TestNumbersToAny(t *testing.T) {
+ runTestsParallel(t, dsn, func(dbt *DBTest, tbl string) {
+ dbt.mustExec("CREATE TABLE " + tbl + " (id INT PRIMARY KEY, b BOOL, i8 TINYINT, " +
+ "i16 SMALLINT, i32 INT, i64 BIGINT, f32 FLOAT, f64 DOUBLE, iu32 INT UNSIGNED)")
+ dbt.mustExec("INSERT INTO " + tbl + " VALUES (1, true, 127, 32767, 2147483647, 9223372036854775807, 1.25, 2.5, 4294967295)")
+
+ // Use binaryRows for interpolateParams=false and textRows for interpolateParams=true.
+ rows := dbt.mustQuery("SELECT b, i8, i16, i32, i64, f32, f64, iu32 FROM "+tbl+" WHERE id=?", 1)
+ if !rows.Next() {
+ dbt.Fatal("no data")
+ }
+ var b, i8, i16, i32, i64, f32, f64, iu32 any
+ err := rows.Scan(&b, &i8, &i16, &i32, &i64, &f32, &f64, &iu32)
+ if err != nil {
+ dbt.Fatal(err)
+ }
+ if b.(int64) != 1 {
+ dbt.Errorf("b != 1")
+ }
+ if i8.(int64) != 127 {
+ dbt.Errorf("i8 != 127")
+ }
+ if i16.(int64) != 32767 {
+ dbt.Errorf("i16 != 32767")
+ }
+ if i32.(int64) != 2147483647 {
+ dbt.Errorf("i32 != 2147483647")
+ }
+ if i64.(int64) != 9223372036854775807 {
+ dbt.Errorf("i64 != 9223372036854775807")
+ }
+ if f32.(float32) != 1.25 {
+ dbt.Errorf("f32 != 1.25")
+ }
+ if f64.(float64) != 2.5 {
+ dbt.Errorf("f64 != 2.5")
+ }
+ if iu32.(int64) != 4294967295 {
+ dbt.Errorf("iu32 != 4294967295")
+ }
+ })
+}
+
func TestMultiQuery(t *testing.T) {
runTestsWithMultiStatement(t, dsn, func(dbt *DBTest) {
// Create Table
@@ -347,8 +459,8 @@ func TestMultiQuery(t *testing.T) {
rows := dbt.mustQuery("SELECT value FROM test WHERE id=1;")
if rows.Next() {
rows.Scan(&out)
- if 5 != out {
- dbt.Errorf("5 != %d", out)
+ if out != 5 {
+ dbt.Errorf("expected 5, got %d", out)
}
if rows.Next() {
@@ -363,7 +475,7 @@ func TestMultiQuery(t *testing.T) {
}
func TestInt(t *testing.T) {
- runTests(t, dsn, func(dbt *DBTest) {
+ runTestsParallel(t, dsn, func(dbt *DBTest, tbl string) {
types := [5]string{"TINYINT", "SMALLINT", "MEDIUMINT", "INT", "BIGINT"}
in := int64(42)
var out int64
@@ -371,11 +483,11 @@ func TestInt(t *testing.T) {
// SIGNED
for _, v := range types {
- dbt.mustExec("CREATE TABLE test (value " + v + ")")
+ dbt.mustExec("CREATE TABLE " + tbl + " (value " + v + ")")
- dbt.mustExec("INSERT INTO test VALUES (?)", in)
+ dbt.mustExec("INSERT INTO "+tbl+" VALUES (?)", in)
- rows = dbt.mustQuery("SELECT value FROM test")
+ rows = dbt.mustQuery("SELECT value FROM " + tbl)
if rows.Next() {
rows.Scan(&out)
if in != out {
@@ -386,16 +498,16 @@ func TestInt(t *testing.T) {
}
rows.Close()
- dbt.mustExec("DROP TABLE IF EXISTS test")
+ dbt.mustExec("DROP TABLE IF EXISTS " + tbl)
}
// UNSIGNED ZEROFILL
for _, v := range types {
- dbt.mustExec("CREATE TABLE test (value " + v + " ZEROFILL)")
+ dbt.mustExec("CREATE TABLE " + tbl + " (value " + v + " ZEROFILL)")
- dbt.mustExec("INSERT INTO test VALUES (?)", in)
+ dbt.mustExec("INSERT INTO "+tbl+" VALUES (?)", in)
- rows = dbt.mustQuery("SELECT value FROM test")
+ rows = dbt.mustQuery("SELECT value FROM " + tbl)
if rows.Next() {
rows.Scan(&out)
if in != out {
@@ -406,21 +518,21 @@ func TestInt(t *testing.T) {
}
rows.Close()
- dbt.mustExec("DROP TABLE IF EXISTS test")
+ dbt.mustExec("DROP TABLE IF EXISTS " + tbl)
}
})
}
func TestFloat32(t *testing.T) {
- runTests(t, dsn, func(dbt *DBTest) {
+ runTestsParallel(t, dsn, func(dbt *DBTest, tbl string) {
types := [2]string{"FLOAT", "DOUBLE"}
in := float32(42.23)
var out float32
var rows *sql.Rows
for _, v := range types {
- dbt.mustExec("CREATE TABLE test (value " + v + ")")
- dbt.mustExec("INSERT INTO test VALUES (?)", in)
- rows = dbt.mustQuery("SELECT value FROM test")
+ dbt.mustExec("CREATE TABLE " + tbl + " (value " + v + ")")
+ dbt.mustExec("INSERT INTO "+tbl+" VALUES (?)", in)
+ rows = dbt.mustQuery("SELECT value FROM " + tbl)
if rows.Next() {
rows.Scan(&out)
if in != out {
@@ -430,21 +542,21 @@ func TestFloat32(t *testing.T) {
dbt.Errorf("%s: no data", v)
}
rows.Close()
- dbt.mustExec("DROP TABLE IF EXISTS test")
+ dbt.mustExec("DROP TABLE IF EXISTS " + tbl)
}
})
}
func TestFloat64(t *testing.T) {
- runTests(t, dsn, func(dbt *DBTest) {
+ runTestsParallel(t, dsn, func(dbt *DBTest, tbl string) {
types := [2]string{"FLOAT", "DOUBLE"}
var expected float64 = 42.23
var out float64
var rows *sql.Rows
for _, v := range types {
- dbt.mustExec("CREATE TABLE test (value " + v + ")")
- dbt.mustExec("INSERT INTO test VALUES (42.23)")
- rows = dbt.mustQuery("SELECT value FROM test")
+ dbt.mustExec("CREATE TABLE " + tbl + " (value " + v + ")")
+ dbt.mustExec("INSERT INTO " + tbl + " VALUES (42.23)")
+ rows = dbt.mustQuery("SELECT value FROM " + tbl)
if rows.Next() {
rows.Scan(&out)
if expected != out {
@@ -454,21 +566,21 @@ func TestFloat64(t *testing.T) {
dbt.Errorf("%s: no data", v)
}
rows.Close()
- dbt.mustExec("DROP TABLE IF EXISTS test")
+ dbt.mustExec("DROP TABLE IF EXISTS " + tbl)
}
})
}
func TestFloat64Placeholder(t *testing.T) {
- runTests(t, dsn, func(dbt *DBTest) {
+ runTestsParallel(t, dsn, func(dbt *DBTest, tbl string) {
types := [2]string{"FLOAT", "DOUBLE"}
var expected float64 = 42.23
var out float64
var rows *sql.Rows
for _, v := range types {
- dbt.mustExec("CREATE TABLE test (id int, value " + v + ")")
- dbt.mustExec("INSERT INTO test VALUES (1, 42.23)")
- rows = dbt.mustQuery("SELECT value FROM test WHERE id = ?", 1)
+ dbt.mustExec("CREATE TABLE " + tbl + " (id int, value " + v + ")")
+ dbt.mustExec("INSERT INTO " + tbl + " VALUES (1, 42.23)")
+ rows = dbt.mustQuery("SELECT value FROM "+tbl+" WHERE id = ?", 1)
if rows.Next() {
rows.Scan(&out)
if expected != out {
@@ -478,24 +590,24 @@ func TestFloat64Placeholder(t *testing.T) {
dbt.Errorf("%s: no data", v)
}
rows.Close()
- dbt.mustExec("DROP TABLE IF EXISTS test")
+ dbt.mustExec("DROP TABLE IF EXISTS " + tbl)
}
})
}
func TestString(t *testing.T) {
- runTests(t, dsn, func(dbt *DBTest) {
+ runTestsParallel(t, dsn, func(dbt *DBTest, tbl string) {
types := [6]string{"CHAR(255)", "VARCHAR(255)", "TINYTEXT", "TEXT", "MEDIUMTEXT", "LONGTEXT"}
in := "κόσμε üöäßñóùéàâÿœ'îë Árvíztűrő いろはにほへとちりぬるを イロハニホヘト דג סקרן чащах น่าฟังเอย"
var out string
var rows *sql.Rows
for _, v := range types {
- dbt.mustExec("CREATE TABLE test (value " + v + ") CHARACTER SET utf8")
+ dbt.mustExec("CREATE TABLE " + tbl + " (value " + v + ") CHARACTER SET utf8")
- dbt.mustExec("INSERT INTO test VALUES (?)", in)
+ dbt.mustExec("INSERT INTO "+tbl+" VALUES (?)", in)
- rows = dbt.mustQuery("SELECT value FROM test")
+ rows = dbt.mustQuery("SELECT value FROM " + tbl)
if rows.Next() {
rows.Scan(&out)
if in != out {
@@ -506,11 +618,11 @@ func TestString(t *testing.T) {
}
rows.Close()
- dbt.mustExec("DROP TABLE IF EXISTS test")
+ dbt.mustExec("DROP TABLE IF EXISTS " + tbl)
}
// BLOB
- dbt.mustExec("CREATE TABLE test (id int, value BLOB) CHARACTER SET utf8")
+ dbt.mustExec("CREATE TABLE " + tbl + " (id int, value BLOB) CHARACTER SET utf8")
id := 2
in = "Lorem ipsum dolor sit amet, consetetur sadipscing elitr, " +
@@ -521,9 +633,9 @@ func TestString(t *testing.T) {
"sed diam nonumy eirmod tempor invidunt ut labore et dolore magna aliquyam erat, " +
"sed diam voluptua. At vero eos et accusam et justo duo dolores et ea rebum. " +
"Stet clita kasd gubergren, no sea takimata sanctus est Lorem ipsum dolor sit amet."
- dbt.mustExec("INSERT INTO test VALUES (?, ?)", id, in)
+ dbt.mustExec("INSERT INTO "+tbl+" VALUES (?, ?)", id, in)
- err := dbt.db.QueryRow("SELECT value FROM test WHERE id = ?", id).Scan(&out)
+ err := dbt.db.QueryRow("SELECT value FROM "+tbl+" WHERE id = ?", id).Scan(&out)
if err != nil {
dbt.Fatalf("Error on BLOB-Query: %s", err.Error())
} else if out != in {
@@ -533,7 +645,7 @@ func TestString(t *testing.T) {
}
func TestRawBytes(t *testing.T) {
- runTests(t, dsn, func(dbt *DBTest) {
+ runTestsParallel(t, dsn, func(dbt *DBTest, _ string) {
v1 := []byte("aaa")
v2 := []byte("bbb")
rows := dbt.mustQuery("SELECT ?, ?", v1, v2)
@@ -562,7 +674,7 @@ func TestRawBytes(t *testing.T) {
}
func TestRawMessage(t *testing.T) {
- runTests(t, dsn, func(dbt *DBTest) {
+ runTestsParallel(t, dsn, func(dbt *DBTest, _ string) {
v1 := json.RawMessage("{}")
v2 := json.RawMessage("[]")
rows := dbt.mustQuery("SELECT ?, ?", v1, v2)
@@ -593,14 +705,14 @@ func (tv testValuer) Value() (driver.Value, error) {
}
func TestValuer(t *testing.T) {
- runTests(t, dsn, func(dbt *DBTest) {
+ runTestsParallel(t, dsn, func(dbt *DBTest, tbl string) {
in := testValuer{"a_value"}
var out string
var rows *sql.Rows
- dbt.mustExec("CREATE TABLE test (value VARCHAR(255)) CHARACTER SET utf8")
- dbt.mustExec("INSERT INTO test VALUES (?)", in)
- rows = dbt.mustQuery("SELECT value FROM test")
+ dbt.mustExec("CREATE TABLE " + tbl + " (value VARCHAR(255)) CHARACTER SET utf8")
+ dbt.mustExec("INSERT INTO "+tbl+" VALUES (?)", in)
+ rows = dbt.mustQuery("SELECT value FROM " + tbl)
if rows.Next() {
rows.Scan(&out)
if in.value != out {
@@ -610,8 +722,6 @@ func TestValuer(t *testing.T) {
dbt.Errorf("Valuer: no data")
}
rows.Close()
-
- dbt.mustExec("DROP TABLE IF EXISTS test")
})
}
@@ -628,15 +738,15 @@ func (tv testValuerWithValidation) Value() (driver.Value, error) {
}
func TestValuerWithValidation(t *testing.T) {
- runTests(t, dsn, func(dbt *DBTest) {
+ runTestsParallel(t, dsn, func(dbt *DBTest, tbl string) {
in := testValuerWithValidation{"a_value"}
var out string
var rows *sql.Rows
- dbt.mustExec("CREATE TABLE testValuer (value VARCHAR(255)) CHARACTER SET utf8")
- dbt.mustExec("INSERT INTO testValuer VALUES (?)", in)
+ dbt.mustExec("CREATE TABLE " + tbl + " (value VARCHAR(255)) CHARACTER SET utf8")
+ dbt.mustExec("INSERT INTO "+tbl+" VALUES (?)", in)
- rows = dbt.mustQuery("SELECT value FROM testValuer")
+ rows = dbt.mustQuery("SELECT value FROM " + tbl)
defer rows.Close()
if rows.Next() {
@@ -648,19 +758,17 @@ func TestValuerWithValidation(t *testing.T) {
dbt.Errorf("Valuer: no data")
}
- if _, err := dbt.db.Exec("INSERT INTO testValuer VALUES (?)", testValuerWithValidation{""}); err == nil {
+ if _, err := dbt.db.Exec("INSERT INTO "+tbl+" VALUES (?)", testValuerWithValidation{""}); err == nil {
dbt.Errorf("Failed to check valuer error")
}
- if _, err := dbt.db.Exec("INSERT INTO testValuer VALUES (?)", nil); err != nil {
+ if _, err := dbt.db.Exec("INSERT INTO "+tbl+" VALUES (?)", nil); err != nil {
dbt.Errorf("Failed to check nil")
}
- if _, err := dbt.db.Exec("INSERT INTO testValuer VALUES (?)", map[string]bool{}); err == nil {
+ if _, err := dbt.db.Exec("INSERT INTO "+tbl+" VALUES (?)", map[string]bool{}); err == nil {
dbt.Errorf("Failed to check not valuer")
}
-
- dbt.mustExec("DROP TABLE IF EXISTS testValuer")
})
}
@@ -737,7 +845,7 @@ func (t timeTest) run(dbt *DBTest, dbtype, tlayout string, mode timeMode) {
dbt.Errorf("%s [%s]: %s", dbtype, mode, err)
return
}
- var dst interface{}
+ var dst any
err = rows.Scan(&dst)
if err != nil {
dbt.Errorf("%s [%s]: %s", dbtype, mode, err)
@@ -768,7 +876,7 @@ func (t timeTest) run(dbt *DBTest, dbtype, tlayout string, mode timeMode) {
t.s, val.Format(tlayout),
)
default:
- fmt.Printf("%#v\n", []interface{}{dbtype, tlayout, mode, t.s, t.t})
+ fmt.Printf("%#v\n", []any{dbtype, tlayout, mode, t.s, t.t})
dbt.Errorf("%s [%s]: unhandled type %T (is '%v')",
dbtype, mode,
val, val,
@@ -894,7 +1002,7 @@ func TestTimestampMicros(t *testing.T) {
f0 := format[:19]
f1 := format[:21]
f6 := format[:26]
- runTests(t, dsn, func(dbt *DBTest) {
+ runTestsParallel(t, dsn, func(dbt *DBTest, tbl string) {
// check if microseconds are supported.
// Do not use timestamp(x) for that check - before 5.5.6, x would mean display width
// and not precision.
@@ -909,7 +1017,7 @@ func TestTimestampMicros(t *testing.T) {
return
}
_, err := dbt.db.Exec(`
- CREATE TABLE test (
+ CREATE TABLE ` + tbl + ` (
value0 TIMESTAMP NOT NULL DEFAULT '` + f0 + `',
value1 TIMESTAMP(1) NOT NULL DEFAULT '` + f1 + `',
value6 TIMESTAMP(6) NOT NULL DEFAULT '` + f6 + `'
@@ -918,10 +1026,10 @@ func TestTimestampMicros(t *testing.T) {
if err != nil {
dbt.Error(err)
}
- defer dbt.mustExec("DROP TABLE IF EXISTS test")
- dbt.mustExec("INSERT INTO test SET value0=?, value1=?, value6=?", f0, f1, f6)
+ defer dbt.mustExec("DROP TABLE IF EXISTS " + tbl)
+ dbt.mustExec("INSERT INTO "+tbl+" SET value0=?, value1=?, value6=?", f0, f1, f6)
var res0, res1, res6 string
- rows := dbt.mustQuery("SELECT * FROM test")
+ rows := dbt.mustQuery("SELECT * FROM " + tbl)
defer rows.Close()
if !rows.Next() {
dbt.Errorf("test contained no selectable values")
@@ -943,7 +1051,7 @@ func TestTimestampMicros(t *testing.T) {
}
func TestNULL(t *testing.T) {
- runTests(t, dsn, func(dbt *DBTest) {
+ runTestsParallel(t, dsn, func(dbt *DBTest, tbl string) {
nullStmt, err := dbt.db.Prepare("SELECT NULL")
if err != nil {
dbt.Fatal(err)
@@ -1075,12 +1183,12 @@ func TestNULL(t *testing.T) {
}
// Insert NULL
- dbt.mustExec("CREATE TABLE test (dummmy1 int, value int, dummy2 int)")
+ dbt.mustExec("CREATE TABLE " + tbl + " (dummmy1 int, value int, dummy2 int)")
- dbt.mustExec("INSERT INTO test VALUES (?, ?, ?)", 1, nil, 2)
+ dbt.mustExec("INSERT INTO "+tbl+" VALUES (?, ?, ?)", 1, nil, 2)
- var out interface{}
- rows := dbt.mustQuery("SELECT * FROM test")
+ var out any
+ rows := dbt.mustQuery("SELECT * FROM " + tbl)
defer rows.Close()
if rows.Next() {
rows.Scan(&out)
@@ -1104,7 +1212,7 @@ func TestUint64(t *testing.T) {
shigh = int64(uhigh)
stop = ^shigh
)
- runTests(t, dsn, func(dbt *DBTest) {
+ runTestsParallel(t, dsn, func(dbt *DBTest, _ string) {
stmt, err := dbt.db.Prepare(`SELECT ?, ?, ? ,?, ?, ?, ?, ?`)
if err != nil {
dbt.Fatal(err)
@@ -1168,7 +1276,7 @@ func TestLongData(t *testing.T) {
dbt.Fatalf("LONGBLOB: length in: %d, length out: %d", len(inS), len(out))
}
if rows.Next() {
- dbt.Error("LONGBLOB: unexpexted row")
+ dbt.Error("LONGBLOB: unexpected row")
}
} else {
dbt.Fatalf("LONGBLOB: no data")
@@ -1187,7 +1295,7 @@ func TestLongData(t *testing.T) {
dbt.Fatalf("LONGBLOB: length in: %d, length out: %d", len(in), len(out))
}
if rows.Next() {
- dbt.Error("LONGBLOB: unexpexted row")
+ dbt.Error("LONGBLOB: unexpected row")
}
} else {
if err = rows.Err(); err != nil {
@@ -1245,7 +1353,7 @@ func TestLoadData(t *testing.T) {
dbt.mustExec("CREATE TABLE test (id INT NOT NULL PRIMARY KEY, value TEXT NOT NULL) CHARACTER SET utf8")
// Local File
- file, err := ioutil.TempFile("", "gotest")
+ file, err := os.CreateTemp("", "gotest")
defer os.Remove(file.Name())
if err != nil {
dbt.Fatal(err)
@@ -1263,7 +1371,7 @@ func TestLoadData(t *testing.T) {
dbt.Fatalf("unexpected row count: got %d, want 0", count)
}
- // Then fille File with data and try to load it
+ // Then fill File with data and try to load it
file.WriteString("1\ta string\n2\ta string containing a \\t\n3\ta string containing a \\n\n4\ta string containing both \\t\\n\n")
file.Close()
dbt.mustExec(fmt.Sprintf("LOAD DATA LOCAL INFILE %q INTO TABLE test", file.Name()))
@@ -1294,18 +1402,18 @@ func TestLoadData(t *testing.T) {
_, err = dbt.db.Exec("LOAD DATA LOCAL INFILE 'Reader::doesnotexist' INTO TABLE test")
if err == nil {
dbt.Fatal("load non-existent Reader didn't fail")
- } else if err.Error() != "Reader 'doesnotexist' is not registered" {
+ } else if err.Error() != "reader 'doesnotexist' is not registered" {
dbt.Fatal(err.Error())
}
})
}
-func TestFoundRows(t *testing.T) {
- runTests(t, dsn, func(dbt *DBTest) {
- dbt.mustExec("CREATE TABLE test (id INT NOT NULL ,data INT NOT NULL)")
- dbt.mustExec("INSERT INTO test (id, data) VALUES (0, 0),(0, 0),(1, 0),(1, 0),(1, 1)")
+func TestFoundRows1(t *testing.T) {
+ runTestsParallel(t, dsn, func(dbt *DBTest, tbl string) {
+ dbt.mustExec("CREATE TABLE " + tbl + " (id INT NOT NULL ,data INT NOT NULL)")
+ dbt.mustExec("INSERT INTO " + tbl + " (id, data) VALUES (0, 0),(0, 0),(1, 0),(1, 0),(1, 1)")
- res := dbt.mustExec("UPDATE test SET data = 1 WHERE id = 0")
+ res := dbt.mustExec("UPDATE " + tbl + " SET data = 1 WHERE id = 0")
count, err := res.RowsAffected()
if err != nil {
dbt.Fatalf("res.RowsAffected() returned error: %s", err.Error())
@@ -1313,7 +1421,7 @@ func TestFoundRows(t *testing.T) {
if count != 2 {
dbt.Fatalf("Expected 2 affected rows, got %d", count)
}
- res = dbt.mustExec("UPDATE test SET data = 1 WHERE id = 1")
+ res = dbt.mustExec("UPDATE " + tbl + " SET data = 1 WHERE id = 1")
count, err = res.RowsAffected()
if err != nil {
dbt.Fatalf("res.RowsAffected() returned error: %s", err.Error())
@@ -1322,11 +1430,14 @@ func TestFoundRows(t *testing.T) {
dbt.Fatalf("Expected 2 affected rows, got %d", count)
}
})
- runTests(t, dsn+"&clientFoundRows=true", func(dbt *DBTest) {
- dbt.mustExec("CREATE TABLE test (id INT NOT NULL ,data INT NOT NULL)")
- dbt.mustExec("INSERT INTO test (id, data) VALUES (0, 0),(0, 0),(1, 0),(1, 0),(1, 1)")
+}
+
+func TestFoundRows2(t *testing.T) {
+ runTestsParallel(t, dsn+"&clientFoundRows=true", func(dbt *DBTest, tbl string) {
+ dbt.mustExec("CREATE TABLE " + tbl + " (id INT NOT NULL ,data INT NOT NULL)")
+ dbt.mustExec("INSERT INTO " + tbl + " (id, data) VALUES (0, 0),(0, 0),(1, 0),(1, 0),(1, 1)")
- res := dbt.mustExec("UPDATE test SET data = 1 WHERE id = 0")
+ res := dbt.mustExec("UPDATE " + tbl + " SET data = 1 WHERE id = 0")
count, err := res.RowsAffected()
if err != nil {
dbt.Fatalf("res.RowsAffected() returned error: %s", err.Error())
@@ -1334,7 +1445,7 @@ func TestFoundRows(t *testing.T) {
if count != 2 {
dbt.Fatalf("Expected 2 matched rows, got %d", count)
}
- res = dbt.mustExec("UPDATE test SET data = 1 WHERE id = 1")
+ res = dbt.mustExec("UPDATE " + tbl + " SET data = 1 WHERE id = 1")
count, err = res.RowsAffected()
if err != nil {
dbt.Fatalf("res.RowsAffected() returned error: %s", err.Error())
@@ -1402,6 +1513,7 @@ func TestReuseClosedConnection(t *testing.T) {
if err != nil {
t.Fatalf("error preparing statement: %s", err.Error())
}
+ //lint:ignore SA1019 this is a test
_, err = stmt.Exec(nil)
if err != nil {
t.Fatalf("error executing statement: %s", err.Error())
@@ -1416,6 +1528,7 @@ func TestReuseClosedConnection(t *testing.T) {
t.Errorf("panic after reusing a closed connection: %v", err)
}
}()
+ //lint:ignore SA1019 this is a test
_, err = stmt.Exec(nil)
if err != nil && err != driver.ErrBadConn {
t.Errorf("unexpected error '%s', expected '%s'",
@@ -1458,7 +1571,7 @@ func TestCharset(t *testing.T) {
}
func TestFailingCharset(t *testing.T) {
- runTests(t, dsn+"&charset=none", func(dbt *DBTest) {
+ runTestsParallel(t, dsn+"&charset=none", func(dbt *DBTest, _ string) {
// run query to really establish connection...
_, err := dbt.db.Exec("SELECT 1")
if err == nil {
@@ -1507,7 +1620,7 @@ func TestCollation(t *testing.T) {
}
func TestColumnsWithAlias(t *testing.T) {
- runTests(t, dsn+"&columnsWithAlias=true", func(dbt *DBTest) {
+ runTestsParallel(t, dsn+"&columnsWithAlias=true", func(dbt *DBTest, _ string) {
rows := dbt.mustQuery("SELECT 1 AS A")
defer rows.Close()
cols, _ := rows.Columns()
@@ -1531,7 +1644,7 @@ func TestColumnsWithAlias(t *testing.T) {
}
func TestRawBytesResultExceedsBuffer(t *testing.T) {
- runTests(t, dsn, func(dbt *DBTest) {
+ runTestsParallel(t, dsn, func(dbt *DBTest, _ string) {
// defaultBufSize from buffer.go
expected := strings.Repeat("abc", defaultBufSize)
@@ -1590,7 +1703,7 @@ func TestTimezoneConversion(t *testing.T) {
// Special cases
func TestRowsClose(t *testing.T) {
- runTests(t, dsn, func(dbt *DBTest) {
+ runTestsParallel(t, dsn, func(dbt *DBTest, _ string) {
rows, err := dbt.db.Query("SELECT 1")
if err != nil {
dbt.Fatal(err)
@@ -1615,7 +1728,7 @@ func TestRowsClose(t *testing.T) {
// dangling statements
// http://code.google.com/p/go/issues/detail?id=3865
func TestCloseStmtBeforeRows(t *testing.T) {
- runTests(t, dsn, func(dbt *DBTest) {
+ runTestsParallel(t, dsn, func(dbt *DBTest, _ string) {
stmt, err := dbt.db.Prepare("SELECT 1")
if err != nil {
dbt.Fatal(err)
@@ -1656,7 +1769,7 @@ func TestCloseStmtBeforeRows(t *testing.T) {
// It is valid to have multiple Rows for the same Stmt
// http://code.google.com/p/go/issues/detail?id=3734
func TestStmtMultiRows(t *testing.T) {
- runTests(t, dsn, func(dbt *DBTest) {
+ runTestsParallel(t, dsn, func(dbt *DBTest, _ string) {
stmt, err := dbt.db.Prepare("SELECT 1 UNION SELECT 0")
if err != nil {
dbt.Fatal(err)
@@ -1782,7 +1895,7 @@ func TestPreparedManyCols(t *testing.T) {
// create more parameters than fit into the buffer
// which will take nil-values
- params := make([]interface{}, numParams)
+ params := make([]any, numParams)
rows, err := stmt.Query(params...)
if err != nil {
dbt.Fatal(err)
@@ -1807,13 +1920,13 @@ func TestConcurrent(t *testing.T) {
}
runTests(t, dsn, func(dbt *DBTest) {
- var version string
- if err := dbt.db.QueryRow("SELECT @@version").Scan(&version); err != nil {
- dbt.Fatalf("%s", err.Error())
- }
- if strings.Contains(strings.ToLower(version), "mariadb") {
- t.Skip(`TODO: "fix commands out of sync. Did you run multiple statements at once?" on MariaDB`)
- }
+ // var version string
+ // if err := dbt.db.QueryRow("SELECT @@version").Scan(&version); err != nil {
+ // dbt.Fatal(err)
+ // }
+ // if strings.Contains(strings.ToLower(version), "mariadb") {
+ // t.Skip(`TODO: "fix commands out of sync. Did you run multiple statements at once?" on MariaDB`)
+ // }
var max int
err := dbt.db.QueryRow("SELECT @@max_connections").Scan(&max)
@@ -1829,7 +1942,7 @@ func TestConcurrent(t *testing.T) {
var fatalError string
var once sync.Once
- fatalf := func(s string, vals ...interface{}) {
+ fatalf := func(s string, vals ...any) {
once.Do(func() {
fatalError = fmt.Sprintf(s, vals...)
})
@@ -1840,7 +1953,6 @@ func TestConcurrent(t *testing.T) {
defer wg.Done()
tx, err := dbt.db.Begin()
- atomic.AddInt32(&remaining, -1)
if err != nil {
if err.Error() != "Error 1040: Too many connections" {
@@ -1850,7 +1962,7 @@ func TestConcurrent(t *testing.T) {
}
// keep the connection busy until all connections are open
- for remaining > 0 {
+ for atomic.AddInt32(&remaining, -1) > 0 {
if _, err = tx.Exec("DO 1"); err != nil {
fatalf("error on conn %d: %s", id, err.Error())
return
@@ -1867,7 +1979,7 @@ func TestConcurrent(t *testing.T) {
}(i)
}
- // wait until all conections are open
+ // wait until all connections are open
wg.Wait()
if fatalError != "" {
@@ -1883,7 +1995,7 @@ func testDialError(t *testing.T, dialErr error, expectErr error) {
return nil, dialErr
})
- db, err := sql.Open("mysql", fmt.Sprintf("%s:%s@mydial(%s)/%s?timeout=30s", user, pass, addr, dbname))
+ db, err := sql.Open(driverNameTest, fmt.Sprintf("%s:%s@mydial(%s)/%s?timeout=30s", user, pass, addr, dbname))
if err != nil {
t.Fatalf("error connecting: %s", err.Error())
}
@@ -1916,13 +2028,13 @@ func TestCustomDial(t *testing.T) {
t.Skipf("MySQL server not running on %s", netAddr)
}
- // our custom dial function which justs wraps net.Dial here
+ // our custom dial function which just wraps net.Dial here
RegisterDialContext("mydial", func(ctx context.Context, addr string) (net.Conn, error) {
var d net.Dialer
return d.DialContext(ctx, prot, addr)
})
- db, err := sql.Open("mysql", fmt.Sprintf("%s:%s@mydial(%s)/%s?timeout=30s", user, pass, addr, dbname))
+ db, err := sql.Open(driverNameTest, fmt.Sprintf("%s:%s@mydial(%s)/%s?timeout=30s", user, pass, addr, dbname))
if err != nil {
t.Fatalf("error connecting: %s", err.Error())
}
@@ -1933,6 +2045,40 @@ func TestCustomDial(t *testing.T) {
}
}
+func TestBeforeConnect(t *testing.T) {
+ if !available {
+ t.Skipf("MySQL server not running on %s", netAddr)
+ }
+
+ // dbname is set in the BeforeConnect handle
+ cfg, err := ParseDSN(fmt.Sprintf("%s:%s@%s/%s?timeout=30s", user, pass, netAddr, "_"))
+ if err != nil {
+ t.Fatalf("error parsing DSN: %v", err)
+ }
+
+ cfg.Apply(BeforeConnect(func(ctx context.Context, c *Config) error {
+ c.DBName = dbname
+ return nil
+ }))
+
+ connector, err := NewConnector(cfg)
+ if err != nil {
+ t.Fatalf("error creating connector: %v", err)
+ }
+
+ db := sql.OpenDB(connector)
+ defer db.Close()
+
+ var connectedDb string
+ err = db.QueryRow("SELECT DATABASE();").Scan(&connectedDb)
+ if err != nil {
+ t.Fatalf("error executing query: %v", err)
+ }
+ if connectedDb != dbname {
+ t.Fatalf("expected to connect to DB %s, but connected to %s instead", dbname, connectedDb)
+ }
+}
+
func TestSQLInjection(t *testing.T) {
createTest := func(arg string) func(dbt *DBTest) {
return func(dbt *DBTest) {
@@ -1995,7 +2141,7 @@ func TestInsertRetrieveEscapedData(t *testing.T) {
func TestUnixSocketAuthFail(t *testing.T) {
runTests(t, dsn, func(dbt *DBTest) {
// Save the current logger so we can restore it.
- oldLogger := errLog
+ oldLogger := defaultLogger
// Set a new logger so we can capture its output.
buffer := bytes.NewBuffer(make([]byte, 0, 64))
@@ -2020,7 +2166,7 @@ func TestUnixSocketAuthFail(t *testing.T) {
}
t.Logf("socket: %s", socket)
badDSN := fmt.Sprintf("%s:%s@unix(%s)/%s?timeout=30s", user, badPass, socket, dbname)
- db, err := sql.Open("mysql", badDSN)
+ db, err := sql.Open(driverNameTest, badDSN)
if err != nil {
t.Fatalf("error connecting: %s", err.Error())
}
@@ -2155,11 +2301,51 @@ func TestRejectReadOnly(t *testing.T) {
}
func TestPing(t *testing.T) {
+ ctx := context.Background()
runTests(t, dsn, func(dbt *DBTest) {
if err := dbt.db.Ping(); err != nil {
dbt.fail("Ping", "Ping", err)
}
})
+
+ runTests(t, dsn, func(dbt *DBTest) {
+ conn, err := dbt.db.Conn(ctx)
+ if err != nil {
+ dbt.fail("db", "Conn", err)
+ }
+
+ // Check that affectedRows and insertIds are cleared after each call.
+ conn.Raw(func(conn any) error {
+ c := conn.(*mysqlConn)
+
+ // Issue a query that sets affectedRows and insertIds.
+ q, err := c.Query(`SELECT 1`, nil)
+ if err != nil {
+ dbt.fail("Conn", "Query", err)
+ }
+ if got, want := c.result.affectedRows, []int64{0}; !reflect.DeepEqual(got, want) {
+ dbt.Fatalf("bad affectedRows: got %v, want=%v", got, want)
+ }
+ if got, want := c.result.insertIds, []int64{0}; !reflect.DeepEqual(got, want) {
+ dbt.Fatalf("bad insertIds: got %v, want=%v", got, want)
+ }
+ q.Close()
+
+ // Verify that Ping() clears both fields.
+ for i := 0; i < 2; i++ {
+ if err := c.Ping(ctx); err != nil {
+ dbt.fail("Pinger", "Ping", err)
+ }
+ if got, want := c.result.affectedRows, []int64(nil); !reflect.DeepEqual(got, want) {
+ t.Errorf("bad affectedRows: got %v, want=%v", got, want)
+ }
+ if got, want := c.result.insertIds, []int64(nil); !reflect.DeepEqual(got, want) {
+ t.Errorf("bad affectedRows: got %v, want=%v", got, want)
+ }
+ }
+ return nil
+ })
+ })
}
// See Issue #799
@@ -2169,7 +2355,7 @@ func TestEmptyPassword(t *testing.T) {
}
dsn := fmt.Sprintf("%s:%s@%s/%s?timeout=30s", user, "", netAddr, dbname)
- db, err := sql.Open("mysql", dsn)
+ db, err := sql.Open(driverNameTest, dsn)
if err == nil {
defer db.Close()
err = db.Ping()
@@ -2379,10 +2565,47 @@ func TestMultiResultSetNoSelect(t *testing.T) {
})
}
+func TestExecMultipleResults(t *testing.T) {
+ ctx := context.Background()
+ runTestsWithMultiStatement(t, dsn, func(dbt *DBTest) {
+ dbt.mustExec(`
+ CREATE TABLE test (
+ id INT NOT NULL AUTO_INCREMENT,
+ value VARCHAR(255),
+ PRIMARY KEY (id)
+ )`)
+ conn, err := dbt.db.Conn(ctx)
+ if err != nil {
+ t.Fatalf("failed to connect: %v", err)
+ }
+ conn.Raw(func(conn any) error {
+ //lint:ignore SA1019 this is a test
+ ex := conn.(driver.Execer)
+ res, err := ex.Exec(`
+ INSERT INTO test (value) VALUES ('a'), ('b');
+ INSERT INTO test (value) VALUES ('c'), ('d'), ('e');
+ `, nil)
+ if err != nil {
+ t.Fatalf("insert statements failed: %v", err)
+ }
+ mres := res.(Result)
+ if got, want := mres.AllRowsAffected(), []int64{2, 3}; !reflect.DeepEqual(got, want) {
+ t.Errorf("bad AllRowsAffected: got %v, want=%v", got, want)
+ }
+ // For INSERTs containing multiple rows, LAST_INSERT_ID() returns the
+ // first inserted ID, not the last.
+ if got, want := mres.AllLastInsertIds(), []int64{1, 3}; !reflect.DeepEqual(got, want) {
+ t.Errorf("bad AllLastInsertIds: got %v, want %v", got, want)
+ }
+ return nil
+ })
+ })
+}
+
// tests if rows are set in a proper state if some results were ignored before
// calling rows.NextResultSet.
func TestSkipResults(t *testing.T) {
- runTests(t, dsn, func(dbt *DBTest) {
+ runTestsParallel(t, dsn, func(dbt *DBTest, _ string) {
rows := dbt.mustQuery("SELECT 1, 2")
defer rows.Close()
@@ -2400,8 +2623,44 @@ func TestSkipResults(t *testing.T) {
})
}
+func TestQueryMultipleResults(t *testing.T) {
+ ctx := context.Background()
+ runTestsWithMultiStatement(t, dsn, func(dbt *DBTest) {
+ dbt.mustExec(`
+ CREATE TABLE test (
+ id INT NOT NULL AUTO_INCREMENT,
+ value VARCHAR(255),
+ PRIMARY KEY (id)
+ )`)
+ conn, err := dbt.db.Conn(ctx)
+ if err != nil {
+ t.Fatalf("failed to connect: %v", err)
+ }
+ conn.Raw(func(conn any) error {
+ //lint:ignore SA1019 this is a test
+ qr := conn.(driver.Queryer)
+ c := conn.(*mysqlConn)
+
+ // Demonstrate that repeated queries reset the affectedRows
+ for i := 0; i < 2; i++ {
+ _, err := qr.Query(`
+ INSERT INTO test (value) VALUES ('a'), ('b');
+ INSERT INTO test (value) VALUES ('c'), ('d'), ('e');
+ `, nil)
+ if err != nil {
+ t.Fatalf("insert statements failed: %v", err)
+ }
+ if got, want := c.result.affectedRows, []int64{2, 3}; !reflect.DeepEqual(got, want) {
+ t.Errorf("bad affectedRows: got %v, want=%v", got, want)
+ }
+ }
+ return nil
+ })
+ })
+}
+
func TestPingContext(t *testing.T) {
- runTests(t, dsn, func(dbt *DBTest) {
+ runTestsParallel(t, dsn, func(dbt *DBTest, _ string) {
ctx, cancel := context.WithCancel(context.Background())
cancel()
if err := dbt.db.PingContext(ctx); err != context.Canceled {
@@ -2411,8 +2670,8 @@ func TestPingContext(t *testing.T) {
}
func TestContextCancelExec(t *testing.T) {
- runTests(t, dsn, func(dbt *DBTest) {
- dbt.mustExec("CREATE TABLE test (v INTEGER)")
+ runTestsParallel(t, dsn, func(dbt *DBTest, tbl string) {
+ dbt.mustExec("CREATE TABLE " + tbl + " (v INTEGER)")
ctx, cancel := context.WithCancel(context.Background())
// Delay execution for just a bit until db.ExecContext has begun.
@@ -2420,7 +2679,7 @@ func TestContextCancelExec(t *testing.T) {
// This query will be canceled.
startTime := time.Now()
- if _, err := dbt.db.ExecContext(ctx, "INSERT INTO test VALUES (SLEEP(1))"); err != context.Canceled {
+ if _, err := dbt.db.ExecContext(ctx, "INSERT INTO "+tbl+" VALUES (SLEEP(1))"); err != context.Canceled {
dbt.Errorf("expected context.Canceled, got %v", err)
}
if d := time.Since(startTime); d > 500*time.Millisecond {
@@ -2432,7 +2691,7 @@ func TestContextCancelExec(t *testing.T) {
// Check how many times the query is executed.
var v int
- if err := dbt.db.QueryRow("SELECT COUNT(*) FROM test").Scan(&v); err != nil {
+ if err := dbt.db.QueryRow("SELECT COUNT(*) FROM " + tbl).Scan(&v); err != nil {
dbt.Fatalf("%s", err.Error())
}
if v != 1 { // TODO: need to kill the query, and v should be 0.
@@ -2440,14 +2699,14 @@ func TestContextCancelExec(t *testing.T) {
}
// Context is already canceled, so error should come before execution.
- if _, err := dbt.db.ExecContext(ctx, "INSERT INTO test VALUES (1)"); err == nil {
+ if _, err := dbt.db.ExecContext(ctx, "INSERT INTO "+tbl+" VALUES (1)"); err == nil {
dbt.Error("expected error")
} else if err.Error() != "context canceled" {
dbt.Fatalf("unexpected error: %s", err)
}
// The second insert query will fail, so the table has no changes.
- if err := dbt.db.QueryRow("SELECT COUNT(*) FROM test").Scan(&v); err != nil {
+ if err := dbt.db.QueryRow("SELECT COUNT(*) FROM " + tbl).Scan(&v); err != nil {
dbt.Fatalf("%s", err.Error())
}
if v != 1 {
@@ -2457,8 +2716,8 @@ func TestContextCancelExec(t *testing.T) {
}
func TestContextCancelQuery(t *testing.T) {
- runTests(t, dsn, func(dbt *DBTest) {
- dbt.mustExec("CREATE TABLE test (v INTEGER)")
+ runTestsParallel(t, dsn, func(dbt *DBTest, tbl string) {
+ dbt.mustExec("CREATE TABLE " + tbl + " (v INTEGER)")
ctx, cancel := context.WithCancel(context.Background())
// Delay execution for just a bit until db.ExecContext has begun.
@@ -2466,7 +2725,7 @@ func TestContextCancelQuery(t *testing.T) {
// This query will be canceled.
startTime := time.Now()
- if _, err := dbt.db.QueryContext(ctx, "INSERT INTO test VALUES (SLEEP(1))"); err != context.Canceled {
+ if _, err := dbt.db.QueryContext(ctx, "INSERT INTO "+tbl+" VALUES (SLEEP(1))"); err != context.Canceled {
dbt.Errorf("expected context.Canceled, got %v", err)
}
if d := time.Since(startTime); d > 500*time.Millisecond {
@@ -2478,7 +2737,7 @@ func TestContextCancelQuery(t *testing.T) {
// Check how many times the query is executed.
var v int
- if err := dbt.db.QueryRow("SELECT COUNT(*) FROM test").Scan(&v); err != nil {
+ if err := dbt.db.QueryRow("SELECT COUNT(*) FROM " + tbl).Scan(&v); err != nil {
dbt.Fatalf("%s", err.Error())
}
if v != 1 { // TODO: need to kill the query, and v should be 0.
@@ -2486,12 +2745,12 @@ func TestContextCancelQuery(t *testing.T) {
}
// Context is already canceled, so error should come before execution.
- if _, err := dbt.db.QueryContext(ctx, "INSERT INTO test VALUES (1)"); err != context.Canceled {
+ if _, err := dbt.db.QueryContext(ctx, "INSERT INTO "+tbl+" VALUES (1)"); err != context.Canceled {
dbt.Errorf("expected context.Canceled, got %v", err)
}
// The second insert query will fail, so the table has no changes.
- if err := dbt.db.QueryRow("SELECT COUNT(*) FROM test").Scan(&v); err != nil {
+ if err := dbt.db.QueryRow("SELECT COUNT(*) FROM " + tbl).Scan(&v); err != nil {
dbt.Fatalf("%s", err.Error())
}
if v != 1 {
@@ -2501,12 +2760,12 @@ func TestContextCancelQuery(t *testing.T) {
}
func TestContextCancelQueryRow(t *testing.T) {
- runTests(t, dsn, func(dbt *DBTest) {
- dbt.mustExec("CREATE TABLE test (v INTEGER)")
- dbt.mustExec("INSERT INTO test VALUES (1), (2), (3)")
+ runTestsParallel(t, dsn, func(dbt *DBTest, tbl string) {
+ dbt.mustExec("CREATE TABLE " + tbl + " (v INTEGER)")
+ dbt.mustExec("INSERT INTO " + tbl + " VALUES (1), (2), (3)")
ctx, cancel := context.WithCancel(context.Background())
- rows, err := dbt.db.QueryContext(ctx, "SELECT v FROM test")
+ rows, err := dbt.db.QueryContext(ctx, "SELECT v FROM "+tbl)
if err != nil {
dbt.Fatalf("%s", err.Error())
}
@@ -2534,7 +2793,7 @@ func TestContextCancelQueryRow(t *testing.T) {
}
func TestContextCancelPrepare(t *testing.T) {
- runTests(t, dsn, func(dbt *DBTest) {
+ runTestsParallel(t, dsn, func(dbt *DBTest, _ string) {
ctx, cancel := context.WithCancel(context.Background())
cancel()
if _, err := dbt.db.PrepareContext(ctx, "SELECT 1"); err != context.Canceled {
@@ -2544,10 +2803,10 @@ func TestContextCancelPrepare(t *testing.T) {
}
func TestContextCancelStmtExec(t *testing.T) {
- runTests(t, dsn, func(dbt *DBTest) {
- dbt.mustExec("CREATE TABLE test (v INTEGER)")
+ runTestsParallel(t, dsn, func(dbt *DBTest, tbl string) {
+ dbt.mustExec("CREATE TABLE " + tbl + " (v INTEGER)")
ctx, cancel := context.WithCancel(context.Background())
- stmt, err := dbt.db.PrepareContext(ctx, "INSERT INTO test VALUES (SLEEP(1))")
+ stmt, err := dbt.db.PrepareContext(ctx, "INSERT INTO "+tbl+" VALUES (SLEEP(1))")
if err != nil {
dbt.Fatalf("unexpected error: %v", err)
}
@@ -2569,7 +2828,7 @@ func TestContextCancelStmtExec(t *testing.T) {
// Check how many times the query is executed.
var v int
- if err := dbt.db.QueryRow("SELECT COUNT(*) FROM test").Scan(&v); err != nil {
+ if err := dbt.db.QueryRow("SELECT COUNT(*) FROM " + tbl).Scan(&v); err != nil {
dbt.Fatalf("%s", err.Error())
}
if v != 1 { // TODO: need to kill the query, and v should be 0.
@@ -2579,10 +2838,10 @@ func TestContextCancelStmtExec(t *testing.T) {
}
func TestContextCancelStmtQuery(t *testing.T) {
- runTests(t, dsn, func(dbt *DBTest) {
- dbt.mustExec("CREATE TABLE test (v INTEGER)")
+ runTestsParallel(t, dsn, func(dbt *DBTest, tbl string) {
+ dbt.mustExec("CREATE TABLE " + tbl + " (v INTEGER)")
ctx, cancel := context.WithCancel(context.Background())
- stmt, err := dbt.db.PrepareContext(ctx, "INSERT INTO test VALUES (SLEEP(1))")
+ stmt, err := dbt.db.PrepareContext(ctx, "INSERT INTO "+tbl+" VALUES (SLEEP(1))")
if err != nil {
dbt.Fatalf("unexpected error: %v", err)
}
@@ -2604,7 +2863,7 @@ func TestContextCancelStmtQuery(t *testing.T) {
// Check how many times the query is executed.
var v int
- if err := dbt.db.QueryRow("SELECT COUNT(*) FROM test").Scan(&v); err != nil {
+ if err := dbt.db.QueryRow("SELECT COUNT(*) FROM " + tbl).Scan(&v); err != nil {
dbt.Fatalf("%s", err.Error())
}
if v != 1 { // TODO: need to kill the query, and v should be 0.
@@ -2618,8 +2877,8 @@ func TestContextCancelBegin(t *testing.T) {
t.Skip(`FIXME: it sometime fails with "expected driver.ErrBadConn, got sql: connection is already closed" on windows and macOS`)
}
- runTests(t, dsn, func(dbt *DBTest) {
- dbt.mustExec("CREATE TABLE test (v INTEGER)")
+ runTestsParallel(t, dsn, func(dbt *DBTest, tbl string) {
+ dbt.mustExec("CREATE TABLE " + tbl + " (v INTEGER)")
ctx, cancel := context.WithCancel(context.Background())
conn, err := dbt.db.Conn(ctx)
if err != nil {
@@ -2636,7 +2895,7 @@ func TestContextCancelBegin(t *testing.T) {
// This query will be canceled.
startTime := time.Now()
- if _, err := tx.ExecContext(ctx, "INSERT INTO test VALUES (SLEEP(1))"); err != context.Canceled {
+ if _, err := tx.ExecContext(ctx, "INSERT INTO "+tbl+" VALUES (SLEEP(1))"); err != context.Canceled {
dbt.Errorf("expected context.Canceled, got %v", err)
}
if d := time.Since(startTime); d > 500*time.Millisecond {
@@ -2674,8 +2933,8 @@ func TestContextCancelBegin(t *testing.T) {
}
func TestContextBeginIsolationLevel(t *testing.T) {
- runTests(t, dsn, func(dbt *DBTest) {
- dbt.mustExec("CREATE TABLE test (v INTEGER)")
+ runTestsParallel(t, dsn, func(dbt *DBTest, tbl string) {
+ dbt.mustExec("CREATE TABLE " + tbl + " (v INTEGER)")
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
@@ -2693,13 +2952,13 @@ func TestContextBeginIsolationLevel(t *testing.T) {
dbt.Fatal(err)
}
- _, err = tx1.ExecContext(ctx, "INSERT INTO test VALUES (1)")
+ _, err = tx1.ExecContext(ctx, "INSERT INTO "+tbl+" VALUES (1)")
if err != nil {
dbt.Fatal(err)
}
var v int
- row := tx2.QueryRowContext(ctx, "SELECT COUNT(*) FROM test")
+ row := tx2.QueryRowContext(ctx, "SELECT COUNT(*) FROM "+tbl)
if err := row.Scan(&v); err != nil {
dbt.Fatal(err)
}
@@ -2713,7 +2972,7 @@ func TestContextBeginIsolationLevel(t *testing.T) {
dbt.Fatal(err)
}
- row = tx2.QueryRowContext(ctx, "SELECT COUNT(*) FROM test")
+ row = tx2.QueryRowContext(ctx, "SELECT COUNT(*) FROM "+tbl)
if err := row.Scan(&v); err != nil {
dbt.Fatal(err)
}
@@ -2726,8 +2985,8 @@ func TestContextBeginIsolationLevel(t *testing.T) {
}
func TestContextBeginReadOnly(t *testing.T) {
- runTests(t, dsn, func(dbt *DBTest) {
- dbt.mustExec("CREATE TABLE test (v INTEGER)")
+ runTestsParallel(t, dsn, func(dbt *DBTest, tbl string) {
+ dbt.mustExec("CREATE TABLE " + tbl + " (v INTEGER)")
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
@@ -2742,14 +3001,14 @@ func TestContextBeginReadOnly(t *testing.T) {
}
// INSERT queries fail in a READ ONLY transaction.
- _, err = tx.ExecContext(ctx, "INSERT INTO test VALUES (1)")
+ _, err = tx.ExecContext(ctx, "INSERT INTO "+tbl+" VALUES (1)")
if _, ok := err.(*MySQLError); !ok {
dbt.Errorf("expected MySQLError, got %v", err)
}
// SELECT queries can be executed.
var v int
- row := tx.QueryRowContext(ctx, "SELECT COUNT(*) FROM test")
+ row := tx.QueryRowContext(ctx, "SELECT COUNT(*) FROM "+tbl)
if err := row.Scan(&v); err != nil {
dbt.Fatal(err)
}
@@ -2778,13 +3037,18 @@ func TestRowsColumnTypes(t *testing.T) {
nd1 := sql.NullTime{Time: time.Date(2006, 01, 02, 0, 0, 0, 0, time.UTC), Valid: true}
nd2 := sql.NullTime{Time: time.Date(2006, 03, 04, 0, 0, 0, 0, time.UTC), Valid: true}
ndNULL := sql.NullTime{Time: time.Time{}, Valid: false}
- rbNULL := sql.RawBytes(nil)
- rb0 := sql.RawBytes("0")
- rb42 := sql.RawBytes("42")
- rbTest := sql.RawBytes("Test")
- rb0pad4 := sql.RawBytes("0\x00\x00\x00") // BINARY right-pads values with 0x00
- rbx0 := sql.RawBytes("\x00")
- rbx42 := sql.RawBytes("\x42")
+ bNULL := []byte(nil)
+ nsNULL := sql.NullString{String: "", Valid: false}
+ // Helper function to build NullString from string literal.
+ ns := func(s string) sql.NullString { return sql.NullString{String: s, Valid: true} }
+ ns0 := ns("0")
+ b0 := []byte("0")
+ b42 := []byte("42")
+ nsTest := ns("Test")
+ bTest := []byte("Test")
+ b0pad4 := []byte("0\x00\x00\x00") // BINARY right-pads values with 0x00
+ bx0 := []byte("\x00")
+ bx42 := []byte("\x42")
var columns = []struct {
name string
@@ -2795,51 +3059,54 @@ func TestRowsColumnTypes(t *testing.T) {
precision int64 // 0 if not ok
scale int64
valuesIn [3]string
- valuesOut [3]interface{}
+ valuesOut [3]any
}{
- {"bit8null", "BIT(8)", "BIT", scanTypeRawBytes, true, 0, 0, [3]string{"0x0", "NULL", "0x42"}, [3]interface{}{rbx0, rbNULL, rbx42}},
- {"boolnull", "BOOL", "TINYINT", scanTypeNullInt, true, 0, 0, [3]string{"NULL", "true", "0"}, [3]interface{}{niNULL, ni1, ni0}},
- {"bool", "BOOL NOT NULL", "TINYINT", scanTypeInt8, false, 0, 0, [3]string{"1", "0", "FALSE"}, [3]interface{}{int8(1), int8(0), int8(0)}},
- {"intnull", "INTEGER", "INT", scanTypeNullInt, true, 0, 0, [3]string{"0", "NULL", "42"}, [3]interface{}{ni0, niNULL, ni42}},
- {"smallint", "SMALLINT NOT NULL", "SMALLINT", scanTypeInt16, false, 0, 0, [3]string{"0", "-32768", "32767"}, [3]interface{}{int16(0), int16(-32768), int16(32767)}},
- {"smallintnull", "SMALLINT", "SMALLINT", scanTypeNullInt, true, 0, 0, [3]string{"0", "NULL", "42"}, [3]interface{}{ni0, niNULL, ni42}},
- {"int3null", "INT(3)", "INT", scanTypeNullInt, true, 0, 0, [3]string{"0", "NULL", "42"}, [3]interface{}{ni0, niNULL, ni42}},
- {"int7", "INT(7) NOT NULL", "INT", scanTypeInt32, false, 0, 0, [3]string{"0", "-1337", "42"}, [3]interface{}{int32(0), int32(-1337), int32(42)}},
- {"mediumintnull", "MEDIUMINT", "MEDIUMINT", scanTypeNullInt, true, 0, 0, [3]string{"0", "42", "NULL"}, [3]interface{}{ni0, ni42, niNULL}},
- {"bigint", "BIGINT NOT NULL", "BIGINT", scanTypeInt64, false, 0, 0, [3]string{"0", "65535", "-42"}, [3]interface{}{int64(0), int64(65535), int64(-42)}},
- {"bigintnull", "BIGINT", "BIGINT", scanTypeNullInt, true, 0, 0, [3]string{"NULL", "1", "42"}, [3]interface{}{niNULL, ni1, ni42}},
- {"tinyuint", "TINYINT UNSIGNED NOT NULL", "UNSIGNED TINYINT", scanTypeUint8, false, 0, 0, [3]string{"0", "255", "42"}, [3]interface{}{uint8(0), uint8(255), uint8(42)}},
- {"smalluint", "SMALLINT UNSIGNED NOT NULL", "UNSIGNED SMALLINT", scanTypeUint16, false, 0, 0, [3]string{"0", "65535", "42"}, [3]interface{}{uint16(0), uint16(65535), uint16(42)}},
- {"biguint", "BIGINT UNSIGNED NOT NULL", "UNSIGNED BIGINT", scanTypeUint64, false, 0, 0, [3]string{"0", "65535", "42"}, [3]interface{}{uint64(0), uint64(65535), uint64(42)}},
- {"uint13", "INT(13) UNSIGNED NOT NULL", "UNSIGNED INT", scanTypeUint32, false, 0, 0, [3]string{"0", "1337", "42"}, [3]interface{}{uint32(0), uint32(1337), uint32(42)}},
- {"float", "FLOAT NOT NULL", "FLOAT", scanTypeFloat32, false, math.MaxInt64, math.MaxInt64, [3]string{"0", "42", "13.37"}, [3]interface{}{float32(0), float32(42), float32(13.37)}},
- {"floatnull", "FLOAT", "FLOAT", scanTypeNullFloat, true, math.MaxInt64, math.MaxInt64, [3]string{"0", "NULL", "13.37"}, [3]interface{}{nf0, nfNULL, nf1337}},
- {"float74null", "FLOAT(7,4)", "FLOAT", scanTypeNullFloat, true, math.MaxInt64, 4, [3]string{"0", "NULL", "13.37"}, [3]interface{}{nf0, nfNULL, nf1337}},
- {"double", "DOUBLE NOT NULL", "DOUBLE", scanTypeFloat64, false, math.MaxInt64, math.MaxInt64, [3]string{"0", "42", "13.37"}, [3]interface{}{float64(0), float64(42), float64(13.37)}},
- {"doublenull", "DOUBLE", "DOUBLE", scanTypeNullFloat, true, math.MaxInt64, math.MaxInt64, [3]string{"0", "NULL", "13.37"}, [3]interface{}{nf0, nfNULL, nf1337}},
- {"decimal1", "DECIMAL(10,6) NOT NULL", "DECIMAL", scanTypeRawBytes, false, 10, 6, [3]string{"0", "13.37", "1234.123456"}, [3]interface{}{sql.RawBytes("0.000000"), sql.RawBytes("13.370000"), sql.RawBytes("1234.123456")}},
- {"decimal1null", "DECIMAL(10,6)", "DECIMAL", scanTypeRawBytes, true, 10, 6, [3]string{"0", "NULL", "1234.123456"}, [3]interface{}{sql.RawBytes("0.000000"), rbNULL, sql.RawBytes("1234.123456")}},
- {"decimal2", "DECIMAL(8,4) NOT NULL", "DECIMAL", scanTypeRawBytes, false, 8, 4, [3]string{"0", "13.37", "1234.123456"}, [3]interface{}{sql.RawBytes("0.0000"), sql.RawBytes("13.3700"), sql.RawBytes("1234.1235")}},
- {"decimal2null", "DECIMAL(8,4)", "DECIMAL", scanTypeRawBytes, true, 8, 4, [3]string{"0", "NULL", "1234.123456"}, [3]interface{}{sql.RawBytes("0.0000"), rbNULL, sql.RawBytes("1234.1235")}},
- {"decimal3", "DECIMAL(5,0) NOT NULL", "DECIMAL", scanTypeRawBytes, false, 5, 0, [3]string{"0", "13.37", "-12345.123456"}, [3]interface{}{rb0, sql.RawBytes("13"), sql.RawBytes("-12345")}},
- {"decimal3null", "DECIMAL(5,0)", "DECIMAL", scanTypeRawBytes, true, 5, 0, [3]string{"0", "NULL", "-12345.123456"}, [3]interface{}{rb0, rbNULL, sql.RawBytes("-12345")}},
- {"char25null", "CHAR(25)", "CHAR", scanTypeRawBytes, true, 0, 0, [3]string{"0", "NULL", "'Test'"}, [3]interface{}{rb0, rbNULL, rbTest}},
- {"varchar42", "VARCHAR(42) NOT NULL", "VARCHAR", scanTypeRawBytes, false, 0, 0, [3]string{"0", "'Test'", "42"}, [3]interface{}{rb0, rbTest, rb42}},
- {"binary4null", "BINARY(4)", "BINARY", scanTypeRawBytes, true, 0, 0, [3]string{"0", "NULL", "'Test'"}, [3]interface{}{rb0pad4, rbNULL, rbTest}},
- {"varbinary42", "VARBINARY(42) NOT NULL", "VARBINARY", scanTypeRawBytes, false, 0, 0, [3]string{"0", "'Test'", "42"}, [3]interface{}{rb0, rbTest, rb42}},
- {"tinyblobnull", "TINYBLOB", "BLOB", scanTypeRawBytes, true, 0, 0, [3]string{"0", "NULL", "'Test'"}, [3]interface{}{rb0, rbNULL, rbTest}},
- {"tinytextnull", "TINYTEXT", "TEXT", scanTypeRawBytes, true, 0, 0, [3]string{"0", "NULL", "'Test'"}, [3]interface{}{rb0, rbNULL, rbTest}},
- {"blobnull", "BLOB", "BLOB", scanTypeRawBytes, true, 0, 0, [3]string{"0", "NULL", "'Test'"}, [3]interface{}{rb0, rbNULL, rbTest}},
- {"textnull", "TEXT", "TEXT", scanTypeRawBytes, true, 0, 0, [3]string{"0", "NULL", "'Test'"}, [3]interface{}{rb0, rbNULL, rbTest}},
- {"mediumblob", "MEDIUMBLOB NOT NULL", "BLOB", scanTypeRawBytes, false, 0, 0, [3]string{"0", "'Test'", "42"}, [3]interface{}{rb0, rbTest, rb42}},
- {"mediumtext", "MEDIUMTEXT NOT NULL", "TEXT", scanTypeRawBytes, false, 0, 0, [3]string{"0", "'Test'", "42"}, [3]interface{}{rb0, rbTest, rb42}},
- {"longblob", "LONGBLOB NOT NULL", "BLOB", scanTypeRawBytes, false, 0, 0, [3]string{"0", "'Test'", "42"}, [3]interface{}{rb0, rbTest, rb42}},
- {"longtext", "LONGTEXT NOT NULL", "TEXT", scanTypeRawBytes, false, 0, 0, [3]string{"0", "'Test'", "42"}, [3]interface{}{rb0, rbTest, rb42}},
- {"datetime", "DATETIME", "DATETIME", scanTypeNullTime, true, 0, 0, [3]string{"'2006-01-02 15:04:05'", "'2006-01-02 15:04:05.1'", "'2006-01-02 15:04:05.111111'"}, [3]interface{}{nt0, nt0, nt0}},
- {"datetime2", "DATETIME(2)", "DATETIME", scanTypeNullTime, true, 2, 2, [3]string{"'2006-01-02 15:04:05'", "'2006-01-02 15:04:05.1'", "'2006-01-02 15:04:05.111111'"}, [3]interface{}{nt0, nt1, nt2}},
- {"datetime6", "DATETIME(6)", "DATETIME", scanTypeNullTime, true, 6, 6, [3]string{"'2006-01-02 15:04:05'", "'2006-01-02 15:04:05.1'", "'2006-01-02 15:04:05.111111'"}, [3]interface{}{nt0, nt1, nt6}},
- {"date", "DATE", "DATE", scanTypeNullTime, true, 0, 0, [3]string{"'2006-01-02'", "NULL", "'2006-03-04'"}, [3]interface{}{nd1, ndNULL, nd2}},
- {"year", "YEAR NOT NULL", "YEAR", scanTypeUint16, false, 0, 0, [3]string{"2006", "2000", "1994"}, [3]interface{}{uint16(2006), uint16(2000), uint16(1994)}},
+ {"bit8null", "BIT(8)", "BIT", scanTypeBytes, true, 0, 0, [3]string{"0x0", "NULL", "0x42"}, [3]any{bx0, bNULL, bx42}},
+ {"boolnull", "BOOL", "TINYINT", scanTypeNullInt, true, 0, 0, [3]string{"NULL", "true", "0"}, [3]any{niNULL, ni1, ni0}},
+ {"bool", "BOOL NOT NULL", "TINYINT", scanTypeInt8, false, 0, 0, [3]string{"1", "0", "FALSE"}, [3]any{int8(1), int8(0), int8(0)}},
+ {"intnull", "INTEGER", "INT", scanTypeNullInt, true, 0, 0, [3]string{"0", "NULL", "42"}, [3]any{ni0, niNULL, ni42}},
+ {"smallint", "SMALLINT NOT NULL", "SMALLINT", scanTypeInt16, false, 0, 0, [3]string{"0", "-32768", "32767"}, [3]any{int16(0), int16(-32768), int16(32767)}},
+ {"smallintnull", "SMALLINT", "SMALLINT", scanTypeNullInt, true, 0, 0, [3]string{"0", "NULL", "42"}, [3]any{ni0, niNULL, ni42}},
+ {"int3null", "INT(3)", "INT", scanTypeNullInt, true, 0, 0, [3]string{"0", "NULL", "42"}, [3]any{ni0, niNULL, ni42}},
+ {"int7", "INT(7) NOT NULL", "INT", scanTypeInt32, false, 0, 0, [3]string{"0", "-1337", "42"}, [3]any{int32(0), int32(-1337), int32(42)}},
+ {"mediumintnull", "MEDIUMINT", "MEDIUMINT", scanTypeNullInt, true, 0, 0, [3]string{"0", "42", "NULL"}, [3]any{ni0, ni42, niNULL}},
+ {"bigint", "BIGINT NOT NULL", "BIGINT", scanTypeInt64, false, 0, 0, [3]string{"0", "65535", "-42"}, [3]any{int64(0), int64(65535), int64(-42)}},
+ {"bigintnull", "BIGINT", "BIGINT", scanTypeNullInt, true, 0, 0, [3]string{"NULL", "1", "42"}, [3]any{niNULL, ni1, ni42}},
+ {"tinyuint", "TINYINT UNSIGNED NOT NULL", "UNSIGNED TINYINT", scanTypeUint8, false, 0, 0, [3]string{"0", "255", "42"}, [3]any{uint8(0), uint8(255), uint8(42)}},
+ {"smalluint", "SMALLINT UNSIGNED NOT NULL", "UNSIGNED SMALLINT", scanTypeUint16, false, 0, 0, [3]string{"0", "65535", "42"}, [3]any{uint16(0), uint16(65535), uint16(42)}},
+ {"biguint", "BIGINT UNSIGNED NOT NULL", "UNSIGNED BIGINT", scanTypeUint64, false, 0, 0, [3]string{"0", "65535", "42"}, [3]any{uint64(0), uint64(65535), uint64(42)}},
+ {"mediumuint", "MEDIUMINT UNSIGNED NOT NULL", "UNSIGNED MEDIUMINT", scanTypeUint32, false, 0, 0, [3]string{"0", "16777215", "42"}, [3]any{uint32(0), uint32(16777215), uint32(42)}},
+ {"uint13", "INT(13) UNSIGNED NOT NULL", "UNSIGNED INT", scanTypeUint32, false, 0, 0, [3]string{"0", "1337", "42"}, [3]any{uint32(0), uint32(1337), uint32(42)}},
+ {"float", "FLOAT NOT NULL", "FLOAT", scanTypeFloat32, false, math.MaxInt64, math.MaxInt64, [3]string{"0", "42", "13.37"}, [3]any{float32(0), float32(42), float32(13.37)}},
+ {"floatnull", "FLOAT", "FLOAT", scanTypeNullFloat, true, math.MaxInt64, math.MaxInt64, [3]string{"0", "NULL", "13.37"}, [3]any{nf0, nfNULL, nf1337}},
+ {"float74null", "FLOAT(7,4)", "FLOAT", scanTypeNullFloat, true, math.MaxInt64, 4, [3]string{"0", "NULL", "13.37"}, [3]any{nf0, nfNULL, nf1337}},
+ {"double", "DOUBLE NOT NULL", "DOUBLE", scanTypeFloat64, false, math.MaxInt64, math.MaxInt64, [3]string{"0", "42", "13.37"}, [3]any{float64(0), float64(42), float64(13.37)}},
+ {"doublenull", "DOUBLE", "DOUBLE", scanTypeNullFloat, true, math.MaxInt64, math.MaxInt64, [3]string{"0", "NULL", "13.37"}, [3]any{nf0, nfNULL, nf1337}},
+ {"decimal1", "DECIMAL(10,6) NOT NULL", "DECIMAL", scanTypeString, false, 10, 6, [3]string{"0", "13.37", "1234.123456"}, [3]any{"0.000000", "13.370000", "1234.123456"}},
+ {"decimal1null", "DECIMAL(10,6)", "DECIMAL", scanTypeNullString, true, 10, 6, [3]string{"0", "NULL", "1234.123456"}, [3]any{ns("0.000000"), nsNULL, ns("1234.123456")}},
+ {"decimal2", "DECIMAL(8,4) NOT NULL", "DECIMAL", scanTypeString, false, 8, 4, [3]string{"0", "13.37", "1234.123456"}, [3]any{"0.0000", "13.3700", "1234.1235"}},
+ {"decimal2null", "DECIMAL(8,4)", "DECIMAL", scanTypeNullString, true, 8, 4, [3]string{"0", "NULL", "1234.123456"}, [3]any{ns("0.0000"), nsNULL, ns("1234.1235")}},
+ {"decimal3", "DECIMAL(5,0) NOT NULL", "DECIMAL", scanTypeString, false, 5, 0, [3]string{"0", "13.37", "-12345.123456"}, [3]any{"0", "13", "-12345"}},
+ {"decimal3null", "DECIMAL(5,0)", "DECIMAL", scanTypeNullString, true, 5, 0, [3]string{"0", "NULL", "-12345.123456"}, [3]any{ns0, nsNULL, ns("-12345")}},
+ {"char25null", "CHAR(25)", "CHAR", scanTypeNullString, true, 0, 0, [3]string{"0", "NULL", "'Test'"}, [3]any{ns0, nsNULL, nsTest}},
+ {"varchar42", "VARCHAR(42) NOT NULL", "VARCHAR", scanTypeString, false, 0, 0, [3]string{"0", "'Test'", "42"}, [3]any{"0", "Test", "42"}},
+ {"binary4null", "BINARY(4)", "BINARY", scanTypeBytes, true, 0, 0, [3]string{"0", "NULL", "'Test'"}, [3]any{b0pad4, bNULL, bTest}},
+ {"varbinary42", "VARBINARY(42) NOT NULL", "VARBINARY", scanTypeBytes, false, 0, 0, [3]string{"0", "'Test'", "42"}, [3]any{b0, bTest, b42}},
+ {"tinyblobnull", "TINYBLOB", "BLOB", scanTypeBytes, true, 0, 0, [3]string{"0", "NULL", "'Test'"}, [3]any{b0, bNULL, bTest}},
+ {"tinytextnull", "TINYTEXT", "TEXT", scanTypeNullString, true, 0, 0, [3]string{"0", "NULL", "'Test'"}, [3]any{ns0, nsNULL, nsTest}},
+ {"blobnull", "BLOB", "BLOB", scanTypeBytes, true, 0, 0, [3]string{"0", "NULL", "'Test'"}, [3]any{b0, bNULL, bTest}},
+ {"textnull", "TEXT", "TEXT", scanTypeNullString, true, 0, 0, [3]string{"0", "NULL", "'Test'"}, [3]any{ns0, nsNULL, nsTest}},
+ {"mediumblob", "MEDIUMBLOB NOT NULL", "BLOB", scanTypeBytes, false, 0, 0, [3]string{"0", "'Test'", "42"}, [3]any{b0, bTest, b42}},
+ {"mediumtext", "MEDIUMTEXT NOT NULL", "TEXT", scanTypeString, false, 0, 0, [3]string{"0", "'Test'", "42"}, [3]any{"0", "Test", "42"}},
+ {"longblob", "LONGBLOB NOT NULL", "BLOB", scanTypeBytes, false, 0, 0, [3]string{"0", "'Test'", "42"}, [3]any{b0, bTest, b42}},
+ {"longtext", "LONGTEXT NOT NULL", "TEXT", scanTypeString, false, 0, 0, [3]string{"0", "'Test'", "42"}, [3]any{"0", "Test", "42"}},
+ {"datetime", "DATETIME", "DATETIME", scanTypeNullTime, true, 0, 0, [3]string{"'2006-01-02 15:04:05'", "'2006-01-02 15:04:05.1'", "'2006-01-02 15:04:05.111111'"}, [3]any{nt0, nt0, nt0}},
+ {"datetime2", "DATETIME(2)", "DATETIME", scanTypeNullTime, true, 2, 2, [3]string{"'2006-01-02 15:04:05'", "'2006-01-02 15:04:05.1'", "'2006-01-02 15:04:05.111111'"}, [3]any{nt0, nt1, nt2}},
+ {"datetime6", "DATETIME(6)", "DATETIME", scanTypeNullTime, true, 6, 6, [3]string{"'2006-01-02 15:04:05'", "'2006-01-02 15:04:05.1'", "'2006-01-02 15:04:05.111111'"}, [3]any{nt0, nt1, nt6}},
+ {"date", "DATE", "DATE", scanTypeNullTime, true, 0, 0, [3]string{"'2006-01-02'", "NULL", "'2006-03-04'"}, [3]any{nd1, ndNULL, nd2}},
+ {"year", "YEAR NOT NULL", "YEAR", scanTypeUint16, false, 0, 0, [3]string{"2006", "2000", "1994"}, [3]any{uint16(2006), uint16(2000), uint16(1994)}},
+ {"enum", "ENUM('', 'v1', 'v2')", "ENUM", scanTypeNullString, true, 0, 0, [3]string{"''", "'v1'", "'v2'"}, [3]any{ns(""), ns("v1"), ns("v2")}},
+ {"set", "set('', 'v1', 'v2')", "SET", scanTypeNullString, true, 0, 0, [3]string{"''", "'v1'", "'v1,v2'"}, [3]any{ns(""), ns("v1"), ns("v1,v2")}},
}
schema := ""
@@ -2945,8 +3212,11 @@ func TestRowsColumnTypes(t *testing.T) {
continue
}
}
-
- values := make([]interface{}, len(tt))
+ // Avoid panic caused by nil scantype.
+ if t.Failed() {
+ return
+ }
+ values := make([]any, len(tt))
for i := range values {
values[i] = reflect.New(types[i]).Interface()
}
@@ -2956,14 +3226,10 @@ func TestRowsColumnTypes(t *testing.T) {
if err != nil {
t.Fatalf("failed to scan values in %v", err)
}
- for j := range values {
- value := reflect.ValueOf(values[j]).Elem().Interface()
+ for j, value := range values {
+ value := reflect.ValueOf(value).Elem().Interface()
if !reflect.DeepEqual(value, columns[j].valuesOut[i]) {
- if columns[j].scanType == scanTypeRawBytes {
- t.Errorf("row %d, column %d: %v != %v", i, j, string(value.(sql.RawBytes)), string(columns[j].valuesOut[i].(sql.RawBytes)))
- } else {
- t.Errorf("row %d, column %d: %v != %v", i, j, value, columns[j].valuesOut[i])
- }
+ t.Errorf("row %d, column %d: %v != %v", i, j, value, columns[j].valuesOut[i])
}
}
i++
@@ -2979,9 +3245,9 @@ func TestRowsColumnTypes(t *testing.T) {
}
func TestValuerWithValueReceiverGivenNilValue(t *testing.T) {
- runTests(t, dsn, func(dbt *DBTest) {
- dbt.mustExec("CREATE TABLE test (value VARCHAR(255))")
- dbt.db.Exec("INSERT INTO test VALUES (?)", (*testValuer)(nil))
+ runTestsParallel(t, dsn, func(dbt *DBTest, tbl string) {
+ dbt.mustExec("CREATE TABLE " + tbl + " (value VARCHAR(255))")
+ dbt.db.Exec("INSERT INTO "+tbl+" VALUES (?)", (*testValuer)(nil))
// This test will panic on the INSERT if ConvertValue() does not check for typed nil before calling Value()
})
}
@@ -3015,27 +3281,28 @@ func TestRawBytesAreNotModified(t *testing.T) {
rows, err := dbt.db.QueryContext(ctx, `SELECT id, value FROM test`)
if err != nil {
- t.Fatal(err)
+ dbt.Fatal(err)
}
+ defer rows.Close()
var b int
var raw sql.RawBytes
- for rows.Next() {
- if err := rows.Scan(&b, &raw); err != nil {
- t.Fatal(err)
- }
+ if !rows.Next() {
+ dbt.Fatal("expected at least one row")
+ }
+ if err := rows.Scan(&b, &raw); err != nil {
+ dbt.Fatal(err)
+ }
- before := string(raw)
- // Ensure cancelling the query does not corrupt the contents of `raw`
- cancel()
- time.Sleep(time.Microsecond * 100)
- after := string(raw)
+ before := string(raw)
+ // Ensure cancelling the query does not corrupt the contents of `raw`
+ cancel()
+ time.Sleep(time.Microsecond * 100)
+ after := string(raw)
- if before != after {
- t.Fatalf("the backing storage for sql.RawBytes has been modified (i=%v)", i)
- }
+ if before != after {
+ dbt.Fatalf("the backing storage for sql.RawBytes has been modified (i=%v)", i)
}
- rows.Close()
}()
}
})
@@ -3058,7 +3325,7 @@ func TestConnectorObeysDialTimeouts(t *testing.T) {
return d.DialContext(ctx, prot, addr)
})
- db, err := sql.Open("mysql", fmt.Sprintf("%s:%s@dialctxtest(%s)/%s?timeout=30s", user, pass, addr, dbname))
+ db, err := sql.Open(driverNameTest, fmt.Sprintf("%s:%s@dialctxtest(%s)/%s?timeout=30s", user, pass, addr, dbname))
if err != nil {
t.Fatalf("error connecting: %s", err.Error())
}
@@ -3209,3 +3476,137 @@ func TestConnectorTimeoutsWatchCancel(t *testing.T) {
t.Errorf("connection not closed")
}
}
+
+func TestConnectionAttributes(t *testing.T) {
+ if !available {
+ t.Skipf("MySQL server not running on %s", netAddr)
+ }
+
+ defaultAttrs := []string{
+ connAttrClientName,
+ connAttrOS,
+ connAttrPlatform,
+ connAttrPid,
+ connAttrServerHost,
+ }
+ host, _, _ := net.SplitHostPort(addr)
+ defaultAttrValues := []string{
+ connAttrClientNameValue,
+ connAttrOSValue,
+ connAttrPlatformValue,
+ strconv.Itoa(os.Getpid()),
+ host,
+ }
+
+ customAttrs := []string{"attr1", "fo/o"}
+ customAttrValues := []string{"value1", "bo/o"}
+
+ customAttrStrs := make([]string, len(customAttrs))
+ for i := range customAttrs {
+ customAttrStrs[i] = fmt.Sprintf("%s:%s", customAttrs[i], customAttrValues[i])
+ }
+ dsn += "&connectionAttributes=" + url.QueryEscape(strings.Join(customAttrStrs, ","))
+
+ var db *sql.DB
+ if _, err := ParseDSN(dsn); err != errInvalidDSNUnsafeCollation {
+ db, err = sql.Open(driverNameTest, dsn)
+ if err != nil {
+ t.Fatalf("error connecting: %s", err.Error())
+ }
+ defer db.Close()
+ }
+
+ dbt := &DBTest{t, db}
+
+ queryString := "SELECT ATTR_NAME, ATTR_VALUE FROM performance_schema.session_account_connect_attrs WHERE PROCESSLIST_ID = CONNECTION_ID()"
+ rows := dbt.mustQuery(queryString)
+ defer rows.Close()
+
+ rowsMap := make(map[string]string)
+ for rows.Next() {
+ var attrName, attrValue string
+ rows.Scan(&attrName, &attrValue)
+ rowsMap[attrName] = attrValue
+ }
+
+ connAttrs := append(append([]string{}, defaultAttrs...), customAttrs...)
+ expectedAttrValues := append(append([]string{}, defaultAttrValues...), customAttrValues...)
+ for i := range connAttrs {
+ if gotValue := rowsMap[connAttrs[i]]; gotValue != expectedAttrValues[i] {
+ dbt.Errorf("expected %q, got %q", expectedAttrValues[i], gotValue)
+ }
+ }
+}
+
+func TestErrorInMultiResult(t *testing.T) {
+ // https://github.com/go-sql-driver/mysql/issues/1361
+ var db *sql.DB
+ if _, err := ParseDSN(dsn); err != errInvalidDSNUnsafeCollation {
+ db, err = sql.Open("mysql", dsn)
+ if err != nil {
+ t.Fatalf("error connecting: %s", err.Error())
+ }
+ defer db.Close()
+ }
+
+ dbt := &DBTest{t, db}
+ query := `
+CREATE PROCEDURE test_proc1()
+BEGIN
+ SELECT 1,2;
+ SELECT 3,4;
+ SIGNAL SQLSTATE '10000' SET MESSAGE_TEXT = "some error", MYSQL_ERRNO = 10000;
+END;
+`
+ runCallCommand(dbt, query, "test_proc1")
+}
+
+func runCallCommand(dbt *DBTest, query, name string) {
+ dbt.mustExec(fmt.Sprintf("DROP PROCEDURE IF EXISTS %s", name))
+ dbt.mustExec(query)
+ defer dbt.mustExec("DROP PROCEDURE " + name)
+ rows, err := dbt.db.Query(fmt.Sprintf("CALL %s", name))
+ if err != nil {
+ return
+ }
+ defer rows.Close()
+
+ for rows.Next() {
+ }
+ for rows.NextResultSet() {
+ for rows.Next() {
+ }
+ }
+}
+
+func TestIssue1567(t *testing.T) {
+ // enable TLS.
+ runTests(t, dsn+"&tls=skip-verify", func(dbt *DBTest) {
+ // disable connection pooling.
+ // data race happens when new connection is created.
+ dbt.db.SetMaxIdleConns(0)
+
+ // estimate round trip time.
+ start := time.Now()
+ if err := dbt.db.PingContext(context.Background()); err != nil {
+ t.Fatal(err)
+ }
+ rtt := time.Since(start)
+ if rtt <= 0 {
+ // In some environments, rtt may become 0, so set it to at least 1ms.
+ rtt = time.Millisecond
+ }
+
+ count := 1000
+ if testing.Short() {
+ count = 10
+ }
+
+ for i := 0; i < count; i++ {
+ timeout := time.Duration(mrand.Int63n(int64(rtt)))
+ ctx, cancel := context.WithTimeout(context.Background(), timeout)
+ dbt.db.PingContext(ctx)
+ cancel()
+ }
+ })
+}
diff --git a/dependencies/pkg/mod/github.com/go-sql-driver/mysql@v1.7.1/dsn.go b/dependencies/pkg/mod/github.com/go-sql-driver/mysql@v1.8.1/dsn.go
index 4b71aaa..65f5a02 100644
--- a/dependencies/pkg/mod/github.com/go-sql-driver/mysql@v1.7.1/dsn.go
+++ b/dependencies/pkg/mod/github.com/go-sql-driver/mysql@v1.8.1/dsn.go
@@ -10,6 +10,7 @@ package mysql
import (
"bytes"
+ "context"
"crypto/rsa"
"crypto/tls"
"errors"
@@ -34,22 +35,27 @@ var (
// If a new Config is created instead of being parsed from a DSN string,
// the NewConfig function should be used, which sets default values.
type Config struct {
- User string // Username
- Passwd string // Password (requires User)
- Net string // Network type
- Addr string // Network address (requires Net)
- DBName string // Database name
- Params map[string]string // Connection parameters
- Collation string // Connection collation
- Loc *time.Location // Location for time.Time values
- MaxAllowedPacket int // Max packet size allowed
- ServerPubKey string // Server public key name
- pubKey *rsa.PublicKey // Server public key
- TLSConfig string // TLS configuration name
- TLS *tls.Config // TLS configuration, its priority is higher than TLSConfig
- Timeout time.Duration // Dial timeout
- ReadTimeout time.Duration // I/O read timeout
- WriteTimeout time.Duration // I/O write timeout
+ // non boolean fields
+
+ User string // Username
+ Passwd string // Password (requires User)
+ Net string // Network (e.g. "tcp", "tcp6", "unix". default: "tcp")
+ Addr string // Address (default: "127.0.0.1:3306" for "tcp" and "/tmp/mysql.sock" for "unix")
+ DBName string // Database name
+ Params map[string]string // Connection parameters
+ ConnectionAttributes string // Connection Attributes, comma-delimited string of user-defined "key:value" pairs
+ Collation string // Connection collation
+ Loc *time.Location // Location for time.Time values
+ MaxAllowedPacket int // Max packet size allowed
+ ServerPubKey string // Server public key name
+ TLSConfig string // TLS configuration name
+ TLS *tls.Config // TLS configuration, its priority is higher than TLSConfig
+ Timeout time.Duration // Dial timeout
+ ReadTimeout time.Duration // I/O read timeout
+ WriteTimeout time.Duration // I/O write timeout
+ Logger Logger // Logger
+
+ // boolean fields
AllowAllFiles bool // Allow all files to be used with LOAD DATA LOCAL INFILE
AllowCleartextPasswords bool // Allows the cleartext client side plugin
@@ -63,17 +69,57 @@ type Config struct {
MultiStatements bool // Allow multiple statements in one query
ParseTime bool // Parse time values to time.Time
RejectReadOnly bool // Reject read-only connections
+
+ // unexported fields. new options should be come here
+
+ beforeConnect func(context.Context, *Config) error // Invoked before a connection is established
+ pubKey *rsa.PublicKey // Server public key
+ timeTruncate time.Duration // Truncate time.Time values to the specified duration
}
+// Functional Options Pattern
+// https://dave.cheney.net/2014/10/17/functional-options-for-friendly-apis
+type Option func(*Config) error
+
// NewConfig creates a new Config and sets default values.
func NewConfig() *Config {
- return &Config{
- Collation: defaultCollation,
+ cfg := &Config{
Loc: time.UTC,
MaxAllowedPacket: defaultMaxAllowedPacket,
+ Logger: defaultLogger,
AllowNativePasswords: true,
CheckConnLiveness: true,
}
+
+ return cfg
+}
+
+// Apply applies the given options to the Config object.
+func (c *Config) Apply(opts ...Option) error {
+ for _, opt := range opts {
+ err := opt(c)
+ if err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+// TimeTruncate sets the time duration to truncate time.Time values in
+// query parameters.
+func TimeTruncate(d time.Duration) Option {
+ return func(cfg *Config) error {
+ cfg.timeTruncate = d
+ return nil
+ }
+}
+
+// BeforeConnect sets the function to be invoked before a connection is established.
+func BeforeConnect(fn func(context.Context, *Config) error) Option {
+ return func(cfg *Config) error {
+ cfg.beforeConnect = fn
+ return nil
+ }
}
func (cfg *Config) Clone() *Config {
@@ -97,7 +143,7 @@ func (cfg *Config) Clone() *Config {
}
func (cfg *Config) normalize() error {
- if cfg.InterpolateParams && unsafeCollations[cfg.Collation] {
+ if cfg.InterpolateParams && cfg.Collation != "" && unsafeCollations[cfg.Collation] {
return errInvalidDSNUnsafeCollation
}
@@ -153,6 +199,10 @@ func (cfg *Config) normalize() error {
}
}
+ if cfg.Logger == nil {
+ cfg.Logger = defaultLogger
+ }
+
return nil
}
@@ -171,6 +221,8 @@ func writeDSNParam(buf *bytes.Buffer, hasParam *bool, name, value string) {
// FormatDSN formats the given Config into a DSN string which can be passed to
// the driver.
+//
+// Note: use [NewConnector] and [database/sql.OpenDB] to open a connection from a [*Config].
func (cfg *Config) FormatDSN() string {
var buf bytes.Buffer
@@ -196,7 +248,7 @@ func (cfg *Config) FormatDSN() string {
// /dbname
buf.WriteByte('/')
- buf.WriteString(cfg.DBName)
+ buf.WriteString(url.PathEscape(cfg.DBName))
// [?param1=value1&...&paramN=valueN]
hasParam := false
@@ -230,7 +282,7 @@ func (cfg *Config) FormatDSN() string {
writeDSNParam(&buf, &hasParam, "clientFoundRows", "true")
}
- if col := cfg.Collation; col != defaultCollation && len(col) > 0 {
+ if col := cfg.Collation; col != "" {
writeDSNParam(&buf, &hasParam, "collation", col)
}
@@ -254,6 +306,10 @@ func (cfg *Config) FormatDSN() string {
writeDSNParam(&buf, &hasParam, "parseTime", "true")
}
+ if cfg.timeTruncate > 0 {
+ writeDSNParam(&buf, &hasParam, "timeTruncate", cfg.timeTruncate.String())
+ }
+
if cfg.ReadTimeout > 0 {
writeDSNParam(&buf, &hasParam, "readTimeout", cfg.ReadTimeout.String())
}
@@ -358,7 +414,11 @@ func ParseDSN(dsn string) (cfg *Config, err error) {
break
}
}
- cfg.DBName = dsn[i+1 : j]
+
+ dbname := dsn[i+1 : j]
+ if cfg.DBName, err = url.PathUnescape(dbname); err != nil {
+ return nil, fmt.Errorf("invalid dbname %q: %w", dbname, err)
+ }
break
}
@@ -378,13 +438,13 @@ func ParseDSN(dsn string) (cfg *Config, err error) {
// Values must be url.QueryEscape'ed
func parseDSNParams(cfg *Config, params string) (err error) {
for _, v := range strings.Split(params, "&") {
- param := strings.SplitN(v, "=", 2)
- if len(param) != 2 {
+ key, value, found := strings.Cut(v, "=")
+ if !found {
continue
}
// cfg params
- switch value := param[1]; param[0] {
+ switch key {
// Disable INFILE allowlist / enable all files
case "allowAllFiles":
var isBool bool
@@ -490,6 +550,13 @@ func parseDSNParams(cfg *Config, params string) (err error) {
return errors.New("invalid bool value: " + value)
}
+ // time.Time truncation
+ case "timeTruncate":
+ cfg.timeTruncate, err = time.ParseDuration(value)
+ if err != nil {
+ return fmt.Errorf("invalid timeTruncate value: %v, error: %w", value, err)
+ }
+
// I/O read Timeout
case "readTimeout":
cfg.ReadTimeout, err = time.ParseDuration(value)
@@ -554,13 +621,22 @@ func parseDSNParams(cfg *Config, params string) (err error) {
if err != nil {
return
}
+
+ // Connection attributes
+ case "connectionAttributes":
+ connectionAttributes, err := url.QueryUnescape(value)
+ if err != nil {
+ return fmt.Errorf("invalid connectionAttributes value: %v", err)
+ }
+ cfg.ConnectionAttributes = connectionAttributes
+
default:
// lazy init
if cfg.Params == nil {
cfg.Params = make(map[string]string)
}
- if cfg.Params[param[0]], err = url.QueryUnescape(value); err != nil {
+ if cfg.Params[key], err = url.QueryUnescape(value); err != nil {
return
}
}
diff --git a/dependencies/pkg/mod/github.com/go-sql-driver/mysql@v1.8.1/dsn_fuzz_test.go b/dependencies/pkg/mod/github.com/go-sql-driver/mysql@v1.8.1/dsn_fuzz_test.go
new file mode 100644
index 0000000..04c56ad
--- /dev/null
+++ b/dependencies/pkg/mod/github.com/go-sql-driver/mysql@v1.8.1/dsn_fuzz_test.go
@@ -0,0 +1,47 @@
+//go:build go1.18
+// +build go1.18
+
+package mysql
+
+import (
+ "net"
+ "testing"
+)
+
+func FuzzFormatDSN(f *testing.F) {
+ for _, test := range testDSNs { // See dsn_test.go
+ f.Add(test.in)
+ }
+
+ f.Fuzz(func(t *testing.T, dsn1 string) {
+ // Do not waste resources
+ if len(dsn1) > 1000 {
+ t.Skip("ignore: too long")
+ }
+
+ cfg1, err := ParseDSN(dsn1)
+ if err != nil {
+ t.Skipf("invalid DSN: %v", err)
+ }
+
+ dsn2 := cfg1.FormatDSN()
+ if dsn2 == dsn1 {
+ return
+ }
+
+ // Skip known cases of bad config that are not strictly checked by ParseDSN
+ if _, _, err := net.SplitHostPort(cfg1.Addr); err != nil {
+ t.Skipf("invalid addr %q: %v", cfg1.Addr, err)
+ }
+
+ cfg2, err := ParseDSN(dsn2)
+ if err != nil {
+ t.Fatalf("%q rewritten as %q: %v", dsn1, dsn2, err)
+ }
+
+ dsn3 := cfg2.FormatDSN()
+ if dsn3 != dsn2 {
+ t.Errorf("%q rewritten as %q", dsn2, dsn3)
+ }
+ })
+}
diff --git a/dependencies/pkg/mod/github.com/go-sql-driver/mysql@v1.7.1/dsn_test.go b/dependencies/pkg/mod/github.com/go-sql-driver/mysql@v1.8.1/dsn_test.go
index 41a6a29..dd8cd93 100644
--- a/dependencies/pkg/mod/github.com/go-sql-driver/mysql@v1.7.1/dsn_test.go
+++ b/dependencies/pkg/mod/github.com/go-sql-driver/mysql@v1.8.1/dsn_test.go
@@ -22,71 +22,80 @@ var testDSNs = []struct {
out *Config
}{{
"username:password@protocol(address)/dbname?param=value",
- &Config{User: "username", Passwd: "password", Net: "protocol", Addr: "address", DBName: "dbname", Params: map[string]string{"param": "value"}, Collation: "utf8mb4_general_ci", Loc: time.UTC, MaxAllowedPacket: defaultMaxAllowedPacket, AllowNativePasswords: true, CheckConnLiveness: true},
+ &Config{User: "username", Passwd: "password", Net: "protocol", Addr: "address", DBName: "dbname", Params: map[string]string{"param": "value"}, Loc: time.UTC, MaxAllowedPacket: defaultMaxAllowedPacket, Logger: defaultLogger, AllowNativePasswords: true, CheckConnLiveness: true},
}, {
"username:password@protocol(address)/dbname?param=value&columnsWithAlias=true",
- &Config{User: "username", Passwd: "password", Net: "protocol", Addr: "address", DBName: "dbname", Params: map[string]string{"param": "value"}, Collation: "utf8mb4_general_ci", Loc: time.UTC, MaxAllowedPacket: defaultMaxAllowedPacket, AllowNativePasswords: true, CheckConnLiveness: true, ColumnsWithAlias: true},
+ &Config{User: "username", Passwd: "password", Net: "protocol", Addr: "address", DBName: "dbname", Params: map[string]string{"param": "value"}, Loc: time.UTC, MaxAllowedPacket: defaultMaxAllowedPacket, Logger: defaultLogger, AllowNativePasswords: true, CheckConnLiveness: true, ColumnsWithAlias: true},
}, {
"username:password@protocol(address)/dbname?param=value&columnsWithAlias=true&multiStatements=true",
- &Config{User: "username", Passwd: "password", Net: "protocol", Addr: "address", DBName: "dbname", Params: map[string]string{"param": "value"}, Collation: "utf8mb4_general_ci", Loc: time.UTC, MaxAllowedPacket: defaultMaxAllowedPacket, AllowNativePasswords: true, CheckConnLiveness: true, ColumnsWithAlias: true, MultiStatements: true},
+ &Config{User: "username", Passwd: "password", Net: "protocol", Addr: "address", DBName: "dbname", Params: map[string]string{"param": "value"}, Loc: time.UTC, MaxAllowedPacket: defaultMaxAllowedPacket, Logger: defaultLogger, AllowNativePasswords: true, CheckConnLiveness: true, ColumnsWithAlias: true, MultiStatements: true},
}, {
"user@unix(/path/to/socket)/dbname?charset=utf8",
- &Config{User: "user", Net: "unix", Addr: "/path/to/socket", DBName: "dbname", Params: map[string]string{"charset": "utf8"}, Collation: "utf8mb4_general_ci", Loc: time.UTC, MaxAllowedPacket: defaultMaxAllowedPacket, AllowNativePasswords: true, CheckConnLiveness: true},
+ &Config{User: "user", Net: "unix", Addr: "/path/to/socket", DBName: "dbname", Params: map[string]string{"charset": "utf8"}, Loc: time.UTC, MaxAllowedPacket: defaultMaxAllowedPacket, Logger: defaultLogger, AllowNativePasswords: true, CheckConnLiveness: true},
}, {
"user:password@tcp(localhost:5555)/dbname?charset=utf8&tls=true",
- &Config{User: "user", Passwd: "password", Net: "tcp", Addr: "localhost:5555", DBName: "dbname", Params: map[string]string{"charset": "utf8"}, Collation: "utf8mb4_general_ci", Loc: time.UTC, MaxAllowedPacket: defaultMaxAllowedPacket, AllowNativePasswords: true, CheckConnLiveness: true, TLSConfig: "true"},
+ &Config{User: "user", Passwd: "password", Net: "tcp", Addr: "localhost:5555", DBName: "dbname", Params: map[string]string{"charset": "utf8"}, Loc: time.UTC, MaxAllowedPacket: defaultMaxAllowedPacket, Logger: defaultLogger, AllowNativePasswords: true, CheckConnLiveness: true, TLSConfig: "true"},
}, {
"user:password@tcp(localhost:5555)/dbname?charset=utf8mb4,utf8&tls=skip-verify",
- &Config{User: "user", Passwd: "password", Net: "tcp", Addr: "localhost:5555", DBName: "dbname", Params: map[string]string{"charset": "utf8mb4,utf8"}, Collation: "utf8mb4_general_ci", Loc: time.UTC, MaxAllowedPacket: defaultMaxAllowedPacket, AllowNativePasswords: true, CheckConnLiveness: true, TLSConfig: "skip-verify"},
+ &Config{User: "user", Passwd: "password", Net: "tcp", Addr: "localhost:5555", DBName: "dbname", Params: map[string]string{"charset": "utf8mb4,utf8"}, Loc: time.UTC, MaxAllowedPacket: defaultMaxAllowedPacket, Logger: defaultLogger, AllowNativePasswords: true, CheckConnLiveness: true, TLSConfig: "skip-verify"},
}, {
"user:password@/dbname?loc=UTC&timeout=30s&readTimeout=1s&writeTimeout=1s&allowAllFiles=1&clientFoundRows=true&allowOldPasswords=TRUE&collation=utf8mb4_unicode_ci&maxAllowedPacket=16777216&tls=false&allowCleartextPasswords=true&parseTime=true&rejectReadOnly=true",
- &Config{User: "user", Passwd: "password", Net: "tcp", Addr: "127.0.0.1:3306", DBName: "dbname", Collation: "utf8mb4_unicode_ci", Loc: time.UTC, TLSConfig: "false", AllowCleartextPasswords: true, AllowNativePasswords: true, Timeout: 30 * time.Second, ReadTimeout: time.Second, WriteTimeout: time.Second, AllowAllFiles: true, AllowOldPasswords: true, CheckConnLiveness: true, ClientFoundRows: true, MaxAllowedPacket: 16777216, ParseTime: true, RejectReadOnly: true},
+ &Config{User: "user", Passwd: "password", Net: "tcp", Addr: "127.0.0.1:3306", DBName: "dbname", Collation: "utf8mb4_unicode_ci", Loc: time.UTC, TLSConfig: "false", AllowCleartextPasswords: true, AllowNativePasswords: true, Timeout: 30 * time.Second, ReadTimeout: time.Second, WriteTimeout: time.Second, Logger: defaultLogger, AllowAllFiles: true, AllowOldPasswords: true, CheckConnLiveness: true, ClientFoundRows: true, MaxAllowedPacket: 16777216, ParseTime: true, RejectReadOnly: true},
}, {
"user:password@/dbname?allowNativePasswords=false&checkConnLiveness=false&maxAllowedPacket=0&allowFallbackToPlaintext=true",
- &Config{User: "user", Passwd: "password", Net: "tcp", Addr: "127.0.0.1:3306", DBName: "dbname", Collation: "utf8mb4_general_ci", Loc: time.UTC, MaxAllowedPacket: 0, AllowFallbackToPlaintext: true, AllowNativePasswords: false, CheckConnLiveness: false},
+ &Config{User: "user", Passwd: "password", Net: "tcp", Addr: "127.0.0.1:3306", DBName: "dbname", Loc: time.UTC, MaxAllowedPacket: 0, Logger: defaultLogger, AllowFallbackToPlaintext: true, AllowNativePasswords: false, CheckConnLiveness: false},
}, {
"user:p@ss(word)@tcp([de:ad:be:ef::ca:fe]:80)/dbname?loc=Local",
- &Config{User: "user", Passwd: "p@ss(word)", Net: "tcp", Addr: "[de:ad:be:ef::ca:fe]:80", DBName: "dbname", Collation: "utf8mb4_general_ci", Loc: time.Local, MaxAllowedPacket: defaultMaxAllowedPacket, AllowNativePasswords: true, CheckConnLiveness: true},
+ &Config{User: "user", Passwd: "p@ss(word)", Net: "tcp", Addr: "[de:ad:be:ef::ca:fe]:80", DBName: "dbname", Loc: time.Local, MaxAllowedPacket: defaultMaxAllowedPacket, Logger: defaultLogger, AllowNativePasswords: true, CheckConnLiveness: true},
}, {
"/dbname",
- &Config{Net: "tcp", Addr: "127.0.0.1:3306", DBName: "dbname", Collation: "utf8mb4_general_ci", Loc: time.UTC, MaxAllowedPacket: defaultMaxAllowedPacket, AllowNativePasswords: true, CheckConnLiveness: true},
+ &Config{Net: "tcp", Addr: "127.0.0.1:3306", DBName: "dbname", Loc: time.UTC, MaxAllowedPacket: defaultMaxAllowedPacket, Logger: defaultLogger, AllowNativePasswords: true, CheckConnLiveness: true},
+}, {
+ "/dbname%2Fwithslash",
+ &Config{Net: "tcp", Addr: "127.0.0.1:3306", DBName: "dbname/withslash", Loc: time.UTC, MaxAllowedPacket: defaultMaxAllowedPacket, Logger: defaultLogger, AllowNativePasswords: true, CheckConnLiveness: true},
}, {
"@/",
- &Config{Net: "tcp", Addr: "127.0.0.1:3306", Collation: "utf8mb4_general_ci", Loc: time.UTC, MaxAllowedPacket: defaultMaxAllowedPacket, AllowNativePasswords: true, CheckConnLiveness: true},
+ &Config{Net: "tcp", Addr: "127.0.0.1:3306", Loc: time.UTC, MaxAllowedPacket: defaultMaxAllowedPacket, Logger: defaultLogger, AllowNativePasswords: true, CheckConnLiveness: true},
}, {
"/",
- &Config{Net: "tcp", Addr: "127.0.0.1:3306", Collation: "utf8mb4_general_ci", Loc: time.UTC, MaxAllowedPacket: defaultMaxAllowedPacket, AllowNativePasswords: true, CheckConnLiveness: true},
+ &Config{Net: "tcp", Addr: "127.0.0.1:3306", Loc: time.UTC, MaxAllowedPacket: defaultMaxAllowedPacket, Logger: defaultLogger, AllowNativePasswords: true, CheckConnLiveness: true},
}, {
"",
- &Config{Net: "tcp", Addr: "127.0.0.1:3306", Collation: "utf8mb4_general_ci", Loc: time.UTC, MaxAllowedPacket: defaultMaxAllowedPacket, AllowNativePasswords: true, CheckConnLiveness: true},
+ &Config{Net: "tcp", Addr: "127.0.0.1:3306", Loc: time.UTC, MaxAllowedPacket: defaultMaxAllowedPacket, Logger: defaultLogger, AllowNativePasswords: true, CheckConnLiveness: true},
}, {
"user:p@/ssword@/",
- &Config{User: "user", Passwd: "p@/ssword", Net: "tcp", Addr: "127.0.0.1:3306", Collation: "utf8mb4_general_ci", Loc: time.UTC, MaxAllowedPacket: defaultMaxAllowedPacket, AllowNativePasswords: true, CheckConnLiveness: true},
+ &Config{User: "user", Passwd: "p@/ssword", Net: "tcp", Addr: "127.0.0.1:3306", Loc: time.UTC, MaxAllowedPacket: defaultMaxAllowedPacket, Logger: defaultLogger, AllowNativePasswords: true, CheckConnLiveness: true},
}, {
"unix/?arg=%2Fsome%2Fpath.ext",
- &Config{Net: "unix", Addr: "/tmp/mysql.sock", Params: map[string]string{"arg": "/some/path.ext"}, Collation: "utf8mb4_general_ci", Loc: time.UTC, MaxAllowedPacket: defaultMaxAllowedPacket, AllowNativePasswords: true, CheckConnLiveness: true},
+ &Config{Net: "unix", Addr: "/tmp/mysql.sock", Params: map[string]string{"arg": "/some/path.ext"}, Loc: time.UTC, MaxAllowedPacket: defaultMaxAllowedPacket, Logger: defaultLogger, AllowNativePasswords: true, CheckConnLiveness: true},
}, {
"tcp(127.0.0.1)/dbname",
- &Config{Net: "tcp", Addr: "127.0.0.1:3306", DBName: "dbname", Collation: "utf8mb4_general_ci", Loc: time.UTC, MaxAllowedPacket: defaultMaxAllowedPacket, AllowNativePasswords: true, CheckConnLiveness: true},
+ &Config{Net: "tcp", Addr: "127.0.0.1:3306", DBName: "dbname", Loc: time.UTC, MaxAllowedPacket: defaultMaxAllowedPacket, Logger: defaultLogger, AllowNativePasswords: true, CheckConnLiveness: true},
}, {
"tcp(de:ad:be:ef::ca:fe)/dbname",
- &Config{Net: "tcp", Addr: "[de:ad:be:ef::ca:fe]:3306", DBName: "dbname", Collation: "utf8mb4_general_ci", Loc: time.UTC, MaxAllowedPacket: defaultMaxAllowedPacket, AllowNativePasswords: true, CheckConnLiveness: true},
+ &Config{Net: "tcp", Addr: "[de:ad:be:ef::ca:fe]:3306", DBName: "dbname", Loc: time.UTC, MaxAllowedPacket: defaultMaxAllowedPacket, Logger: defaultLogger, AllowNativePasswords: true, CheckConnLiveness: true},
+}, {
+ "user:password@/dbname?loc=UTC&timeout=30s&parseTime=true&timeTruncate=1h",
+ &Config{User: "user", Passwd: "password", Net: "tcp", Addr: "127.0.0.1:3306", DBName: "dbname", Loc: time.UTC, Timeout: 30 * time.Second, ParseTime: true, MaxAllowedPacket: defaultMaxAllowedPacket, Logger: defaultLogger, AllowNativePasswords: true, CheckConnLiveness: true, timeTruncate: time.Hour},
},
}
func TestDSNParser(t *testing.T) {
for i, tst := range testDSNs {
- cfg, err := ParseDSN(tst.in)
- if err != nil {
- t.Error(err.Error())
- }
+ t.Run(tst.in, func(t *testing.T) {
+ cfg, err := ParseDSN(tst.in)
+ if err != nil {
+ t.Error(err.Error())
+ return
+ }
- // pointer not static
- cfg.TLS = nil
+ // pointer not static
+ cfg.TLS = nil
- if !reflect.DeepEqual(cfg, tst.out) {
- t.Errorf("%d. ParseDSN(%q) mismatch:\ngot %+v\nwant %+v", i, tst.in, cfg, tst.out)
- }
+ if !reflect.DeepEqual(cfg, tst.out) {
+ t.Errorf("%d. ParseDSN(%q) mismatch:\ngot %+v\nwant %+v", i, tst.in, cfg, tst.out)
+ }
+ })
}
}
@@ -113,27 +122,39 @@ func TestDSNParserInvalid(t *testing.T) {
func TestDSNReformat(t *testing.T) {
for i, tst := range testDSNs {
- dsn1 := tst.in
- cfg1, err := ParseDSN(dsn1)
- if err != nil {
- t.Error(err.Error())
- continue
- }
- cfg1.TLS = nil // pointer not static
- res1 := fmt.Sprintf("%+v", cfg1)
-
- dsn2 := cfg1.FormatDSN()
- cfg2, err := ParseDSN(dsn2)
- if err != nil {
- t.Error(err.Error())
- continue
- }
- cfg2.TLS = nil // pointer not static
- res2 := fmt.Sprintf("%+v", cfg2)
+ t.Run(tst.in, func(t *testing.T) {
+ dsn1 := tst.in
+ cfg1, err := ParseDSN(dsn1)
+ if err != nil {
+ t.Error(err.Error())
+ return
+ }
+ cfg1.TLS = nil // pointer not static
+ res1 := fmt.Sprintf("%+v", cfg1)
- if res1 != res2 {
- t.Errorf("%d. %q does not match %q", i, res2, res1)
- }
+ dsn2 := cfg1.FormatDSN()
+ if dsn2 != dsn1 {
+ // Just log
+ t.Logf("%d. %q reformatted as %q", i, dsn1, dsn2)
+ }
+
+ cfg2, err := ParseDSN(dsn2)
+ if err != nil {
+ t.Error(err.Error())
+ return
+ }
+ cfg2.TLS = nil // pointer not static
+ res2 := fmt.Sprintf("%+v", cfg2)
+
+ if res1 != res2 {
+ t.Errorf("%d. %q does not match %q", i, res2, res1)
+ }
+
+ dsn3 := cfg2.FormatDSN()
+ if dsn3 != dsn2 {
+ t.Errorf("%d. %q does not match %q", i, dsn2, dsn3)
+ }
+ })
}
}
diff --git a/dependencies/pkg/mod/github.com/go-sql-driver/mysql@v1.7.1/errors.go b/dependencies/pkg/mod/github.com/go-sql-driver/mysql@v1.8.1/errors.go
index ff9a8f0..a7ef889 100644
--- a/dependencies/pkg/mod/github.com/go-sql-driver/mysql@v1.7.1/errors.go
+++ b/dependencies/pkg/mod/github.com/go-sql-driver/mysql@v1.8.1/errors.go
@@ -21,7 +21,7 @@ var (
ErrMalformPkt = errors.New("malformed packet")
ErrNoTLS = errors.New("TLS requested but server does not support TLS")
ErrCleartextPassword = errors.New("this user requires clear text authentication. If you still want to use it, please add 'allowCleartextPasswords=1' to your DSN")
- ErrNativePassword = errors.New("this user requires mysql native password authentication.")
+ ErrNativePassword = errors.New("this user requires mysql native password authentication")
ErrOldPassword = errors.New("this user requires old password authentication. If you still want to use it, please add 'allowOldPasswords=1' to your DSN. See also https://github.com/go-sql-driver/mysql/wiki/old_passwords")
ErrUnknownPlugin = errors.New("this authentication plugin is not supported")
ErrOldProtocol = errors.New("MySQL server does not support required protocol 41+")
@@ -37,20 +37,26 @@ var (
errBadConnNoWrite = errors.New("bad connection")
)
-var errLog = Logger(log.New(os.Stderr, "[mysql] ", log.Ldate|log.Ltime|log.Lshortfile))
+var defaultLogger = Logger(log.New(os.Stderr, "[mysql] ", log.Ldate|log.Ltime|log.Lshortfile))
// Logger is used to log critical error messages.
type Logger interface {
- Print(v ...interface{})
+ Print(v ...any)
}
-// SetLogger is used to set the logger for critical errors.
+// NopLogger is a nop implementation of the Logger interface.
+type NopLogger struct{}
+
+// Print implements Logger interface.
+func (nl *NopLogger) Print(_ ...any) {}
+
+// SetLogger is used to set the default logger for critical errors.
// The initial logger is os.Stderr.
func SetLogger(logger Logger) error {
if logger == nil {
return errors.New("logger is nil")
}
- errLog = logger
+ defaultLogger = logger
return nil
}
diff --git a/dependencies/pkg/mod/github.com/go-sql-driver/mysql@v1.7.1/errors_test.go b/dependencies/pkg/mod/github.com/go-sql-driver/mysql@v1.8.1/errors_test.go
index 43213f9..53d6344 100644
--- a/dependencies/pkg/mod/github.com/go-sql-driver/mysql@v1.7.1/errors_test.go
+++ b/dependencies/pkg/mod/github.com/go-sql-driver/mysql@v1.8.1/errors_test.go
@@ -16,9 +16,9 @@ import (
)
func TestErrorsSetLogger(t *testing.T) {
- previous := errLog
+ previous := defaultLogger
defer func() {
- errLog = previous
+ defaultLogger = previous
}()
// set up logger
@@ -28,7 +28,7 @@ func TestErrorsSetLogger(t *testing.T) {
// print
SetLogger(logger)
- errLog.Print("test")
+ defaultLogger.Print("test")
// check result
if actual := buffer.String(); actual != expected {
diff --git a/dependencies/pkg/mod/github.com/go-sql-driver/mysql@v1.7.1/fields.go b/dependencies/pkg/mod/github.com/go-sql-driver/mysql@v1.8.1/fields.go
index e0654a8..2860842 100644
--- a/dependencies/pkg/mod/github.com/go-sql-driver/mysql@v1.7.1/fields.go
+++ b/dependencies/pkg/mod/github.com/go-sql-driver/mysql@v1.8.1/fields.go
@@ -18,7 +18,7 @@ func (mf *mysqlField) typeDatabaseName() string {
case fieldTypeBit:
return "BIT"
case fieldTypeBLOB:
- if mf.charSet != collations[binaryCollation] {
+ if mf.charSet != binaryCollationID {
return "TEXT"
}
return "BLOB"
@@ -37,6 +37,9 @@ func (mf *mysqlField) typeDatabaseName() string {
case fieldTypeGeometry:
return "GEOMETRY"
case fieldTypeInt24:
+ if mf.flags&flagUnsigned != 0 {
+ return "UNSIGNED MEDIUMINT"
+ }
return "MEDIUMINT"
case fieldTypeJSON:
return "JSON"
@@ -46,7 +49,7 @@ func (mf *mysqlField) typeDatabaseName() string {
}
return "INT"
case fieldTypeLongBLOB:
- if mf.charSet != collations[binaryCollation] {
+ if mf.charSet != binaryCollationID {
return "LONGTEXT"
}
return "LONGBLOB"
@@ -56,7 +59,7 @@ func (mf *mysqlField) typeDatabaseName() string {
}
return "BIGINT"
case fieldTypeMediumBLOB:
- if mf.charSet != collations[binaryCollation] {
+ if mf.charSet != binaryCollationID {
return "MEDIUMTEXT"
}
return "MEDIUMBLOB"
@@ -74,7 +77,12 @@ func (mf *mysqlField) typeDatabaseName() string {
}
return "SMALLINT"
case fieldTypeString:
- if mf.charSet == collations[binaryCollation] {
+ if mf.flags&flagEnum != 0 {
+ return "ENUM"
+ } else if mf.flags&flagSet != 0 {
+ return "SET"
+ }
+ if mf.charSet == binaryCollationID {
return "BINARY"
}
return "CHAR"
@@ -88,17 +96,17 @@ func (mf *mysqlField) typeDatabaseName() string {
}
return "TINYINT"
case fieldTypeTinyBLOB:
- if mf.charSet != collations[binaryCollation] {
+ if mf.charSet != binaryCollationID {
return "TINYTEXT"
}
return "TINYBLOB"
case fieldTypeVarChar:
- if mf.charSet == collations[binaryCollation] {
+ if mf.charSet == binaryCollationID {
return "VARBINARY"
}
return "VARCHAR"
case fieldTypeVarString:
- if mf.charSet == collations[binaryCollation] {
+ if mf.charSet == binaryCollationID {
return "VARBINARY"
}
return "VARCHAR"
@@ -110,21 +118,23 @@ func (mf *mysqlField) typeDatabaseName() string {
}
var (
- scanTypeFloat32 = reflect.TypeOf(float32(0))
- scanTypeFloat64 = reflect.TypeOf(float64(0))
- scanTypeInt8 = reflect.TypeOf(int8(0))
- scanTypeInt16 = reflect.TypeOf(int16(0))
- scanTypeInt32 = reflect.TypeOf(int32(0))
- scanTypeInt64 = reflect.TypeOf(int64(0))
- scanTypeNullFloat = reflect.TypeOf(sql.NullFloat64{})
- scanTypeNullInt = reflect.TypeOf(sql.NullInt64{})
- scanTypeNullTime = reflect.TypeOf(sql.NullTime{})
- scanTypeUint8 = reflect.TypeOf(uint8(0))
- scanTypeUint16 = reflect.TypeOf(uint16(0))
- scanTypeUint32 = reflect.TypeOf(uint32(0))
- scanTypeUint64 = reflect.TypeOf(uint64(0))
- scanTypeRawBytes = reflect.TypeOf(sql.RawBytes{})
- scanTypeUnknown = reflect.TypeOf(new(interface{}))
+ scanTypeFloat32 = reflect.TypeOf(float32(0))
+ scanTypeFloat64 = reflect.TypeOf(float64(0))
+ scanTypeInt8 = reflect.TypeOf(int8(0))
+ scanTypeInt16 = reflect.TypeOf(int16(0))
+ scanTypeInt32 = reflect.TypeOf(int32(0))
+ scanTypeInt64 = reflect.TypeOf(int64(0))
+ scanTypeNullFloat = reflect.TypeOf(sql.NullFloat64{})
+ scanTypeNullInt = reflect.TypeOf(sql.NullInt64{})
+ scanTypeNullTime = reflect.TypeOf(sql.NullTime{})
+ scanTypeUint8 = reflect.TypeOf(uint8(0))
+ scanTypeUint16 = reflect.TypeOf(uint16(0))
+ scanTypeUint32 = reflect.TypeOf(uint32(0))
+ scanTypeUint64 = reflect.TypeOf(uint64(0))
+ scanTypeString = reflect.TypeOf("")
+ scanTypeNullString = reflect.TypeOf(sql.NullString{})
+ scanTypeBytes = reflect.TypeOf([]byte{})
+ scanTypeUnknown = reflect.TypeOf(new(any))
)
type mysqlField struct {
@@ -187,12 +197,18 @@ func (mf *mysqlField) scanType() reflect.Type {
}
return scanTypeNullFloat
+ case fieldTypeBit, fieldTypeTinyBLOB, fieldTypeMediumBLOB, fieldTypeLongBLOB,
+ fieldTypeBLOB, fieldTypeVarString, fieldTypeString, fieldTypeGeometry:
+ if mf.charSet == binaryCollationID {
+ return scanTypeBytes
+ }
+ fallthrough
case fieldTypeDecimal, fieldTypeNewDecimal, fieldTypeVarChar,
- fieldTypeBit, fieldTypeEnum, fieldTypeSet, fieldTypeTinyBLOB,
- fieldTypeMediumBLOB, fieldTypeLongBLOB, fieldTypeBLOB,
- fieldTypeVarString, fieldTypeString, fieldTypeGeometry, fieldTypeJSON,
- fieldTypeTime:
- return scanTypeRawBytes
+ fieldTypeEnum, fieldTypeSet, fieldTypeJSON, fieldTypeTime:
+ if mf.flags&flagNotNULL != 0 {
+ return scanTypeString
+ }
+ return scanTypeNullString
case fieldTypeDate, fieldTypeNewDate,
fieldTypeTimestamp, fieldTypeDateTime:
diff --git a/dependencies/pkg/mod/github.com/go-sql-driver/mysql@v1.8.1/go.mod b/dependencies/pkg/mod/github.com/go-sql-driver/mysql@v1.8.1/go.mod
new file mode 100644
index 0000000..4629714
--- /dev/null
+++ b/dependencies/pkg/mod/github.com/go-sql-driver/mysql@v1.8.1/go.mod
@@ -0,0 +1,5 @@
+module github.com/go-sql-driver/mysql
+
+go 1.18
+
+require filippo.io/edwards25519 v1.1.0
diff --git a/dependencies/pkg/mod/github.com/go-sql-driver/mysql@v1.8.1/go.sum b/dependencies/pkg/mod/github.com/go-sql-driver/mysql@v1.8.1/go.sum
new file mode 100644
index 0000000..359ca94
--- /dev/null
+++ b/dependencies/pkg/mod/github.com/go-sql-driver/mysql@v1.8.1/go.sum
@@ -0,0 +1,2 @@
+filippo.io/edwards25519 v1.1.0 h1:FNf4tywRC1HmFuKW5xopWpigGjJKiJSV0Cqo0cJWDaA=
+filippo.io/edwards25519 v1.1.0/go.mod h1:BxyFTGdWcka3PhytdK4V28tE5sGfRvvvRV7EaN4VDT4=
diff --git a/dependencies/pkg/mod/github.com/go-sql-driver/mysql@v1.7.1/infile.go b/dependencies/pkg/mod/github.com/go-sql-driver/mysql@v1.8.1/infile.go
index 3279dcf..0c8af9f 100644
--- a/dependencies/pkg/mod/github.com/go-sql-driver/mysql@v1.7.1/infile.go
+++ b/dependencies/pkg/mod/github.com/go-sql-driver/mysql@v1.8.1/infile.go
@@ -93,7 +93,7 @@ func deferredClose(err *error, closer io.Closer) {
const defaultPacketSize = 16 * 1024 // 16KB is small enough for disk readahead and large enough for TCP
-func (mc *mysqlConn) handleInFileRequest(name string) (err error) {
+func (mc *okHandler) handleInFileRequest(name string) (err error) {
var rdr io.Reader
var data []byte
packetSize := defaultPacketSize
@@ -116,10 +116,10 @@ func (mc *mysqlConn) handleInFileRequest(name string) (err error) {
defer deferredClose(&err, cl)
}
} else {
- err = fmt.Errorf("Reader '%s' is <nil>", name)
+ err = fmt.Errorf("reader '%s' is <nil>", name)
}
} else {
- err = fmt.Errorf("Reader '%s' is not registered", name)
+ err = fmt.Errorf("reader '%s' is not registered", name)
}
} else { // File
name = strings.Trim(name, `"`)
@@ -154,7 +154,7 @@ func (mc *mysqlConn) handleInFileRequest(name string) (err error) {
for err == nil {
n, err = rdr.Read(data[4:])
if n > 0 {
- if ioErr := mc.writePacket(data[:4+n]); ioErr != nil {
+ if ioErr := mc.conn().writePacket(data[:4+n]); ioErr != nil {
return ioErr
}
}
@@ -168,7 +168,7 @@ func (mc *mysqlConn) handleInFileRequest(name string) (err error) {
if data == nil {
data = make([]byte, 4)
}
- if ioErr := mc.writePacket(data[:4]); ioErr != nil {
+ if ioErr := mc.conn().writePacket(data[:4]); ioErr != nil {
return ioErr
}
@@ -177,6 +177,6 @@ func (mc *mysqlConn) handleInFileRequest(name string) (err error) {
return mc.readResultOK()
}
- mc.readPacket()
+ mc.conn().readPacket()
return err
}
diff --git a/dependencies/pkg/mod/github.com/go-sql-driver/mysql@v1.7.1/nulltime.go b/dependencies/pkg/mod/github.com/go-sql-driver/mysql@v1.8.1/nulltime.go
index 36c8a42..316a48a 100644
--- a/dependencies/pkg/mod/github.com/go-sql-driver/mysql@v1.7.1/nulltime.go
+++ b/dependencies/pkg/mod/github.com/go-sql-driver/mysql@v1.8.1/nulltime.go
@@ -38,7 +38,7 @@ type NullTime sql.NullTime
// Scan implements the Scanner interface.
// The value type must be time.Time or string / []byte (formatted time-string),
// otherwise Scan fails.
-func (nt *NullTime) Scan(value interface{}) (err error) {
+func (nt *NullTime) Scan(value any) (err error) {
if value == nil {
nt.Time, nt.Valid = time.Time{}, false
return
@@ -59,7 +59,7 @@ func (nt *NullTime) Scan(value interface{}) (err error) {
}
nt.Valid = false
- return fmt.Errorf("Can't convert %T to time.Time", value)
+ return fmt.Errorf("can't convert %T to time.Time", value)
}
// Value implements the driver Valuer interface.
diff --git a/dependencies/pkg/mod/github.com/go-sql-driver/mysql@v1.7.1/nulltime_test.go b/dependencies/pkg/mod/github.com/go-sql-driver/mysql@v1.8.1/nulltime_test.go
index a14ec06..4f1d902 100644
--- a/dependencies/pkg/mod/github.com/go-sql-driver/mysql@v1.7.1/nulltime_test.go
+++ b/dependencies/pkg/mod/github.com/go-sql-driver/mysql@v1.8.1/nulltime_test.go
@@ -23,7 +23,7 @@ var (
func TestScanNullTime(t *testing.T) {
var scanTests = []struct {
- in interface{}
+ in any
error bool
valid bool
time time.Time
diff --git a/dependencies/pkg/mod/github.com/go-sql-driver/mysql@v1.7.1/packets.go b/dependencies/pkg/mod/github.com/go-sql-driver/mysql@v1.8.1/packets.go
index ee05c95..90a3472 100644
--- a/dependencies/pkg/mod/github.com/go-sql-driver/mysql@v1.7.1/packets.go
+++ b/dependencies/pkg/mod/github.com/go-sql-driver/mysql@v1.8.1/packets.go
@@ -14,10 +14,10 @@ import (
"database/sql/driver"
"encoding/binary"
"encoding/json"
- "errors"
"fmt"
"io"
"math"
+ "strconv"
"time"
)
@@ -34,7 +34,7 @@ func (mc *mysqlConn) readPacket() ([]byte, error) {
if cerr := mc.canceled.Value(); cerr != nil {
return nil, cerr
}
- errLog.Print(err)
+ mc.log(err)
mc.Close()
return nil, ErrInvalidConn
}
@@ -44,6 +44,7 @@ func (mc *mysqlConn) readPacket() ([]byte, error) {
// check packet sync [8 bit]
if data[3] != mc.sequence {
+ mc.Close()
if data[3] > mc.sequence {
return nil, ErrPktSyncMul
}
@@ -56,7 +57,7 @@ func (mc *mysqlConn) readPacket() ([]byte, error) {
if pktLen == 0 {
// there was no previous packet
if prevData == nil {
- errLog.Print(ErrMalformPkt)
+ mc.log(ErrMalformPkt)
mc.Close()
return nil, ErrInvalidConn
}
@@ -70,7 +71,7 @@ func (mc *mysqlConn) readPacket() ([]byte, error) {
if cerr := mc.canceled.Value(); cerr != nil {
return nil, cerr
}
- errLog.Print(err)
+ mc.log(err)
mc.Close()
return nil, ErrInvalidConn
}
@@ -97,34 +98,6 @@ func (mc *mysqlConn) writePacket(data []byte) error {
return ErrPktTooLarge
}
- // Perform a stale connection check. We only perform this check for
- // the first query on a connection that has been checked out of the
- // connection pool: a fresh connection from the pool is more likely
- // to be stale, and it has not performed any previous writes that
- // could cause data corruption, so it's safe to return ErrBadConn
- // if the check fails.
- if mc.reset {
- mc.reset = false
- conn := mc.netConn
- if mc.rawConn != nil {
- conn = mc.rawConn
- }
- var err error
- if mc.cfg.CheckConnLiveness {
- if mc.cfg.ReadTimeout != 0 {
- err = conn.SetReadDeadline(time.Now().Add(mc.cfg.ReadTimeout))
- }
- if err == nil {
- err = connCheck(conn)
- }
- }
- if err != nil {
- errLog.Print("closing bad idle connection: ", err)
- mc.Close()
- return driver.ErrBadConn
- }
- }
-
for {
var size int
if pktLen >= maxPacketSize {
@@ -161,7 +134,7 @@ func (mc *mysqlConn) writePacket(data []byte) error {
// Handle error
if err == nil { // n != len(data)
mc.cleanup()
- errLog.Print(ErrMalformPkt)
+ mc.log(ErrMalformPkt)
} else {
if cerr := mc.canceled.Value(); cerr != nil {
return cerr
@@ -171,7 +144,7 @@ func (mc *mysqlConn) writePacket(data []byte) error {
return errBadConnNoWrite
}
mc.cleanup()
- errLog.Print(err)
+ mc.log(err)
}
return ErrInvalidConn
}
@@ -239,7 +212,7 @@ func (mc *mysqlConn) readHandshakePacket() (data []byte, plugin string, err erro
// reserved (all [00]) [10 bytes]
pos += 1 + 2 + 2 + 1 + 10
- // second part of the password cipher [mininum 13 bytes],
+ // second part of the password cipher [minimum 13 bytes],
// where len=MAX(13, length of auth-plugin-data - 8)
//
// The web documentation is ambiguous about the length. However,
@@ -285,6 +258,7 @@ func (mc *mysqlConn) writeHandshakeResponsePacket(authResp []byte, plugin string
clientLocalFiles |
clientPluginAuth |
clientMultiResults |
+ clientConnectAttrs |
mc.flags&clientLongFlag
if mc.cfg.ClientFoundRows {
@@ -318,11 +292,17 @@ func (mc *mysqlConn) writeHandshakeResponsePacket(authResp []byte, plugin string
pktLen += n + 1
}
+ // encode length of the connection attributes
+ var connAttrsLEIBuf [9]byte
+ connAttrsLen := len(mc.connector.encodedAttributes)
+ connAttrsLEI := appendLengthEncodedInteger(connAttrsLEIBuf[:0], uint64(connAttrsLen))
+ pktLen += len(connAttrsLEI) + len(mc.connector.encodedAttributes)
+
// Calculate packet length and get buffer with that size
- data, err := mc.buf.takeSmallBuffer(pktLen + 4)
+ data, err := mc.buf.takeBuffer(pktLen + 4)
if err != nil {
// cannot take the buffer. Something must be wrong with the connection
- errLog.Print(err)
+ mc.log(err)
return errBadConnNoWrite
}
@@ -338,14 +318,18 @@ func (mc *mysqlConn) writeHandshakeResponsePacket(authResp []byte, plugin string
data[10] = 0x00
data[11] = 0x00
- // Charset [1 byte]
+ // Collation ID [1 byte]
+ cname := mc.cfg.Collation
+ if cname == "" {
+ cname = defaultCollation
+ }
var found bool
- data[12], found = collations[mc.cfg.Collation]
+ data[12], found = collations[cname]
if !found {
// Note possibility for false negatives:
// could be triggered although the collation is valid if the
// collations map does not contain entries the server supports.
- return errors.New("unknown collation")
+ return fmt.Errorf("unknown collation: %q", cname)
}
// Filler [23 bytes] (all 0x00)
@@ -367,7 +351,6 @@ func (mc *mysqlConn) writeHandshakeResponsePacket(authResp []byte, plugin string
if err := tlsConn.Handshake(); err != nil {
return err
}
- mc.rawConn = mc.netConn
mc.netConn = tlsConn
mc.buf.nc = tlsConn
}
@@ -394,6 +377,10 @@ func (mc *mysqlConn) writeHandshakeResponsePacket(authResp []byte, plugin string
data[pos] = 0x00
pos++
+ // Connection Attributes
+ pos += copy(data[pos:], connAttrsLEI)
+ pos += copy(data[pos:], []byte(mc.connector.encodedAttributes))
+
// Send Auth packet
return mc.writePacket(data[:pos])
}
@@ -404,7 +391,7 @@ func (mc *mysqlConn) writeAuthSwitchPacket(authData []byte) error {
data, err := mc.buf.takeSmallBuffer(pktLen)
if err != nil {
// cannot take the buffer. Something must be wrong with the connection
- errLog.Print(err)
+ mc.log(err)
return errBadConnNoWrite
}
@@ -424,7 +411,7 @@ func (mc *mysqlConn) writeCommandPacket(command byte) error {
data, err := mc.buf.takeSmallBuffer(4 + 1)
if err != nil {
// cannot take the buffer. Something must be wrong with the connection
- errLog.Print(err)
+ mc.log(err)
return errBadConnNoWrite
}
@@ -443,7 +430,7 @@ func (mc *mysqlConn) writeCommandPacketStr(command byte, arg string) error {
data, err := mc.buf.takeBuffer(pktLen + 4)
if err != nil {
// cannot take the buffer. Something must be wrong with the connection
- errLog.Print(err)
+ mc.log(err)
return errBadConnNoWrite
}
@@ -464,7 +451,7 @@ func (mc *mysqlConn) writeCommandPacketUint32(command byte, arg uint32) error {
data, err := mc.buf.takeSmallBuffer(4 + 1 + 4)
if err != nil {
// cannot take the buffer. Something must be wrong with the connection
- errLog.Print(err)
+ mc.log(err)
return errBadConnNoWrite
}
@@ -495,7 +482,9 @@ func (mc *mysqlConn) readAuthResult() ([]byte, string, error) {
switch data[0] {
case iOK:
- return nil, "", mc.handleOkPacket(data)
+ // resultUnchanged, since auth happens before any queries or
+ // commands have been executed.
+ return nil, "", mc.resultUnchanged().handleOkPacket(data)
case iAuthMoreData:
return data[1:], "", err
@@ -518,9 +507,9 @@ func (mc *mysqlConn) readAuthResult() ([]byte, string, error) {
}
}
-// Returns error if Packet is not an 'Result OK'-Packet
-func (mc *mysqlConn) readResultOK() error {
- data, err := mc.readPacket()
+// Returns error if Packet is not a 'Result OK'-Packet
+func (mc *okHandler) readResultOK() error {
+ data, err := mc.conn().readPacket()
if err != nil {
return err
}
@@ -528,13 +517,17 @@ func (mc *mysqlConn) readResultOK() error {
if data[0] == iOK {
return mc.handleOkPacket(data)
}
- return mc.handleErrorPacket(data)
+ return mc.conn().handleErrorPacket(data)
}
// Result Set Header Packet
// http://dev.mysql.com/doc/internals/en/com-query-response.html#packet-ProtocolText::Resultset
-func (mc *mysqlConn) readResultSetHeaderPacket() (int, error) {
- data, err := mc.readPacket()
+func (mc *okHandler) readResultSetHeaderPacket() (int, error) {
+ // handleOkPacket replaces both values; other cases leave the values unchanged.
+ mc.result.affectedRows = append(mc.result.affectedRows, 0)
+ mc.result.insertIds = append(mc.result.insertIds, 0)
+
+ data, err := mc.conn().readPacket()
if err == nil {
switch data[0] {
@@ -542,19 +535,16 @@ func (mc *mysqlConn) readResultSetHeaderPacket() (int, error) {
return 0, mc.handleOkPacket(data)
case iERR:
- return 0, mc.handleErrorPacket(data)
+ return 0, mc.conn().handleErrorPacket(data)
case iLocalInFile:
return 0, mc.handleInFileRequest(string(data[1:]))
}
// column count
- num, _, n := readLengthEncodedInteger(data)
- if n-len(data) == 0 {
- return int(num), nil
- }
-
- return 0, ErrMalformPkt
+ num, _, _ := readLengthEncodedInteger(data)
+ // ignore remaining data in the packet. see #1478.
+ return int(num), nil
}
return 0, err
}
@@ -607,18 +597,61 @@ func readStatus(b []byte) statusFlag {
return statusFlag(b[0]) | statusFlag(b[1])<<8
}
+// Returns an instance of okHandler for codepaths where mysqlConn.result doesn't
+// need to be cleared first (e.g. during authentication, or while additional
+// resultsets are being fetched.)
+func (mc *mysqlConn) resultUnchanged() *okHandler {
+ return (*okHandler)(mc)
+}
+
+// okHandler represents the state of the connection when mysqlConn.result has
+// been prepared for processing of OK packets.
+//
+// To correctly populate mysqlConn.result (updated by handleOkPacket()), all
+// callpaths must either:
+//
+// 1. first clear it using clearResult(), or
+// 2. confirm that they don't need to (by calling resultUnchanged()).
+//
+// Both return an instance of type *okHandler.
+type okHandler mysqlConn
+
+// Exposes the underlying type's methods.
+func (mc *okHandler) conn() *mysqlConn {
+ return (*mysqlConn)(mc)
+}
+
+// clearResult clears the connection's stored affectedRows and insertIds
+// fields.
+//
+// It returns a handler that can process OK responses.
+func (mc *mysqlConn) clearResult() *okHandler {
+ mc.result = mysqlResult{}
+ return (*okHandler)(mc)
+}
+
// Ok Packet
// http://dev.mysql.com/doc/internals/en/generic-response-packets.html#packet-OK_Packet
-func (mc *mysqlConn) handleOkPacket(data []byte) error {
+func (mc *okHandler) handleOkPacket(data []byte) error {
var n, m int
+ var affectedRows, insertId uint64
// 0x00 [1 byte]
// Affected rows [Length Coded Binary]
- mc.affectedRows, _, n = readLengthEncodedInteger(data[1:])
+ affectedRows, _, n = readLengthEncodedInteger(data[1:])
// Insert id [Length Coded Binary]
- mc.insertId, _, m = readLengthEncodedInteger(data[1+n:])
+ insertId, _, m = readLengthEncodedInteger(data[1+n:])
+
+ // Update for the current statement result (only used by
+ // readResultSetHeaderPacket).
+ if len(mc.result.affectedRows) > 0 {
+ mc.result.affectedRows[len(mc.result.affectedRows)-1] = int64(affectedRows)
+ }
+ if len(mc.result.insertIds) > 0 {
+ mc.result.insertIds[len(mc.result.insertIds)-1] = int64(insertId)
+ }
// server_status [2 bytes]
mc.status = readStatus(data[1+n+m : 1+n+m+2])
@@ -769,7 +802,8 @@ func (rows *textRows) readRow(dest []driver.Value) error {
for i := range dest {
// Read bytes and convert to string
- dest[i], isNull, n, err = readLengthEncodedString(data[pos:])
+ var buf []byte
+ buf, isNull, n, err = readLengthEncodedString(data[pos:])
pos += n
if err != nil {
@@ -781,19 +815,40 @@ func (rows *textRows) readRow(dest []driver.Value) error {
continue
}
- if !mc.parseTime {
- continue
- }
-
- // Parse time field
switch rows.rs.columns[i].fieldType {
case fieldTypeTimestamp,
fieldTypeDateTime,
fieldTypeDate,
fieldTypeNewDate:
- if dest[i], err = parseDateTime(dest[i].([]byte), mc.cfg.Loc); err != nil {
- return err
+ if mc.parseTime {
+ dest[i], err = parseDateTime(buf, mc.cfg.Loc)
+ } else {
+ dest[i] = buf
}
+
+ case fieldTypeTiny, fieldTypeShort, fieldTypeInt24, fieldTypeYear, fieldTypeLong:
+ dest[i], err = strconv.ParseInt(string(buf), 10, 64)
+
+ case fieldTypeLongLong:
+ if rows.rs.columns[i].flags&flagUnsigned != 0 {
+ dest[i], err = strconv.ParseUint(string(buf), 10, 64)
+ } else {
+ dest[i], err = strconv.ParseInt(string(buf), 10, 64)
+ }
+
+ case fieldTypeFloat:
+ var d float64
+ d, err = strconv.ParseFloat(string(buf), 32)
+ dest[i] = float32(d)
+
+ case fieldTypeDouble:
+ dest[i], err = strconv.ParseFloat(string(buf), 64)
+
+ default:
+ dest[i] = buf
+ }
+ if err != nil {
+ return err
}
}
@@ -938,7 +993,7 @@ func (stmt *mysqlStmt) writeExecutePacket(args []driver.Value) error {
}
if err != nil {
// cannot take the buffer. Something must be wrong with the connection
- errLog.Print(err)
+ mc.log(err)
return errBadConnNoWrite
}
@@ -1116,7 +1171,7 @@ func (stmt *mysqlStmt) writeExecutePacket(args []driver.Value) error {
if v.IsZero() {
b = append(b, "0000-00-00"...)
} else {
- b, err = appendDateTime(b, v.In(mc.cfg.Loc))
+ b, err = appendDateTime(b, v.In(mc.cfg.Loc), mc.cfg.timeTruncate)
if err != nil {
return err
}
@@ -1137,7 +1192,7 @@ func (stmt *mysqlStmt) writeExecutePacket(args []driver.Value) error {
if valuesCap != cap(paramValues) {
data = append(data[:pos], paramValues...)
if err = mc.buf.store(data); err != nil {
- errLog.Print(err)
+ mc.log(err)
return errBadConnNoWrite
}
}
@@ -1149,7 +1204,9 @@ func (stmt *mysqlStmt) writeExecutePacket(args []driver.Value) error {
return mc.writePacket(data)
}
-func (mc *mysqlConn) discardResults() error {
+// For each remaining resultset in the stream, discards its rows and updates
+// mc.affectedRows and mc.insertIds.
+func (mc *okHandler) discardResults() error {
for mc.status&statusMoreResultsExists != 0 {
resLen, err := mc.readResultSetHeaderPacket()
if err != nil {
@@ -1157,11 +1214,11 @@ func (mc *mysqlConn) discardResults() error {
}
if resLen > 0 {
// columns
- if err := mc.readUntilEOF(); err != nil {
+ if err := mc.conn().readUntilEOF(); err != nil {
return err
}
// rows
- if err := mc.readUntilEOF(); err != nil {
+ if err := mc.conn().readUntilEOF(); err != nil {
return err
}
}
diff --git a/dependencies/pkg/mod/github.com/go-sql-driver/mysql@v1.7.1/packets_test.go b/dependencies/pkg/mod/github.com/go-sql-driver/mysql@v1.8.1/packets_test.go
index b61e4db..fa4683e 100644
--- a/dependencies/pkg/mod/github.com/go-sql-driver/mysql@v1.7.1/packets_test.go
+++ b/dependencies/pkg/mod/github.com/go-sql-driver/mysql@v1.8.1/packets_test.go
@@ -96,9 +96,11 @@ var _ net.Conn = new(mockConn)
func newRWMockConn(sequence uint8) (*mockConn, *mysqlConn) {
conn := new(mockConn)
+ connector := newConnector(NewConfig())
mc := &mysqlConn{
buf: newBuffer(conn),
- cfg: NewConfig(),
+ cfg: connector.cfg,
+ connector: connector,
netConn: conn,
closech: make(chan struct{}),
maxAllowedPacket: defaultMaxAllowedPacket,
@@ -128,30 +130,34 @@ func TestReadPacketSingleByte(t *testing.T) {
}
func TestReadPacketWrongSequenceID(t *testing.T) {
- conn := new(mockConn)
- mc := &mysqlConn{
- buf: newBuffer(conn),
- }
-
- // too low sequence id
- conn.data = []byte{0x01, 0x00, 0x00, 0x00, 0xff}
- conn.maxReads = 1
- mc.sequence = 1
- _, err := mc.readPacket()
- if err != ErrPktSync {
- t.Errorf("expected ErrPktSync, got %v", err)
- }
-
- // reset
- conn.reads = 0
- mc.sequence = 0
- mc.buf = newBuffer(conn)
-
- // too high sequence id
- conn.data = []byte{0x01, 0x00, 0x00, 0x42, 0xff}
- _, err = mc.readPacket()
- if err != ErrPktSyncMul {
- t.Errorf("expected ErrPktSyncMul, got %v", err)
+ for _, testCase := range []struct {
+ ClientSequenceID byte
+ ServerSequenceID byte
+ ExpectedErr error
+ }{
+ {
+ ClientSequenceID: 1,
+ ServerSequenceID: 0,
+ ExpectedErr: ErrPktSync,
+ },
+ {
+ ClientSequenceID: 0,
+ ServerSequenceID: 0x42,
+ ExpectedErr: ErrPktSyncMul,
+ },
+ } {
+ conn, mc := newRWMockConn(testCase.ClientSequenceID)
+
+ conn.data = []byte{0x01, 0x00, 0x00, testCase.ServerSequenceID, 0xff}
+ _, err := mc.readPacket()
+ if err != testCase.ExpectedErr {
+ t.Errorf("expected %v, got %v", testCase.ExpectedErr, err)
+ }
+
+ // connection should not be returned to the pool in this state
+ if mc.IsValid() {
+ t.Errorf("expected IsValid() to be false")
+ }
}
}
@@ -179,7 +185,7 @@ func TestReadPacketSplit(t *testing.T) {
data[4] = 0x11
data[maxPacketSize+3] = 0x22
- // 2nd packet has payload length 0 and squence id 1
+ // 2nd packet has payload length 0 and sequence id 1
// 00 00 00 01
data[pkt2ofs+3] = 0x01
@@ -211,7 +217,7 @@ func TestReadPacketSplit(t *testing.T) {
data[pkt2ofs+4] = 0x33
data[pkt2ofs+maxPacketSize+3] = 0x44
- // 3rd packet has payload length 0 and squence id 2
+ // 3rd packet has payload length 0 and sequence id 2
// 00 00 00 02
data[pkt3ofs+3] = 0x02
@@ -265,6 +271,7 @@ func TestReadPacketFail(t *testing.T) {
mc := &mysqlConn{
buf: newBuffer(conn),
closech: make(chan struct{}),
+ cfg: NewConfig(),
}
// illegal empty (stand-alone) packet
diff --git a/dependencies/pkg/mod/github.com/go-sql-driver/mysql@v1.8.1/result.go b/dependencies/pkg/mod/github.com/go-sql-driver/mysql@v1.8.1/result.go
new file mode 100644
index 0000000..d516314
--- /dev/null
+++ b/dependencies/pkg/mod/github.com/go-sql-driver/mysql@v1.8.1/result.go
@@ -0,0 +1,50 @@
+// Go MySQL Driver - A MySQL-Driver for Go's database/sql package
+//
+// Copyright 2012 The Go-MySQL-Driver Authors. All rights reserved.
+//
+// This Source Code Form is subject to the terms of the Mozilla Public
+// License, v. 2.0. If a copy of the MPL was not distributed with this file,
+// You can obtain one at http://mozilla.org/MPL/2.0/.
+
+package mysql
+
+import "database/sql/driver"
+
+// Result exposes data not available through *connection.Result.
+//
+// This is accessible by executing statements using sql.Conn.Raw() and
+// downcasting the returned result:
+//
+// res, err := rawConn.Exec(...)
+// res.(mysql.Result).AllRowsAffected()
+type Result interface {
+ driver.Result
+ // AllRowsAffected returns a slice containing the affected rows for each
+ // executed statement.
+ AllRowsAffected() []int64
+ // AllLastInsertIds returns a slice containing the last inserted ID for each
+ // executed statement.
+ AllLastInsertIds() []int64
+}
+
+type mysqlResult struct {
+ // One entry in both slices is created for every executed statement result.
+ affectedRows []int64
+ insertIds []int64
+}
+
+func (res *mysqlResult) LastInsertId() (int64, error) {
+ return res.insertIds[len(res.insertIds)-1], nil
+}
+
+func (res *mysqlResult) RowsAffected() (int64, error) {
+ return res.affectedRows[len(res.affectedRows)-1], nil
+}
+
+func (res *mysqlResult) AllLastInsertIds() []int64 {
+ return append([]int64{}, res.insertIds...) // defensive copy
+}
+
+func (res *mysqlResult) AllRowsAffected() []int64 {
+ return append([]int64{}, res.affectedRows...) // defensive copy
+}
diff --git a/dependencies/pkg/mod/github.com/go-sql-driver/mysql@v1.7.1/rows.go b/dependencies/pkg/mod/github.com/go-sql-driver/mysql@v1.8.1/rows.go
index 888bdb5..81fa606 100644
--- a/dependencies/pkg/mod/github.com/go-sql-driver/mysql@v1.7.1/rows.go
+++ b/dependencies/pkg/mod/github.com/go-sql-driver/mysql@v1.8.1/rows.go
@@ -123,7 +123,8 @@ func (rows *mysqlRows) Close() (err error) {
err = mc.readUntilEOF()
}
if err == nil {
- if err = mc.discardResults(); err != nil {
+ handleOk := mc.clearResult()
+ if err = handleOk.discardResults(); err != nil {
return err
}
}
@@ -160,7 +161,15 @@ func (rows *mysqlRows) nextResultSet() (int, error) {
return 0, io.EOF
}
rows.rs = resultSet{}
- return rows.mc.readResultSetHeaderPacket()
+ // rows.mc.affectedRows and rows.mc.insertIds accumulate on each call to
+ // nextResultSet.
+ resLen, err := rows.mc.resultUnchanged().readResultSetHeaderPacket()
+ if err != nil {
+ // Clean up about multi-results flag
+ rows.rs.done = true
+ rows.mc.status = rows.mc.status & (^statusMoreResultsExists)
+ }
+ return resLen, err
}
func (rows *mysqlRows) nextNotEmptyResultSet() (int, error) {
diff --git a/dependencies/pkg/mod/github.com/go-sql-driver/mysql@v1.7.1/statement.go b/dependencies/pkg/mod/github.com/go-sql-driver/mysql@v1.8.1/statement.go
index 10ece8b..0436f22 100644
--- a/dependencies/pkg/mod/github.com/go-sql-driver/mysql@v1.7.1/statement.go
+++ b/dependencies/pkg/mod/github.com/go-sql-driver/mysql@v1.8.1/statement.go
@@ -51,7 +51,7 @@ func (stmt *mysqlStmt) CheckNamedValue(nv *driver.NamedValue) (err error) {
func (stmt *mysqlStmt) Exec(args []driver.Value) (driver.Result, error) {
if stmt.mc.closed.Load() {
- errLog.Print(ErrInvalidConn)
+ stmt.mc.log(ErrInvalidConn)
return nil, driver.ErrBadConn
}
// Send command
@@ -61,12 +61,10 @@ func (stmt *mysqlStmt) Exec(args []driver.Value) (driver.Result, error) {
}
mc := stmt.mc
-
- mc.affectedRows = 0
- mc.insertId = 0
+ handleOk := stmt.mc.clearResult()
// Read Result
- resLen, err := mc.readResultSetHeaderPacket()
+ resLen, err := handleOk.readResultSetHeaderPacket()
if err != nil {
return nil, err
}
@@ -83,14 +81,12 @@ func (stmt *mysqlStmt) Exec(args []driver.Value) (driver.Result, error) {
}
}
- if err := mc.discardResults(); err != nil {
+ if err := handleOk.discardResults(); err != nil {
return nil, err
}
- return &mysqlResult{
- affectedRows: int64(mc.affectedRows),
- insertId: int64(mc.insertId),
- }, nil
+ copied := mc.result
+ return &copied, nil
}
func (stmt *mysqlStmt) Query(args []driver.Value) (driver.Rows, error) {
@@ -99,7 +95,7 @@ func (stmt *mysqlStmt) Query(args []driver.Value) (driver.Rows, error) {
func (stmt *mysqlStmt) query(args []driver.Value) (*binaryRows, error) {
if stmt.mc.closed.Load() {
- errLog.Print(ErrInvalidConn)
+ stmt.mc.log(ErrInvalidConn)
return nil, driver.ErrBadConn
}
// Send command
@@ -111,7 +107,8 @@ func (stmt *mysqlStmt) query(args []driver.Value) (*binaryRows, error) {
mc := stmt.mc
// Read Result
- resLen, err := mc.readResultSetHeaderPacket()
+ handleOk := stmt.mc.clearResult()
+ resLen, err := handleOk.readResultSetHeaderPacket()
if err != nil {
return nil, err
}
@@ -144,7 +141,7 @@ type converter struct{}
// implementation does not. This function should be kept in sync with
// database/sql/driver defaultConverter.ConvertValue() except for that
// deliberate difference.
-func (c converter) ConvertValue(v interface{}) (driver.Value, error) {
+func (c converter) ConvertValue(v any) (driver.Value, error) {
if driver.IsValue(v) {
return v, nil
}
diff --git a/dependencies/pkg/mod/github.com/go-sql-driver/mysql@v1.7.1/statement_test.go b/dependencies/pkg/mod/github.com/go-sql-driver/mysql@v1.8.1/statement_test.go
index 2563ece..15f9d7c 100644
--- a/dependencies/pkg/mod/github.com/go-sql-driver/mysql@v1.7.1/statement_test.go
+++ b/dependencies/pkg/mod/github.com/go-sql-driver/mysql@v1.8.1/statement_test.go
@@ -77,7 +77,7 @@ func TestConvertPointer(t *testing.T) {
}
func TestConvertSignedIntegers(t *testing.T) {
- values := []interface{}{
+ values := []any{
int8(-42),
int16(-42),
int32(-42),
@@ -106,7 +106,7 @@ func (u myUint64) Value() (driver.Value, error) {
}
func TestConvertUnsignedIntegers(t *testing.T) {
- values := []interface{}{
+ values := []any{
uint8(42),
uint16(42),
uint32(42),
diff --git a/dependencies/pkg/mod/github.com/go-sql-driver/mysql@v1.7.1/transaction.go b/dependencies/pkg/mod/github.com/go-sql-driver/mysql@v1.8.1/transaction.go
index 4a4b610..4a4b610 100644
--- a/dependencies/pkg/mod/github.com/go-sql-driver/mysql@v1.7.1/transaction.go
+++ b/dependencies/pkg/mod/github.com/go-sql-driver/mysql@v1.8.1/transaction.go
diff --git a/dependencies/pkg/mod/github.com/go-sql-driver/mysql@v1.7.1/utils.go b/dependencies/pkg/mod/github.com/go-sql-driver/mysql@v1.8.1/utils.go
index 15dbd8d..cda24fe 100644
--- a/dependencies/pkg/mod/github.com/go-sql-driver/mysql@v1.7.1/utils.go
+++ b/dependencies/pkg/mod/github.com/go-sql-driver/mysql@v1.8.1/utils.go
@@ -36,7 +36,7 @@ var (
// registering it.
//
// rootCertPool := x509.NewCertPool()
-// pem, err := ioutil.ReadFile("/path/ca-cert.pem")
+// pem, err := os.ReadFile("/path/ca-cert.pem")
// if err != nil {
// log.Fatal(err)
// }
@@ -265,7 +265,11 @@ func parseBinaryDateTime(num uint64, data []byte, loc *time.Location) (driver.Va
return nil, fmt.Errorf("invalid DATETIME packet length %d", num)
}
-func appendDateTime(buf []byte, t time.Time) ([]byte, error) {
+func appendDateTime(buf []byte, t time.Time, timeTruncate time.Duration) ([]byte, error) {
+ if timeTruncate > 0 {
+ t = t.Truncate(timeTruncate)
+ }
+
year, month, day := t.Date()
hour, min, sec := t.Clock()
nsec := t.Nanosecond()
@@ -616,6 +620,11 @@ func appendLengthEncodedInteger(b []byte, n uint64) []byte {
byte(n>>32), byte(n>>40), byte(n>>48), byte(n>>56))
}
+func appendLengthEncodedString(b []byte, s string) []byte {
+ b = appendLengthEncodedInteger(b, uint64(len(s)))
+ return append(b, s...)
+}
+
// reserveBuffer checks cap(buf) and expand buffer to len(buf) + appendSize.
// If cap(buf) is not enough, reallocate new buffer.
func reserveBuffer(buf []byte, appendSize int) []byte {
diff --git a/dependencies/pkg/mod/github.com/go-sql-driver/mysql@v1.7.1/utils_test.go b/dependencies/pkg/mod/github.com/go-sql-driver/mysql@v1.8.1/utils_test.go
index 4e5fc3c..80aebdd 100644
--- a/dependencies/pkg/mod/github.com/go-sql-driver/mysql@v1.7.1/utils_test.go
+++ b/dependencies/pkg/mod/github.com/go-sql-driver/mysql@v1.8.1/utils_test.go
@@ -237,8 +237,10 @@ func TestIsolationLevelMapping(t *testing.T) {
func TestAppendDateTime(t *testing.T) {
tests := []struct {
- t time.Time
- str string
+ t time.Time
+ str string
+ timeTruncate time.Duration
+ expectedErr bool
}{
{
t: time.Date(1234, 5, 6, 0, 0, 0, 0, time.UTC),
@@ -276,34 +278,75 @@ func TestAppendDateTime(t *testing.T) {
t: time.Date(1, 1, 1, 0, 0, 0, 0, time.UTC),
str: "0001-01-01",
},
+ // Truncated time
+ {
+ t: time.Date(1234, 5, 6, 0, 0, 0, 0, time.UTC),
+ str: "1234-05-06",
+ timeTruncate: time.Second,
+ },
+ {
+ t: time.Date(4567, 12, 31, 12, 0, 0, 0, time.UTC),
+ str: "4567-12-31 12:00:00",
+ timeTruncate: time.Minute,
+ },
+ {
+ t: time.Date(2020, 5, 30, 12, 34, 0, 0, time.UTC),
+ str: "2020-05-30 12:34:00",
+ timeTruncate: 0,
+ },
+ {
+ t: time.Date(2020, 5, 30, 12, 34, 56, 0, time.UTC),
+ str: "2020-05-30 12:34:56",
+ timeTruncate: time.Second,
+ },
+ {
+ t: time.Date(2020, 5, 30, 22, 33, 44, 123000000, time.UTC),
+ str: "2020-05-30 22:33:44",
+ timeTruncate: time.Second,
+ },
+ {
+ t: time.Date(2020, 5, 30, 22, 33, 44, 123456000, time.UTC),
+ str: "2020-05-30 22:33:44.123",
+ timeTruncate: time.Millisecond,
+ },
+ {
+ t: time.Date(2020, 5, 30, 22, 33, 44, 123456789, time.UTC),
+ str: "2020-05-30 22:33:44",
+ timeTruncate: time.Second,
+ },
+ {
+ t: time.Date(9999, 12, 31, 23, 59, 59, 999999999, time.UTC),
+ str: "9999-12-31 23:59:59.999999999",
+ timeTruncate: 0,
+ },
+ {
+ t: time.Date(1, 1, 1, 1, 1, 1, 1, time.UTC),
+ str: "0001-01-01",
+ timeTruncate: 365 * 24 * time.Hour,
+ },
+ // year out of range
+ {
+ t: time.Date(0, 1, 1, 0, 0, 0, 0, time.UTC),
+ expectedErr: true,
+ },
+ {
+ t: time.Date(10000, 1, 1, 0, 0, 0, 0, time.UTC),
+ expectedErr: true,
+ },
}
for _, v := range tests {
buf := make([]byte, 0, 32)
- buf, _ = appendDateTime(buf, v.t)
+ buf, err := appendDateTime(buf, v.t, v.timeTruncate)
+ if err != nil {
+ if !v.expectedErr {
+ t.Errorf("appendDateTime(%v) returned an errror: %v", v.t, err)
+ }
+ continue
+ }
if str := string(buf); str != v.str {
t.Errorf("appendDateTime(%v), have: %s, want: %s", v.t, str, v.str)
}
}
-
- // year out of range
- {
- v := time.Date(0, 1, 1, 0, 0, 0, 0, time.UTC)
- buf := make([]byte, 0, 32)
- _, err := appendDateTime(buf, v)
- if err == nil {
- t.Error("want an error")
- return
- }
- }
- {
- v := time.Date(10000, 1, 1, 0, 0, 0, 0, time.UTC)
- buf := make([]byte, 0, 32)
- _, err := appendDateTime(buf, v)
- if err == nil {
- t.Error("want an error")
- return
- }
- }
}
func TestParseDateTime(t *testing.T) {
diff --git a/dependencies/pkg/mod/github.com/goccy/go-yaml@v1.11.0/.codecov.yml b/dependencies/pkg/mod/github.com/goccy/go-yaml@v1.11.3/.codecov.yml
index 8364eea..8364eea 100644
--- a/dependencies/pkg/mod/github.com/goccy/go-yaml@v1.11.0/.codecov.yml
+++ b/dependencies/pkg/mod/github.com/goccy/go-yaml@v1.11.3/.codecov.yml
diff --git a/dependencies/pkg/mod/github.com/goccy/go-yaml@v1.11.0/.github/FUNDING.yml b/dependencies/pkg/mod/github.com/goccy/go-yaml@v1.11.3/.github/FUNDING.yml
index ab4b632..ab4b632 100644
--- a/dependencies/pkg/mod/github.com/goccy/go-yaml@v1.11.0/.github/FUNDING.yml
+++ b/dependencies/pkg/mod/github.com/goccy/go-yaml@v1.11.3/.github/FUNDING.yml
diff --git a/dependencies/pkg/mod/github.com/goccy/go-yaml@v1.11.3/.github/ISSUE_TEMPLATE/bug_report.md b/dependencies/pkg/mod/github.com/goccy/go-yaml@v1.11.3/.github/ISSUE_TEMPLATE/bug_report.md
new file mode 100644
index 0000000..1aad475
--- /dev/null
+++ b/dependencies/pkg/mod/github.com/goccy/go-yaml@v1.11.3/.github/ISSUE_TEMPLATE/bug_report.md
@@ -0,0 +1,29 @@
+---
+name: Bug report
+about: Create a report to help us improve
+title: ''
+labels: bug
+assignees: ''
+
+---
+
+**Describe the bug**
+A clear and concise description of what the bug is.
+
+**To Reproduce**
+
+Please provide a minimum yaml content that can be reproduced.
+We are more than happy to use [Go Playground](https://go.dev/play)
+
+**Expected behavior**
+A clear and concise description of what you expected to happen.
+
+**Screenshots**
+If applicable, add screenshots to help explain your problem.
+
+**Version Variables**
+ - Go version: [e.g. 1.21 ]
+ - go-yaml's Version: [e.g. v1.11.1 ]
+
+**Additional context**
+Add any other context about the problem here.
diff --git a/dependencies/pkg/mod/github.com/goccy/go-yaml@v1.11.3/.github/ISSUE_TEMPLATE/feature_request.md b/dependencies/pkg/mod/github.com/goccy/go-yaml@v1.11.3/.github/ISSUE_TEMPLATE/feature_request.md
new file mode 100644
index 0000000..e301d68
--- /dev/null
+++ b/dependencies/pkg/mod/github.com/goccy/go-yaml@v1.11.3/.github/ISSUE_TEMPLATE/feature_request.md
@@ -0,0 +1,20 @@
+---
+name: Feature request
+about: Suggest an idea for this project
+title: ''
+labels: feature request
+assignees: ''
+
+---
+
+**Is your feature request related to a problem? Please describe.**
+A clear and concise description of what the problem is. Ex. I'm always frustrated when [...]
+
+**Describe the solution you'd like**
+A clear and concise description of what you want to happen.
+
+**Describe alternatives you've considered**
+A clear and concise description of any alternative solutions or features you've considered.
+
+**Additional context**
+Add any other context or screenshots about the feature request here.
diff --git a/dependencies/pkg/mod/github.com/goccy/go-yaml@v1.11.3/.github/pull_request_template.md b/dependencies/pkg/mod/github.com/goccy/go-yaml@v1.11.3/.github/pull_request_template.md
new file mode 100644
index 0000000..a1d6c8e
--- /dev/null
+++ b/dependencies/pkg/mod/github.com/goccy/go-yaml@v1.11.3/.github/pull_request_template.md
@@ -0,0 +1,4 @@
+Before submitting your PR, please confirm the following.
+
+- [ ] Describe the purpose for which you created this PR.
+- [ ] Create test code that corresponds to the modification \ No newline at end of file
diff --git a/dependencies/pkg/mod/github.com/goccy/go-yaml@v1.11.0/.github/workflows/go.yml b/dependencies/pkg/mod/github.com/goccy/go-yaml@v1.11.3/.github/workflows/go.yml
index a935b3f..647c692 100644
--- a/dependencies/pkg/mod/github.com/goccy/go-yaml@v1.11.0/.github/workflows/go.yml
+++ b/dependencies/pkg/mod/github.com/goccy/go-yaml@v1.11.3/.github/workflows/go.yml
@@ -10,15 +10,15 @@ jobs:
strategy:
matrix:
os: [ "ubuntu-latest", "macos-latest", "windows-latest" ]
- go-version: [ "1.18", "1.19", "1.20" ]
+ go-version: [ "1.19", "1.20", "1.21" ]
runs-on: ${{ matrix.os }}
steps:
+ - name: checkout
+ uses: actions/checkout@v4
- name: setup Go ${{ matrix.go-version }}
- uses: actions/setup-go@v3
+ uses: actions/setup-go@v4
with:
go-version: ${{ matrix.go-version }}
- - name: checkout
- uses: actions/checkout@v3
- name: test
run: |
make test
@@ -28,17 +28,17 @@ jobs:
strategy:
matrix:
os: [ "ubuntu-latest", "windows-latest" ]
- go-version: [ "1.18", "1.19", "1.20" ]
+ go-version: [ "1.19", "1.20", "1.21" ]
runs-on: ${{ matrix.os }}
env:
GOARCH: "386"
steps:
+ - name: checkout
+ uses: actions/checkout@v4
- name: setup Go ${{ matrix.go-version }}
- uses: actions/setup-go@v3
+ uses: actions/setup-go@v4
with:
go-version: ${{ matrix.go-version }}
- - name: checkout
- uses: actions/checkout@v3
- name: test
run: |
make simple-test
@@ -47,12 +47,12 @@ jobs:
name: ycat
runs-on: ubuntu-latest
steps:
+ - name: checkout
+ uses: actions/checkout@v4
- name: setup Go
- uses: actions/setup-go@v3
+ uses: actions/setup-go@v4
with:
- go-version: "1.20"
- - name: checkout
- uses: actions/checkout@v3
+ go-version: "1.21"
- name: build
run: |
make ycat/build
@@ -64,12 +64,12 @@ jobs:
name: Coverage
runs-on: ubuntu-latest
steps:
+ - name: checkout
+ uses: actions/checkout@v4
- name: setup Go
- uses: actions/setup-go@v3
+ uses: actions/setup-go@v4
with:
- go-version: "1.20"
- - name: checkout
- uses: actions/checkout@v3
+ go-version: "1.21"
- name: measure coverage
run: |
make cover
diff --git a/dependencies/pkg/mod/github.com/goccy/go-yaml@v1.11.0/CHANGELOG.md b/dependencies/pkg/mod/github.com/goccy/go-yaml@v1.11.3/CHANGELOG.md
index 2887563..c8f820d 100644
--- a/dependencies/pkg/mod/github.com/goccy/go-yaml@v1.11.0/CHANGELOG.md
+++ b/dependencies/pkg/mod/github.com/goccy/go-yaml@v1.11.3/CHANGELOG.md
@@ -1,3 +1,26 @@
+# 1.11.2 - 2023-09-15
+
+### Fix bugs
+
+- Fix quoted comments ( #370 )
+- Fix handle of space at start or last ( #376 )
+- Fix sequence with comment ( #390 )
+
+# 1.11.1 - 2023-09-14
+
+### Fix bugs
+
+- Handle `\r` in a double-quoted string the same as `\n` ( #372 )
+- Replace loop with n.Values = append(n.Values, target.Values...) ( #380 )
+- Skip encoding an inline field if it is null ( #386 )
+- Fix comment parsing with null value ( #388 )
+
+# 1.11.0 - 2023-04-03
+
+### Features
+
+- Supports dynamically switch encode and decode processing for a given type
+
# 1.10.1 - 2023-03-28
### Features
diff --git a/dependencies/pkg/mod/github.com/goccy/go-yaml@v1.11.0/LICENSE b/dependencies/pkg/mod/github.com/goccy/go-yaml@v1.11.3/LICENSE
index 04485ce..04485ce 100644
--- a/dependencies/pkg/mod/github.com/goccy/go-yaml@v1.11.0/LICENSE
+++ b/dependencies/pkg/mod/github.com/goccy/go-yaml@v1.11.3/LICENSE
diff --git a/dependencies/pkg/mod/github.com/goccy/go-yaml@v1.11.0/Makefile b/dependencies/pkg/mod/github.com/goccy/go-yaml@v1.11.3/Makefile
index 1b1d923..1b1d923 100644
--- a/dependencies/pkg/mod/github.com/goccy/go-yaml@v1.11.0/Makefile
+++ b/dependencies/pkg/mod/github.com/goccy/go-yaml@v1.11.3/Makefile
diff --git a/dependencies/pkg/mod/github.com/goccy/go-yaml@v1.11.0/README.md b/dependencies/pkg/mod/github.com/goccy/go-yaml@v1.11.3/README.md
index 9452349..9452349 100644
--- a/dependencies/pkg/mod/github.com/goccy/go-yaml@v1.11.0/README.md
+++ b/dependencies/pkg/mod/github.com/goccy/go-yaml@v1.11.3/README.md
diff --git a/dependencies/pkg/mod/github.com/goccy/go-yaml@v1.11.0/ast/ast.go b/dependencies/pkg/mod/github.com/goccy/go-yaml@v1.11.3/ast/ast.go
index f535a24..b4d5ec4 100644
--- a/dependencies/pkg/mod/github.com/goccy/go-yaml@v1.11.0/ast/ast.go
+++ b/dependencies/pkg/mod/github.com/goccy/go-yaml@v1.11.3/ast/ast.go
@@ -1506,9 +1506,7 @@ func (n *SequenceNode) Replace(idx int, value Node) error {
func (n *SequenceNode) Merge(target *SequenceNode) {
column := n.Start.Position.Column - target.Start.Position.Column
target.AddColumn(column)
- for _, value := range target.Values {
- n.Values = append(n.Values, value)
- }
+ n.Values = append(n.Values, target.Values...)
}
// SetIsFlowStyle set value to IsFlowStyle field recursively.
diff --git a/dependencies/pkg/mod/github.com/goccy/go-yaml@v1.11.0/ast/ast_test.go b/dependencies/pkg/mod/github.com/goccy/go-yaml@v1.11.3/ast/ast_test.go
index e7022b6..e7022b6 100644
--- a/dependencies/pkg/mod/github.com/goccy/go-yaml@v1.11.0/ast/ast_test.go
+++ b/dependencies/pkg/mod/github.com/goccy/go-yaml@v1.11.3/ast/ast_test.go
diff --git a/dependencies/pkg/mod/github.com/goccy/go-yaml@v1.11.0/cmd/ycat/ycat.go b/dependencies/pkg/mod/github.com/goccy/go-yaml@v1.11.3/cmd/ycat/ycat.go
index c70cb3b..c70cb3b 100644
--- a/dependencies/pkg/mod/github.com/goccy/go-yaml@v1.11.0/cmd/ycat/ycat.go
+++ b/dependencies/pkg/mod/github.com/goccy/go-yaml@v1.11.3/cmd/ycat/ycat.go
diff --git a/dependencies/pkg/mod/github.com/goccy/go-yaml@v1.11.0/decode.go b/dependencies/pkg/mod/github.com/goccy/go-yaml@v1.11.3/decode.go
index d3dbabc..d3dbabc 100644
--- a/dependencies/pkg/mod/github.com/goccy/go-yaml@v1.11.0/decode.go
+++ b/dependencies/pkg/mod/github.com/goccy/go-yaml@v1.11.3/decode.go
diff --git a/dependencies/pkg/mod/github.com/goccy/go-yaml@v1.11.0/decode_test.go b/dependencies/pkg/mod/github.com/goccy/go-yaml@v1.11.3/decode_test.go
index 2549bea..cabfd33 100644
--- a/dependencies/pkg/mod/github.com/goccy/go-yaml@v1.11.0/decode_test.go
+++ b/dependencies/pkg/mod/github.com/goccy/go-yaml@v1.11.3/decode_test.go
@@ -68,6 +68,18 @@ func TestDecoder(t *testing.T) {
map[string]string{"v": "1.234"},
},
{
+ "v: \" foo\"\n",
+ map[string]string{"v": " foo"},
+ },
+ {
+ "v: \"foo \"\n",
+ map[string]string{"v": "foo "},
+ },
+ {
+ "v: \" foo \"\n",
+ map[string]string{"v": " foo "},
+ },
+ {
"v: false\n",
map[string]bool{"v": false},
},
@@ -425,6 +437,14 @@ func TestDecoder(t *testing.T) {
`"1": "a\x2Fb\u002Fc\U0000002Fd"`,
map[interface{}]interface{}{"1": `a/b/c/d`},
},
+ {
+ "'1': \"2\\n3\"",
+ map[interface{}]interface{}{"1": "2\n3"},
+ },
+ {
+ "'1': \"2\\r\\n3\"",
+ map[interface{}]interface{}{"1": "2\r\n3"},
+ },
{
"a: -b_c",
@@ -841,6 +861,14 @@ func TestDecoder(t *testing.T) {
},
},
{
+ "v:\n- A\n- |-\n B\n C\n\n\n",
+ map[string][]string{
+ "v": {
+ "A", "B\nC",
+ },
+ },
+ },
+ {
"v:\n- A\n- >-\n B\n C\n",
map[string][]string{
"v": {
@@ -849,6 +877,14 @@ func TestDecoder(t *testing.T) {
},
},
{
+ "v:\n- A\n- >-\n B\n C\n\n\n",
+ map[string][]string{
+ "v": {
+ "A", "B C",
+ },
+ },
+ },
+ {
"a: b\nc: d\n",
struct {
A string
diff --git a/dependencies/pkg/mod/github.com/goccy/go-yaml@v1.11.0/encode.go b/dependencies/pkg/mod/github.com/goccy/go-yaml@v1.11.3/encode.go
index 7d8d81e..3b9b298 100644
--- a/dependencies/pkg/mod/github.com/goccy/go-yaml@v1.11.0/encode.go
+++ b/dependencies/pkg/mod/github.com/goccy/go-yaml@v1.11.3/encode.go
@@ -823,6 +823,10 @@ func (e *Encoder) encodeStruct(ctx context.Context, value reflect.Value, column
}
mapNode, ok := value.(ast.MapNode)
if !ok {
+ // if an inline field is null, skip encoding it
+ if _, ok := value.(*ast.NullNode); ok {
+ continue
+ }
return nil, xerrors.Errorf("inline value is must be map or struct type")
}
mapIter := mapNode.MapRange()
diff --git a/dependencies/pkg/mod/github.com/goccy/go-yaml@v1.11.0/encode_test.go b/dependencies/pkg/mod/github.com/goccy/go-yaml@v1.11.3/encode_test.go
index 74b1aa5..3ff6f1c 100644
--- a/dependencies/pkg/mod/github.com/goccy/go-yaml@v1.11.0/encode_test.go
+++ b/dependencies/pkg/mod/github.com/goccy/go-yaml@v1.11.3/encode_test.go
@@ -304,6 +304,21 @@ func TestEncoder(t *testing.T) {
nil,
},
{
+ "a: \" b\"\n",
+ map[string]string{"a": " b"},
+ nil,
+ },
+ {
+ "a: \"b \"\n",
+ map[string]string{"a": "b "},
+ nil,
+ },
+ {
+ "a: \" b \"\n",
+ map[string]string{"a": " b "},
+ nil,
+ },
+ {
"a: 100.5\n",
map[string]interface{}{
"a": 100.5,
@@ -980,6 +995,30 @@ c: true
}
}
+func TestEncoder_InlineNil(t *testing.T) {
+ type base struct {
+ A int
+ B string
+ }
+ var buf bytes.Buffer
+ enc := yaml.NewEncoder(&buf)
+ if err := enc.Encode(struct {
+ *base `yaml:",inline"`
+ C bool
+ }{
+ C: true,
+ }); err != nil {
+ t.Fatalf("%+v", err)
+ }
+ expect := `
+c: true
+`
+ actual := "\n" + buf.String()
+ if expect != actual {
+ t.Fatalf("inline marshal error: expect=[%s] actual=[%s]", expect, actual)
+ }
+}
+
func TestEncoder_Flow(t *testing.T) {
var buf bytes.Buffer
enc := yaml.NewEncoder(&buf, yaml.Flow(true))
diff --git a/dependencies/pkg/mod/github.com/goccy/go-yaml@v1.11.0/error.go b/dependencies/pkg/mod/github.com/goccy/go-yaml@v1.11.3/error.go
index 163dcc5..163dcc5 100644
--- a/dependencies/pkg/mod/github.com/goccy/go-yaml@v1.11.0/error.go
+++ b/dependencies/pkg/mod/github.com/goccy/go-yaml@v1.11.3/error.go
diff --git a/dependencies/pkg/mod/github.com/goccy/go-yaml@v1.11.0/go.mod b/dependencies/pkg/mod/github.com/goccy/go-yaml@v1.11.3/go.mod
index f6e74c3..4550ff3 100644
--- a/dependencies/pkg/mod/github.com/goccy/go-yaml@v1.11.0/go.mod
+++ b/dependencies/pkg/mod/github.com/goccy/go-yaml@v1.11.3/go.mod
@@ -1,6 +1,6 @@
module github.com/goccy/go-yaml
-go 1.18
+go 1.19
require (
github.com/fatih/color v1.10.0
diff --git a/dependencies/pkg/mod/github.com/goccy/go-yaml@v1.11.0/go.sum b/dependencies/pkg/mod/github.com/goccy/go-yaml@v1.11.3/go.sum
index 7249df6..7249df6 100644
--- a/dependencies/pkg/mod/github.com/goccy/go-yaml@v1.11.0/go.sum
+++ b/dependencies/pkg/mod/github.com/goccy/go-yaml@v1.11.3/go.sum
diff --git a/dependencies/pkg/mod/github.com/goccy/go-yaml@v1.11.0/internal/errors/error.go b/dependencies/pkg/mod/github.com/goccy/go-yaml@v1.11.3/internal/errors/error.go
index 7f1ea9a..7f1ea9a 100644
--- a/dependencies/pkg/mod/github.com/goccy/go-yaml@v1.11.0/internal/errors/error.go
+++ b/dependencies/pkg/mod/github.com/goccy/go-yaml@v1.11.3/internal/errors/error.go
diff --git a/dependencies/pkg/mod/github.com/goccy/go-yaml@v1.11.0/lexer/lexer.go b/dependencies/pkg/mod/github.com/goccy/go-yaml@v1.11.3/lexer/lexer.go
index 3207f4f..3207f4f 100644
--- a/dependencies/pkg/mod/github.com/goccy/go-yaml@v1.11.0/lexer/lexer.go
+++ b/dependencies/pkg/mod/github.com/goccy/go-yaml@v1.11.3/lexer/lexer.go
diff --git a/dependencies/pkg/mod/github.com/goccy/go-yaml@v1.11.0/lexer/lexer_test.go b/dependencies/pkg/mod/github.com/goccy/go-yaml@v1.11.3/lexer/lexer_test.go
index 0d8f648..0d8f648 100644
--- a/dependencies/pkg/mod/github.com/goccy/go-yaml@v1.11.0/lexer/lexer_test.go
+++ b/dependencies/pkg/mod/github.com/goccy/go-yaml@v1.11.3/lexer/lexer_test.go
diff --git a/dependencies/pkg/mod/github.com/goccy/go-yaml@v1.11.0/option.go b/dependencies/pkg/mod/github.com/goccy/go-yaml@v1.11.3/option.go
index eab5d43..eab5d43 100644
--- a/dependencies/pkg/mod/github.com/goccy/go-yaml@v1.11.0/option.go
+++ b/dependencies/pkg/mod/github.com/goccy/go-yaml@v1.11.3/option.go
diff --git a/dependencies/pkg/mod/github.com/goccy/go-yaml@v1.11.0/parser/context.go b/dependencies/pkg/mod/github.com/goccy/go-yaml@v1.11.3/parser/context.go
index 99f18b1..42cc4f8 100644
--- a/dependencies/pkg/mod/github.com/goccy/go-yaml@v1.11.0/parser/context.go
+++ b/dependencies/pkg/mod/github.com/goccy/go-yaml@v1.11.3/parser/context.go
@@ -13,7 +13,6 @@ type context struct {
idx int
size int
tokens token.Tokens
- mode Mode
path string
}
@@ -56,7 +55,6 @@ func (c *context) copy() *context {
idx: c.idx,
size: c.size,
tokens: append(token.Tokens{}, c.tokens...),
- mode: c.mode,
path: c.path,
}
}
@@ -145,10 +143,6 @@ func (c *context) afterNextNotCommentToken() *token.Token {
return nil
}
-func (c *context) enabledComment() bool {
- return c.mode&ParseComments != 0
-}
-
func (c *context) isCurrentCommentToken() bool {
tk := c.currentToken()
if tk == nil {
@@ -193,7 +187,6 @@ func newContext(tokens token.Tokens, mode Mode) *context {
idx: 0,
size: len(filteredTokens),
tokens: token.Tokens(filteredTokens),
- mode: mode,
path: "$",
}
}
diff --git a/dependencies/pkg/mod/github.com/goccy/go-yaml@v1.11.0/parser/parser.go b/dependencies/pkg/mod/github.com/goccy/go-yaml@v1.11.3/parser/parser.go
index 568e6ad..13ada50 100644
--- a/dependencies/pkg/mod/github.com/goccy/go-yaml@v1.11.0/parser/parser.go
+++ b/dependencies/pkg/mod/github.com/goccy/go-yaml@v1.11.3/parser/parser.go
@@ -156,15 +156,38 @@ func (p *parser) createMapValueNode(ctx *context, key ast.MapKeyNode, colonToken
ctx.insertToken(ctx.idx, nullToken)
return ast.Null(nullToken), nil
}
-
+ var comment *ast.CommentGroupNode
+ if tk.Type == token.CommentType {
+ comment = p.parseCommentOnly(ctx)
+ if comment != nil {
+ comment.SetPath(ctx.withChild(key.GetToken().Value).path)
+ }
+ tk = ctx.currentToken()
+ }
if tk.Position.Column == key.GetToken().Position.Column && tk.Type == token.StringType {
// in this case,
// ----
// key: <value does not defined>
// next
+
nullToken := p.createNullToken(colonToken)
ctx.insertToken(ctx.idx, nullToken)
- return ast.Null(nullToken), nil
+ nullNode := ast.Null(nullToken)
+
+ if comment != nil {
+ nullNode.SetComment(comment)
+ } else {
+ // If there is a comment, it is already bound to the key node,
+ // so remove the comment from the key to bind it to the null value.
+ keyComment := key.GetComment()
+ if keyComment != nil {
+ if err := key.SetComment(nil); err != nil {
+ return nil, err
+ }
+ nullNode.SetComment(keyComment)
+ }
+ }
+ return nullNode, nil
}
if tk.Position.Column < key.GetToken().Position.Column {
@@ -174,13 +197,20 @@ func (p *parser) createMapValueNode(ctx *context, key ast.MapKeyNode, colonToken
// next
nullToken := p.createNullToken(colonToken)
ctx.insertToken(ctx.idx, nullToken)
- return ast.Null(nullToken), nil
+ nullNode := ast.Null(nullToken)
+ if comment != nil {
+ nullNode.SetComment(comment)
+ }
+ return nullNode, nil
}
value, err := p.parseToken(ctx, ctx.currentToken())
if err != nil {
return nil, errors.Wrapf(err, "failed to parse mapping 'value' node")
}
+ if comment != nil {
+ value.SetComment(comment)
+ }
return value, nil
}
@@ -304,10 +334,9 @@ func (p *parser) parseSequenceEntry(ctx *context) (*ast.SequenceNode, error) {
if tk.Type == token.CommentType {
comment = p.parseCommentOnly(ctx)
tk = ctx.currentToken()
- if tk.Type != token.SequenceEntryType {
- break
+ if tk.Type == token.SequenceEntryType {
+ ctx.progress(1) // skip sequence token
}
- ctx.progress(1) // skip sequence token
}
value, err := p.parseToken(ctx.withIndex(uint(len(sequenceNode.Values))), ctx.currentToken())
if err != nil {
diff --git a/dependencies/pkg/mod/github.com/goccy/go-yaml@v1.11.0/parser/parser_test.go b/dependencies/pkg/mod/github.com/goccy/go-yaml@v1.11.3/parser/parser_test.go
index 6044699..8d697e8 100644
--- a/dependencies/pkg/mod/github.com/goccy/go-yaml@v1.11.0/parser/parser_test.go
+++ b/dependencies/pkg/mod/github.com/goccy/go-yaml@v1.11.3/parser/parser_test.go
@@ -7,6 +7,7 @@ import (
"strings"
"testing"
+ "github.com/goccy/go-yaml"
"github.com/goccy/go-yaml/ast"
"github.com/goccy/go-yaml/lexer"
"github.com/goccy/go-yaml/parser"
@@ -808,6 +809,125 @@ foo: > # comment
}
}
+func TestCommentWithNull(t *testing.T) {
+ t.Run("same line", func(t *testing.T) {
+ content := `
+foo:
+ bar: # comment
+ baz: 1
+`
+ expected := `
+foo:
+ bar: null # comment
+ baz: 1`
+ f, err := parser.ParseBytes([]byte(content), parser.ParseComments)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if len(f.Docs) != 1 {
+ t.Fatal("failed to parse content with same line comment")
+ }
+ if f.Docs[0].String() != strings.TrimPrefix(expected, "\n") {
+ t.Fatal("failed to parse comment")
+ }
+ })
+ t.Run("next line", func(t *testing.T) {
+ content := `
+foo:
+ bar:
+ # comment
+ baz: 1
+`
+ expected := `
+foo:
+ bar: null # comment
+ baz: 1`
+ f, err := parser.ParseBytes([]byte(content), parser.ParseComments)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if len(f.Docs) != 1 {
+ t.Fatal("failed to parse content with next line comment")
+ }
+ if f.Docs[0].String() != strings.TrimPrefix(expected, "\n") {
+ t.Fatal("failed to parse comment")
+ }
+ })
+ t.Run("next line and different indent", func(t *testing.T) {
+ content := `
+foo:
+ bar:
+ # comment
+baz: 1`
+ f, err := parser.ParseBytes([]byte(content), parser.ParseComments)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if len(f.Docs) != 1 {
+ t.Fatal("failed to parse content with next line comment")
+ }
+ expected := `
+foo:
+ bar: null # comment
+baz: 1`
+ if f.Docs[0].String() != strings.TrimPrefix(expected, "\n") {
+ t.Fatal("failed to parse comment")
+ }
+ })
+}
+
+func TestSequenceComment(t *testing.T) {
+ content := `
+foo:
+ - # comment
+ bar: 1
+baz:
+ - xxx
+`
+ f, err := parser.ParseBytes([]byte(content), parser.ParseComments)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if len(f.Docs) != 1 {
+ t.Fatal("failed to parse content with next line with sequence")
+ }
+ expected := `
+foo:
+ # comment
+ - bar: 1
+baz:
+ - xxx`
+ if f.Docs[0].String() != strings.TrimPrefix(expected, "\n") {
+ t.Fatal("failed to parse comment")
+ }
+ t.Run("foo[0].bar", func(t *testing.T) {
+ path, err := yaml.PathString("$.foo[0].bar")
+ if err != nil {
+ t.Fatal(err)
+ }
+ v, err := path.FilterFile(f)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if v.String() != "1" {
+ t.Fatal("failed to get foo[0].bar value")
+ }
+ })
+ t.Run("baz[0]", func(t *testing.T) {
+ path, err := yaml.PathString("$.baz[0]")
+ if err != nil {
+ t.Fatal(err)
+ }
+ v, err := path.FilterFile(f)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if v.String() != "xxx" {
+ t.Fatal("failed to get baz[0] value")
+ }
+ })
+}
+
func TestNodePath(t *testing.T) {
yml := `
a: # commentA
@@ -824,6 +944,8 @@ a: # commentA
i: fuga # commentI
j: piyo # commentJ
k.l.m.n: moge # commentKLMN
+o#p: hogera # commentOP
+q#.r: hogehoge # commentQR
`
f, err := parser.ParseBytes([]byte(yml), parser.ParseComments)
if err != nil {
@@ -854,6 +976,8 @@ k.l.m.n: moge # commentKLMN
"$.a.i",
"$.j",
"$.'k.l.m.n'",
+ "$.o#p",
+ "$.'q#.r'",
}
if !reflect.DeepEqual(expectedPaths, commentPaths) {
t.Fatalf("failed to get YAMLPath to the comment node:\nexpected[%s]\ngot [%s]", expectedPaths, commentPaths)
diff --git a/dependencies/pkg/mod/github.com/goccy/go-yaml@v1.11.0/parser/testdata/cr.yml b/dependencies/pkg/mod/github.com/goccy/go-yaml@v1.11.3/parser/testdata/cr.yml
index 37b52a6..37b52a6 100644
--- a/dependencies/pkg/mod/github.com/goccy/go-yaml@v1.11.0/parser/testdata/cr.yml
+++ b/dependencies/pkg/mod/github.com/goccy/go-yaml@v1.11.3/parser/testdata/cr.yml
diff --git a/dependencies/pkg/mod/github.com/goccy/go-yaml@v1.11.0/parser/testdata/crlf.yml b/dependencies/pkg/mod/github.com/goccy/go-yaml@v1.11.3/parser/testdata/crlf.yml
index 85929f9..85929f9 100644
--- a/dependencies/pkg/mod/github.com/goccy/go-yaml@v1.11.0/parser/testdata/crlf.yml
+++ b/dependencies/pkg/mod/github.com/goccy/go-yaml@v1.11.3/parser/testdata/crlf.yml
diff --git a/dependencies/pkg/mod/github.com/goccy/go-yaml@v1.11.0/parser/testdata/lf.yml b/dependencies/pkg/mod/github.com/goccy/go-yaml@v1.11.3/parser/testdata/lf.yml
index d2fe51f..d2fe51f 100644
--- a/dependencies/pkg/mod/github.com/goccy/go-yaml@v1.11.0/parser/testdata/lf.yml
+++ b/dependencies/pkg/mod/github.com/goccy/go-yaml@v1.11.3/parser/testdata/lf.yml
diff --git a/dependencies/pkg/mod/github.com/goccy/go-yaml@v1.11.0/path.go b/dependencies/pkg/mod/github.com/goccy/go-yaml@v1.11.3/path.go
index 7a0c3b1..72554bd 100644
--- a/dependencies/pkg/mod/github.com/goccy/go-yaml@v1.11.0/path.go
+++ b/dependencies/pkg/mod/github.com/goccy/go-yaml@v1.11.3/path.go
@@ -500,11 +500,29 @@ func newSelectorNode(selector string) *selectorNode {
}
func (n *selectorNode) filter(node ast.Node) (ast.Node, error) {
+ selector := n.selector
+ if len(selector) > 1 && selector[0] == '\'' && selector[len(selector)-1] == '\'' {
+ selector = selector[1 : len(selector)-1]
+ }
switch node.Type() {
case ast.MappingType:
for _, value := range node.(*ast.MappingNode).Values {
key := value.Key.GetToken().Value
- if key == n.selector {
+ if len(key) > 0 {
+ switch key[0] {
+ case '"':
+ var err error
+ key, err = strconv.Unquote(key)
+ if err != nil {
+ return nil, errors.Wrapf(err, "failed to unquote")
+ }
+ case '\'':
+ if len(key) > 1 && key[len(key)-1] == '\'' {
+ key = key[1 : len(key)-1]
+ }
+ }
+ }
+ if key == selector {
if n.child == nil {
return value.Value, nil
}
@@ -518,7 +536,7 @@ func (n *selectorNode) filter(node ast.Node) (ast.Node, error) {
case ast.MappingValueType:
value := node.(*ast.MappingValueNode)
key := value.Key.GetToken().Value
- if key == n.selector {
+ if key == selector {
if n.child == nil {
return value.Value, nil
}
@@ -571,7 +589,9 @@ func (n *selectorNode) replace(node ast.Node, target ast.Node) error {
}
func (n *selectorNode) String() string {
- s := fmt.Sprintf(".%s", n.selector)
+ var builder PathBuilder
+ selector := builder.normalizeSelectorName(n.selector)
+ s := fmt.Sprintf(".%s", selector)
if n.child != nil {
s += n.child.String()
}
diff --git a/dependencies/pkg/mod/github.com/goccy/go-yaml@v1.11.0/path_test.go b/dependencies/pkg/mod/github.com/goccy/go-yaml@v1.11.3/path_test.go
index 4271da5..c0073ce 100644
--- a/dependencies/pkg/mod/github.com/goccy/go-yaml@v1.11.0/path_test.go
+++ b/dependencies/pkg/mod/github.com/goccy/go-yaml@v1.11.3/path_test.go
@@ -61,6 +61,8 @@ store:
bicycle:
color: red
price: 19.95
+ bicycle*unicycle:
+ price: 20.25
`
tests := []struct {
name string
@@ -97,6 +99,11 @@ store:
path: builder().Root().Child("store").Child("bicycle").Child("price").Build(),
expected: float64(19.95),
},
+ {
+ name: `$.store.'bicycle*unicycle'.price`,
+ path: builder().Root().Child("store").Child(`bicycle*unicycle`).Child("price").Build(),
+ expected: float64(20.25),
+ },
}
t.Run("PathString", func(t *testing.T) {
for _, test := range tests {
diff --git a/dependencies/pkg/mod/github.com/goccy/go-yaml@v1.11.0/printer/printer.go b/dependencies/pkg/mod/github.com/goccy/go-yaml@v1.11.3/printer/printer.go
index d5e25dc..d5e25dc 100644
--- a/dependencies/pkg/mod/github.com/goccy/go-yaml@v1.11.0/printer/printer.go
+++ b/dependencies/pkg/mod/github.com/goccy/go-yaml@v1.11.3/printer/printer.go
diff --git a/dependencies/pkg/mod/github.com/goccy/go-yaml@v1.11.0/printer/printer_test.go b/dependencies/pkg/mod/github.com/goccy/go-yaml@v1.11.3/printer/printer_test.go
index 2afa74f..2afa74f 100644
--- a/dependencies/pkg/mod/github.com/goccy/go-yaml@v1.11.0/printer/printer_test.go
+++ b/dependencies/pkg/mod/github.com/goccy/go-yaml@v1.11.3/printer/printer_test.go
diff --git a/dependencies/pkg/mod/github.com/goccy/go-yaml@v1.11.0/scanner/context.go b/dependencies/pkg/mod/github.com/goccy/go-yaml@v1.11.3/scanner/context.go
index 09d0a2d..3aaec56 100644
--- a/dependencies/pkg/mod/github.com/goccy/go-yaml@v1.11.0/scanner/context.go
+++ b/dependencies/pkg/mod/github.com/goccy/go-yaml@v1.11.3/scanner/context.go
@@ -196,9 +196,16 @@ func (c *Context) existsBuffer() bool {
func (c *Context) bufferedSrc() []rune {
src := c.buf[:c.notSpaceCharPos]
- if len(src) > 0 && src[len(src)-1] == '\n' && c.isDocument() && c.literalOpt == "-" {
- // remove end '\n' character
- src = src[:len(src)-1]
+ if c.isDocument() && c.literalOpt == "-" {
+ // remove end '\n' character and trailing empty lines
+ // https://yaml.org/spec/1.2.2/#8112-block-chomping-indicator
+ for {
+ if len(src) > 0 && src[len(src)-1] == '\n' {
+ src = src[:len(src)-1]
+ continue
+ }
+ break
+ }
}
return src
}
diff --git a/dependencies/pkg/mod/github.com/goccy/go-yaml@v1.11.0/scanner/scanner.go b/dependencies/pkg/mod/github.com/goccy/go-yaml@v1.11.3/scanner/scanner.go
index ce9c665..b0eac48 100644
--- a/dependencies/pkg/mod/github.com/goccy/go-yaml@v1.11.0/scanner/scanner.go
+++ b/dependencies/pkg/mod/github.com/goccy/go-yaml@v1.11.3/scanner/scanner.go
@@ -339,6 +339,11 @@ func (s *Scanner) scanDoubleQuote(ctx *Context) (tk *token.Token, pos int) {
value = append(value, '\n')
idx++
continue
+ case 'r':
+ ctx.addOriginBuf(nextChar)
+ value = append(value, '\r')
+ idx++
+ continue
case 'v':
ctx.addOriginBuf(nextChar)
value = append(value, '\v')
diff --git a/dependencies/pkg/mod/github.com/goccy/go-yaml@v1.11.0/stdlib_quote.go b/dependencies/pkg/mod/github.com/goccy/go-yaml@v1.11.3/stdlib_quote.go
index be50ae6..be50ae6 100644
--- a/dependencies/pkg/mod/github.com/goccy/go-yaml@v1.11.0/stdlib_quote.go
+++ b/dependencies/pkg/mod/github.com/goccy/go-yaml@v1.11.3/stdlib_quote.go
diff --git a/dependencies/pkg/mod/github.com/goccy/go-yaml@v1.11.0/struct.go b/dependencies/pkg/mod/github.com/goccy/go-yaml@v1.11.3/struct.go
index a3da8dd..a3da8dd 100644
--- a/dependencies/pkg/mod/github.com/goccy/go-yaml@v1.11.0/struct.go
+++ b/dependencies/pkg/mod/github.com/goccy/go-yaml@v1.11.3/struct.go
diff --git a/dependencies/pkg/mod/github.com/goccy/go-yaml@v1.11.0/testdata/anchor.yml b/dependencies/pkg/mod/github.com/goccy/go-yaml@v1.11.3/testdata/anchor.yml
index c016c7f..c016c7f 100644
--- a/dependencies/pkg/mod/github.com/goccy/go-yaml@v1.11.0/testdata/anchor.yml
+++ b/dependencies/pkg/mod/github.com/goccy/go-yaml@v1.11.3/testdata/anchor.yml
diff --git a/dependencies/pkg/mod/github.com/goccy/go-yaml@v1.11.0/token/token.go b/dependencies/pkg/mod/github.com/goccy/go-yaml@v1.11.3/token/token.go
index 182f4be..c86caab 100644
--- a/dependencies/pkg/mod/github.com/goccy/go-yaml@v1.11.0/token/token.go
+++ b/dependencies/pkg/mod/github.com/goccy/go-yaml@v1.11.3/token/token.go
@@ -623,12 +623,12 @@ func IsNeedQuoted(value string) bool {
}
first := value[0]
switch first {
- case '*', '&', '[', '{', '}', ']', ',', '!', '|', '>', '%', '\'', '"', '@':
+ case '*', '&', '[', '{', '}', ']', ',', '!', '|', '>', '%', '\'', '"', '@', ' ':
return true
}
last := value[len(value)-1]
switch last {
- case ':':
+ case ':', ' ':
return true
}
if looksLikeTimeValue(value) {
diff --git a/dependencies/pkg/mod/github.com/goccy/go-yaml@v1.11.0/token/token_test.go b/dependencies/pkg/mod/github.com/goccy/go-yaml@v1.11.3/token/token_test.go
index 4f5764f..dc1b4df 100644
--- a/dependencies/pkg/mod/github.com/goccy/go-yaml@v1.11.0/token/token_test.go
+++ b/dependencies/pkg/mod/github.com/goccy/go-yaml@v1.11.3/token/token_test.go
@@ -118,6 +118,9 @@ func TestIsNeedQuoted(t *testing.T) {
"Off",
"OFF",
"@test",
+ " a",
+ " a ",
+ "a ",
}
for i, test := range needQuotedTests {
if !token.IsNeedQuoted(test) {
diff --git a/dependencies/pkg/mod/github.com/goccy/go-yaml@v1.11.0/validate.go b/dependencies/pkg/mod/github.com/goccy/go-yaml@v1.11.3/validate.go
index 20a2d6d..20a2d6d 100644
--- a/dependencies/pkg/mod/github.com/goccy/go-yaml@v1.11.0/validate.go
+++ b/dependencies/pkg/mod/github.com/goccy/go-yaml@v1.11.3/validate.go
diff --git a/dependencies/pkg/mod/github.com/goccy/go-yaml@v1.11.0/validate_test.go b/dependencies/pkg/mod/github.com/goccy/go-yaml@v1.11.3/validate_test.go
index 265deb8..265deb8 100644
--- a/dependencies/pkg/mod/github.com/goccy/go-yaml@v1.11.0/validate_test.go
+++ b/dependencies/pkg/mod/github.com/goccy/go-yaml@v1.11.3/validate_test.go
diff --git a/dependencies/pkg/mod/github.com/goccy/go-yaml@v1.11.0/yaml.go b/dependencies/pkg/mod/github.com/goccy/go-yaml@v1.11.3/yaml.go
index 2e541d8..25b1056 100644
--- a/dependencies/pkg/mod/github.com/goccy/go-yaml@v1.11.0/yaml.go
+++ b/dependencies/pkg/mod/github.com/goccy/go-yaml@v1.11.3/yaml.go
@@ -89,43 +89,42 @@ func (s MapSlice) ToMap() map[interface{}]interface{} {
//
// The field tag format accepted is:
//
-// `(...) yaml:"[<key>][,<flag1>[,<flag2>]]" (...)`
+// `(...) yaml:"[<key>][,<flag1>[,<flag2>]]" (...)`
//
// The following flags are currently supported:
//
-// omitempty Only include the field if it's not set to the zero
-// value for the type or to empty slices or maps.
-// Zero valued structs will be omitted if all their public
-// fields are zero, unless they implement an IsZero
-// method (see the IsZeroer interface type), in which
-// case the field will be included if that method returns true.
+// omitempty Only include the field if it's not set to the zero
+// value for the type or to empty slices or maps.
+// Zero valued structs will be omitted if all their public
+// fields are zero, unless they implement an IsZero
+// method (see the IsZeroer interface type), in which
+// case the field will be included if that method returns true.
//
-// flow Marshal using a flow style (useful for structs,
-// sequences and maps).
+// flow Marshal using a flow style (useful for structs,
+// sequences and maps).
//
-// inline Inline the field, which must be a struct or a map,
-// causing all of its fields or keys to be processed as if
-// they were part of the outer struct. For maps, keys must
-// not conflict with the yaml keys of other struct fields.
+// inline Inline the field, which must be a struct or a map,
+// causing all of its fields or keys to be processed as if
+// they were part of the outer struct. For maps, keys must
+// not conflict with the yaml keys of other struct fields.
//
-// anchor Marshal with anchor. If want to define anchor name explicitly, use anchor=name style.
-// Otherwise, if used 'anchor' name only, used the field name lowercased as the anchor name
+// anchor Marshal with anchor. If want to define anchor name explicitly, use anchor=name style.
+// Otherwise, if used 'anchor' name only, used the field name lowercased as the anchor name
//
-// alias Marshal with alias. If want to define alias name explicitly, use alias=name style.
-// Otherwise, If omitted alias name and the field type is pointer type,
-// assigned anchor name automatically from same pointer address.
+// alias Marshal with alias. If want to define alias name explicitly, use alias=name style.
+// Otherwise, If omitted alias name and the field type is pointer type,
+// assigned anchor name automatically from same pointer address.
//
// In addition, if the key is "-", the field is ignored.
//
// For example:
//
-// type T struct {
-// F int `yaml:"a,omitempty"`
-// B int
-// }
-// yaml.Marshal(&T{B: 2}) // Returns "b: 2\n"
-// yaml.Marshal(&T{F: 1}) // Returns "a: 1\nb: 0\n"
-//
+// type T struct {
+// F int `yaml:"a,omitempty"`
+// B int
+// }
+// yaml.Marshal(&T{B: 2}) // Returns "b: 2\n"
+// yaml.Marshal(&T{F: 1}) // Returns "a: 1\nb: 0\n"
func Marshal(v interface{}) ([]byte, error) {
return MarshalWithOptions(v)
}
@@ -167,16 +166,15 @@ func ValueToNode(v interface{}, opts ...EncodeOption) (ast.Node, error) {
//
// For example:
//
-// type T struct {
-// F int `yaml:"a,omitempty"`
-// B int
-// }
-// var t T
-// yaml.Unmarshal([]byte("a: 1\nb: 2"), &t)
+// type T struct {
+// F int `yaml:"a,omitempty"`
+// B int
+// }
+// var t T
+// yaml.Unmarshal([]byte("a: 1\nb: 2"), &t)
//
// See the documentation of Marshal for the format of tags and a list of
// supported tag options.
-//
func Unmarshal(data []byte, v interface{}) error {
return UnmarshalWithOptions(data, v)
}
diff --git a/dependencies/pkg/mod/github.com/goccy/go-yaml@v1.11.0/yaml_test.go b/dependencies/pkg/mod/github.com/goccy/go-yaml@v1.11.3/yaml_test.go
index 5828629..4446d31 100644
--- a/dependencies/pkg/mod/github.com/goccy/go-yaml@v1.11.0/yaml_test.go
+++ b/dependencies/pkg/mod/github.com/goccy/go-yaml@v1.11.3/yaml_test.go
@@ -2,6 +2,7 @@ package yaml_test
import (
"bytes"
+ "strings"
"testing"
"github.com/google/go-cmp/cmp"
@@ -1161,6 +1162,94 @@ hoge:
})
}
+func TestCommentMapRoundTrip(t *testing.T) {
+ // test that an unmarshal and marshal round trip retains comments.
+ // if expect is empty, the test will use the input as the expected result.
+ tests := []struct {
+ name string
+ source string
+ expect string
+ encodeOptions []yaml.EncodeOption
+ }{
+ {
+ name: "simple map",
+ source: `
+# head
+a: 1 # line
+# foot
+`,
+ },
+ {
+ name: "nesting",
+ source: `
+- 1 # one
+- foo:
+ a: b
+ # c comment
+ c: d # d comment
+ "e#f": g # g comment
+ h.i: j # j comment
+ "k.#l": m # m comment
+`,
+ },
+ {
+ name: "single quotes",
+ source: `'a#b': c # c comment`,
+ encodeOptions: []yaml.EncodeOption{yaml.UseSingleQuote(true)},
+ },
+ {
+ name: "single quotes added in encode",
+ source: `a#b: c # c comment`,
+ encodeOptions: []yaml.EncodeOption{yaml.UseSingleQuote(true)},
+ expect: `'a#b': c # c comment`,
+ },
+ {
+ name: "double quotes quotes transformed to single quotes",
+ source: `"a#b": c # c comment`,
+ encodeOptions: []yaml.EncodeOption{yaml.UseSingleQuote(true)},
+ expect: `'a#b': c # c comment`,
+ },
+ {
+ name: "single quotes quotes transformed to double quotes",
+ source: `'a#b': c # c comment`,
+ expect: `"a#b": c # c comment`,
+ },
+ {
+ name: "single quotes removed",
+ source: `'a': b # b comment`,
+ expect: `a: b # b comment`,
+ },
+ {
+ name: "double quotes removed",
+ source: `"a": b # b comment`,
+ expect: `a: b # b comment`,
+ },
+ }
+ for _, test := range tests {
+ t.Run(test.name, func(t *testing.T) {
+ var val any
+ cm := yaml.CommentMap{}
+ source := strings.TrimSpace(test.source)
+ if err := yaml.UnmarshalWithOptions([]byte(source), &val, yaml.CommentToMap(cm)); err != nil {
+ t.Fatalf("%+v", err)
+ }
+ marshaled, err := yaml.MarshalWithOptions(val, append(test.encodeOptions, yaml.WithComment(cm))...)
+ if err != nil {
+ t.Fatalf("%+v", err)
+ }
+ got := strings.TrimSpace(string(marshaled))
+ expect := strings.TrimSpace(test.expect)
+ if expect == "" {
+ expect = source
+ }
+ if got != expect {
+ t.Fatalf("expected:\n%s\ngot:\n%s\n", expect, got)
+ }
+ })
+
+ }
+}
+
func TestRegisterCustomMarshaler(t *testing.T) {
type T struct {
Foo []byte `yaml:"foo"`
diff --git a/dependencies/pkg/mod/github.com/google/uuid@v1.3.0/.travis.yml b/dependencies/pkg/mod/github.com/google/uuid@v1.3.0/.travis.yml
deleted file mode 100644
index d8156a6..0000000
--- a/dependencies/pkg/mod/github.com/google/uuid@v1.3.0/.travis.yml
+++ /dev/null
@@ -1,9 +0,0 @@
-language: go
-
-go:
- - 1.4.3
- - 1.5.3
- - tip
-
-script:
- - go test -v ./...
diff --git a/dependencies/pkg/mod/github.com/google/uuid@v1.3.0/CONTRIBUTING.md b/dependencies/pkg/mod/github.com/google/uuid@v1.3.0/CONTRIBUTING.md
deleted file mode 100644
index 04fdf09..0000000
--- a/dependencies/pkg/mod/github.com/google/uuid@v1.3.0/CONTRIBUTING.md
+++ /dev/null
@@ -1,10 +0,0 @@
-# How to contribute
-
-We definitely welcome patches and contribution to this project!
-
-### Legal requirements
-
-In order to protect both you and ourselves, you will need to sign the
-[Contributor License Agreement](https://cla.developers.google.com/clas).
-
-You may have already signed it for other Google projects.
diff --git a/dependencies/pkg/mod/github.com/google/uuid@v1.6.0/.github/CODEOWNERS b/dependencies/pkg/mod/github.com/google/uuid@v1.6.0/.github/CODEOWNERS
new file mode 100644
index 0000000..91a5fa3
--- /dev/null
+++ b/dependencies/pkg/mod/github.com/google/uuid@v1.6.0/.github/CODEOWNERS
@@ -0,0 +1,6 @@
+# Code owners file.
+# This file controls who is tagged for review for any given pull request.
+
+# For syntax help see:
+# https://help.github.com/en/github/creating-cloning-and-archiving-repositories/about-code-owners#codeowners-syntax
+* @google/go-uuid-contributors
diff --git a/dependencies/pkg/mod/github.com/google/uuid@v1.6.0/.github/release-please.yml b/dependencies/pkg/mod/github.com/google/uuid@v1.6.0/.github/release-please.yml
new file mode 100644
index 0000000..e1bcc3c
--- /dev/null
+++ b/dependencies/pkg/mod/github.com/google/uuid@v1.6.0/.github/release-please.yml
@@ -0,0 +1,2 @@
+handleGHRelease: true
+releaseType: go
diff --git a/dependencies/pkg/mod/github.com/google/uuid@v1.6.0/.github/workflows/apidiff.yaml b/dependencies/pkg/mod/github.com/google/uuid@v1.6.0/.github/workflows/apidiff.yaml
new file mode 100644
index 0000000..37c0b5f
--- /dev/null
+++ b/dependencies/pkg/mod/github.com/google/uuid@v1.6.0/.github/workflows/apidiff.yaml
@@ -0,0 +1,26 @@
+---
+name: apidiff
+on:
+ pull_request:
+ branches:
+ - master
+permissions:
+ contents: read
+jobs:
+ compat:
+ runs-on: ubuntu-latest
+ steps:
+ - uses: actions/setup-go@v4
+ with:
+ go-version: 1.21
+ - run: go install golang.org/x/exp/cmd/apidiff@latest
+ - uses: actions/checkout@v3
+ with:
+ ref: master
+ - run: apidiff -w uuid.baseline .
+ - uses: actions/checkout@v3
+ with:
+ clean: false
+ - run: |
+ apidiff -incompatible uuid.baseline . > diff.txt
+ cat diff.txt && ! [ -s diff.txt ]
diff --git a/dependencies/pkg/mod/github.com/google/uuid@v1.6.0/.github/workflows/tests.yaml b/dependencies/pkg/mod/github.com/google/uuid@v1.6.0/.github/workflows/tests.yaml
new file mode 100644
index 0000000..138397c
--- /dev/null
+++ b/dependencies/pkg/mod/github.com/google/uuid@v1.6.0/.github/workflows/tests.yaml
@@ -0,0 +1,20 @@
+---
+name: tests
+on:
+ pull_request:
+ branches:
+ - master
+permissions:
+ contents: read
+jobs:
+ unit-tests:
+ strategy:
+ matrix:
+ go-version: [1.19, 1.20.x, 1.21]
+ runs-on: ubuntu-latest
+ steps:
+ - uses: actions/checkout@v3
+ - uses: actions/setup-go@v4
+ with:
+ go-version: ${{ matrix.go-version }}
+ - run: go test -v ./...
diff --git a/dependencies/pkg/mod/github.com/google/uuid@v1.6.0/CHANGELOG.md b/dependencies/pkg/mod/github.com/google/uuid@v1.6.0/CHANGELOG.md
new file mode 100644
index 0000000..7ec5ac7
--- /dev/null
+++ b/dependencies/pkg/mod/github.com/google/uuid@v1.6.0/CHANGELOG.md
@@ -0,0 +1,41 @@
+# Changelog
+
+## [1.6.0](https://github.com/google/uuid/compare/v1.5.0...v1.6.0) (2024-01-16)
+
+
+### Features
+
+* add Max UUID constant ([#149](https://github.com/google/uuid/issues/149)) ([c58770e](https://github.com/google/uuid/commit/c58770eb495f55fe2ced6284f93c5158a62e53e3))
+
+
+### Bug Fixes
+
+* fix typo in version 7 uuid documentation ([#153](https://github.com/google/uuid/issues/153)) ([016b199](https://github.com/google/uuid/commit/016b199544692f745ffc8867b914129ecb47ef06))
+* Monotonicity in UUIDv7 ([#150](https://github.com/google/uuid/issues/150)) ([a2b2b32](https://github.com/google/uuid/commit/a2b2b32373ff0b1a312b7fdf6d38a977099698a6))
+
+## [1.5.0](https://github.com/google/uuid/compare/v1.4.0...v1.5.0) (2023-12-12)
+
+
+### Features
+
+* Validate UUID without creating new UUID ([#141](https://github.com/google/uuid/issues/141)) ([9ee7366](https://github.com/google/uuid/commit/9ee7366e66c9ad96bab89139418a713dc584ae29))
+
+## [1.4.0](https://github.com/google/uuid/compare/v1.3.1...v1.4.0) (2023-10-26)
+
+
+### Features
+
+* UUIDs slice type with Strings() convenience method ([#133](https://github.com/google/uuid/issues/133)) ([cd5fbbd](https://github.com/google/uuid/commit/cd5fbbdd02f3e3467ac18940e07e062be1f864b4))
+
+### Fixes
+
+* Clarify that Parse's job is to parse but not necessarily validate strings. (Documents current behavior)
+
+## [1.3.1](https://github.com/google/uuid/compare/v1.3.0...v1.3.1) (2023-08-18)
+
+
+### Bug Fixes
+
+* Use .EqualFold() to parse urn prefixed UUIDs ([#118](https://github.com/google/uuid/issues/118)) ([574e687](https://github.com/google/uuid/commit/574e6874943741fb99d41764c705173ada5293f0))
+
+## Changelog
diff --git a/dependencies/pkg/mod/github.com/google/uuid@v1.6.0/CONTRIBUTING.md b/dependencies/pkg/mod/github.com/google/uuid@v1.6.0/CONTRIBUTING.md
new file mode 100644
index 0000000..a502fdc
--- /dev/null
+++ b/dependencies/pkg/mod/github.com/google/uuid@v1.6.0/CONTRIBUTING.md
@@ -0,0 +1,26 @@
+# How to contribute
+
+We definitely welcome patches and contribution to this project!
+
+### Tips
+
+Commits must be formatted according to the [Conventional Commits Specification](https://www.conventionalcommits.org).
+
+Always try to include a test case! If it is not possible or not necessary,
+please explain why in the pull request description.
+
+### Releasing
+
+Commits that would precipitate a SemVer change, as described in the Conventional
+Commits Specification, will trigger [`release-please`](https://github.com/google-github-actions/release-please-action)
+to create a release candidate pull request. Once submitted, `release-please`
+will create a release.
+
+For tips on how to work with `release-please`, see its documentation.
+
+### Legal requirements
+
+In order to protect both you and ourselves, you will need to sign the
+[Contributor License Agreement](https://cla.developers.google.com/clas).
+
+You may have already signed it for other Google projects.
diff --git a/dependencies/pkg/mod/github.com/google/uuid@v1.3.0/CONTRIBUTORS b/dependencies/pkg/mod/github.com/google/uuid@v1.6.0/CONTRIBUTORS
index b4bb97f..b4bb97f 100644
--- a/dependencies/pkg/mod/github.com/google/uuid@v1.3.0/CONTRIBUTORS
+++ b/dependencies/pkg/mod/github.com/google/uuid@v1.6.0/CONTRIBUTORS
diff --git a/dependencies/pkg/mod/github.com/google/uuid@v1.3.0/LICENSE b/dependencies/pkg/mod/github.com/google/uuid@v1.6.0/LICENSE
index 5dc6826..5dc6826 100644
--- a/dependencies/pkg/mod/github.com/google/uuid@v1.3.0/LICENSE
+++ b/dependencies/pkg/mod/github.com/google/uuid@v1.6.0/LICENSE
diff --git a/dependencies/pkg/mod/github.com/google/uuid@v1.3.0/README.md b/dependencies/pkg/mod/github.com/google/uuid@v1.6.0/README.md
index f765a46..3e9a618 100644
--- a/dependencies/pkg/mod/github.com/google/uuid@v1.3.0/README.md
+++ b/dependencies/pkg/mod/github.com/google/uuid@v1.6.0/README.md
@@ -1,6 +1,6 @@
-# uuid ![build status](https://travis-ci.org/google/uuid.svg?branch=master)
+# uuid
The uuid package generates and inspects UUIDs based on
-[RFC 4122](http://tools.ietf.org/html/rfc4122)
+[RFC 4122](https://datatracker.ietf.org/doc/html/rfc4122)
and DCE 1.1: Authentication and Security Services.
This package is based on the github.com/pborman/uuid package (previously named
@@ -9,10 +9,12 @@ a UUID is a 16 byte array rather than a byte slice. One loss due to this
change is the ability to represent an invalid UUID (vs a NIL UUID).
###### Install
-`go get github.com/google/uuid`
+```sh
+go get github.com/google/uuid
+```
###### Documentation
-[![GoDoc](https://godoc.org/github.com/google/uuid?status.svg)](http://godoc.org/github.com/google/uuid)
+[![Go Reference](https://pkg.go.dev/badge/github.com/google/uuid.svg)](https://pkg.go.dev/github.com/google/uuid)
Full `go doc` style documentation for the package can be viewed online without
installing this package by using the GoDoc site here:
diff --git a/dependencies/pkg/mod/github.com/google/uuid@v1.3.0/dce.go b/dependencies/pkg/mod/github.com/google/uuid@v1.6.0/dce.go
index fa820b9..fa820b9 100644
--- a/dependencies/pkg/mod/github.com/google/uuid@v1.3.0/dce.go
+++ b/dependencies/pkg/mod/github.com/google/uuid@v1.6.0/dce.go
diff --git a/dependencies/pkg/mod/github.com/google/uuid@v1.3.0/doc.go b/dependencies/pkg/mod/github.com/google/uuid@v1.6.0/doc.go
index 5b8a4b9..5b8a4b9 100644
--- a/dependencies/pkg/mod/github.com/google/uuid@v1.3.0/doc.go
+++ b/dependencies/pkg/mod/github.com/google/uuid@v1.6.0/doc.go
diff --git a/dependencies/pkg/mod/github.com/google/uuid@v1.3.0/go.mod b/dependencies/pkg/mod/github.com/google/uuid@v1.6.0/go.mod
index fc84cd7..fc84cd7 100644
--- a/dependencies/pkg/mod/github.com/google/uuid@v1.3.0/go.mod
+++ b/dependencies/pkg/mod/github.com/google/uuid@v1.6.0/go.mod
diff --git a/dependencies/pkg/mod/github.com/google/uuid@v1.3.0/hash.go b/dependencies/pkg/mod/github.com/google/uuid@v1.6.0/hash.go
index b404f4b..dc60082 100644
--- a/dependencies/pkg/mod/github.com/google/uuid@v1.3.0/hash.go
+++ b/dependencies/pkg/mod/github.com/google/uuid@v1.6.0/hash.go
@@ -17,6 +17,12 @@ var (
NameSpaceOID = Must(Parse("6ba7b812-9dad-11d1-80b4-00c04fd430c8"))
NameSpaceX500 = Must(Parse("6ba7b814-9dad-11d1-80b4-00c04fd430c8"))
Nil UUID // empty UUID, all zeros
+
+ // The Max UUID is special form of UUID that is specified to have all 128 bits set to 1.
+ Max = UUID{
+ 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
+ 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
+ }
)
// NewHash returns a new UUID derived from the hash of space concatenated with
diff --git a/dependencies/pkg/mod/github.com/google/uuid@v1.3.0/json_test.go b/dependencies/pkg/mod/github.com/google/uuid@v1.6.0/json_test.go
index 245f91e..34241d5 100644
--- a/dependencies/pkg/mod/github.com/google/uuid@v1.3.0/json_test.go
+++ b/dependencies/pkg/mod/github.com/google/uuid@v1.6.0/json_test.go
@@ -31,6 +31,57 @@ func TestJSON(t *testing.T) {
}
}
+func TestJSONUnmarshal(t *testing.T) {
+ type S struct {
+ ID1 UUID
+ ID2 UUID `json:"ID2,omitempty"`
+ }
+
+ testCases := map[string]struct {
+ data []byte
+ expectedError error
+ expectedResult UUID
+ }{
+ "success": {
+ data: []byte(`{"ID1": "f47ac10b-58cc-0372-8567-0e02b2c3d479"}`),
+ expectedError: nil,
+ expectedResult: testUUID,
+ },
+ "zero": {
+ data: []byte(`{"ID1": "00000000-0000-0000-0000-000000000000"}`),
+ expectedError: nil,
+ expectedResult: Nil,
+ },
+ "null": {
+ data: []byte(`{"ID1": null}`),
+ expectedError: nil,
+ expectedResult: Nil,
+ },
+ "empty": {
+ data: []byte(`{"ID1": ""}`),
+ expectedError: invalidLengthError{len: 0},
+ expectedResult: Nil,
+ },
+ "omitempty": {
+ data: []byte(`{"ID2": ""}`),
+ expectedError: invalidLengthError{len: 0},
+ expectedResult: Nil,
+ },
+ }
+
+ for name, tc := range testCases {
+ t.Run(name, func(t *testing.T) {
+ var s S
+ if err := json.Unmarshal(tc.data, &s); err != tc.expectedError {
+ t.Errorf("unexpected error: got %v, want %v", err, tc.expectedError)
+ }
+ if !reflect.DeepEqual(s.ID1, tc.expectedResult) {
+ t.Errorf("got %#v, want %#v", s.ID1, tc.expectedResult)
+ }
+ })
+ }
+}
+
func BenchmarkUUID_MarshalJSON(b *testing.B) {
x := &struct {
UUID UUID `json:"uuid"`
diff --git a/dependencies/pkg/mod/github.com/google/uuid@v1.3.0/marshal.go b/dependencies/pkg/mod/github.com/google/uuid@v1.6.0/marshal.go
index 14bd340..14bd340 100644
--- a/dependencies/pkg/mod/github.com/google/uuid@v1.3.0/marshal.go
+++ b/dependencies/pkg/mod/github.com/google/uuid@v1.6.0/marshal.go
diff --git a/dependencies/pkg/mod/github.com/google/uuid@v1.3.0/node.go b/dependencies/pkg/mod/github.com/google/uuid@v1.6.0/node.go
index d651a2b..d651a2b 100644
--- a/dependencies/pkg/mod/github.com/google/uuid@v1.3.0/node.go
+++ b/dependencies/pkg/mod/github.com/google/uuid@v1.6.0/node.go
diff --git a/dependencies/pkg/mod/github.com/google/uuid@v1.3.0/node_js.go b/dependencies/pkg/mod/github.com/google/uuid@v1.6.0/node_js.go
index 24b78ed..b2a0bc8 100644
--- a/dependencies/pkg/mod/github.com/google/uuid@v1.3.0/node_js.go
+++ b/dependencies/pkg/mod/github.com/google/uuid@v1.6.0/node_js.go
@@ -7,6 +7,6 @@
package uuid
// getHardwareInterface returns nil values for the JS version of the code.
-// This remvoves the "net" dependency, because it is not used in the browser.
+// This removes the "net" dependency, because it is not used in the browser.
// Using the "net" library inflates the size of the transpiled JS code by 673k bytes.
func getHardwareInterface(name string) (string, []byte) { return "", nil }
diff --git a/dependencies/pkg/mod/github.com/google/uuid@v1.3.0/node_net.go b/dependencies/pkg/mod/github.com/google/uuid@v1.6.0/node_net.go
index 0cbbcdd..0cbbcdd 100644
--- a/dependencies/pkg/mod/github.com/google/uuid@v1.3.0/node_net.go
+++ b/dependencies/pkg/mod/github.com/google/uuid@v1.6.0/node_net.go
diff --git a/dependencies/pkg/mod/github.com/google/uuid@v1.3.0/null.go b/dependencies/pkg/mod/github.com/google/uuid@v1.6.0/null.go
index d7fcbf2..d7fcbf2 100644
--- a/dependencies/pkg/mod/github.com/google/uuid@v1.3.0/null.go
+++ b/dependencies/pkg/mod/github.com/google/uuid@v1.6.0/null.go
diff --git a/dependencies/pkg/mod/github.com/google/uuid@v1.3.0/null_test.go b/dependencies/pkg/mod/github.com/google/uuid@v1.6.0/null_test.go
index c6e5e69..c6e5e69 100644
--- a/dependencies/pkg/mod/github.com/google/uuid@v1.3.0/null_test.go
+++ b/dependencies/pkg/mod/github.com/google/uuid@v1.6.0/null_test.go
diff --git a/dependencies/pkg/mod/github.com/google/uuid@v1.3.0/seq_test.go b/dependencies/pkg/mod/github.com/google/uuid@v1.6.0/seq_test.go
index 4f6c549..4f6c549 100644
--- a/dependencies/pkg/mod/github.com/google/uuid@v1.3.0/seq_test.go
+++ b/dependencies/pkg/mod/github.com/google/uuid@v1.6.0/seq_test.go
diff --git a/dependencies/pkg/mod/github.com/google/uuid@v1.3.0/sql.go b/dependencies/pkg/mod/github.com/google/uuid@v1.6.0/sql.go
index 2e02ec0..2e02ec0 100644
--- a/dependencies/pkg/mod/github.com/google/uuid@v1.3.0/sql.go
+++ b/dependencies/pkg/mod/github.com/google/uuid@v1.6.0/sql.go
diff --git a/dependencies/pkg/mod/github.com/google/uuid@v1.3.0/sql_test.go b/dependencies/pkg/mod/github.com/google/uuid@v1.6.0/sql_test.go
index 1803dfd..1803dfd 100644
--- a/dependencies/pkg/mod/github.com/google/uuid@v1.3.0/sql_test.go
+++ b/dependencies/pkg/mod/github.com/google/uuid@v1.6.0/sql_test.go
diff --git a/dependencies/pkg/mod/github.com/google/uuid@v1.3.0/time.go b/dependencies/pkg/mod/github.com/google/uuid@v1.6.0/time.go
index e6ef06c..c351129 100644
--- a/dependencies/pkg/mod/github.com/google/uuid@v1.3.0/time.go
+++ b/dependencies/pkg/mod/github.com/google/uuid@v1.6.0/time.go
@@ -108,12 +108,23 @@ func setClockSequence(seq int) {
}
// Time returns the time in 100s of nanoseconds since 15 Oct 1582 encoded in
-// uuid. The time is only defined for version 1 and 2 UUIDs.
+// uuid. The time is only defined for version 1, 2, 6 and 7 UUIDs.
func (uuid UUID) Time() Time {
- time := int64(binary.BigEndian.Uint32(uuid[0:4]))
- time |= int64(binary.BigEndian.Uint16(uuid[4:6])) << 32
- time |= int64(binary.BigEndian.Uint16(uuid[6:8])&0xfff) << 48
- return Time(time)
+ var t Time
+ switch uuid.Version() {
+ case 6:
+ time := binary.BigEndian.Uint64(uuid[:8]) // Ignore uuid[6] version b0110
+ t = Time(time)
+ case 7:
+ time := binary.BigEndian.Uint64(uuid[:8])
+ t = Time((time>>16)*10000 + g1582ns100)
+ default: // forward compatible
+ time := int64(binary.BigEndian.Uint32(uuid[0:4]))
+ time |= int64(binary.BigEndian.Uint16(uuid[4:6])) << 32
+ time |= int64(binary.BigEndian.Uint16(uuid[6:8])&0xfff) << 48
+ t = Time(time)
+ }
+ return t
}
// ClockSequence returns the clock sequence encoded in uuid.
diff --git a/dependencies/pkg/mod/github.com/google/uuid@v1.3.0/util.go b/dependencies/pkg/mod/github.com/google/uuid@v1.6.0/util.go
index 5ea6c73..5ea6c73 100644
--- a/dependencies/pkg/mod/github.com/google/uuid@v1.3.0/util.go
+++ b/dependencies/pkg/mod/github.com/google/uuid@v1.6.0/util.go
diff --git a/dependencies/pkg/mod/github.com/google/uuid@v1.3.0/uuid.go b/dependencies/pkg/mod/github.com/google/uuid@v1.6.0/uuid.go
index a57207a..5232b48 100644
--- a/dependencies/pkg/mod/github.com/google/uuid@v1.3.0/uuid.go
+++ b/dependencies/pkg/mod/github.com/google/uuid@v1.6.0/uuid.go
@@ -56,11 +56,15 @@ func IsInvalidLengthError(err error) bool {
return ok
}
-// Parse decodes s into a UUID or returns an error. Both the standard UUID
-// forms of xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx and
-// urn:uuid:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx are decoded as well as the
-// Microsoft encoding {xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx} and the raw hex
-// encoding: xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx.
+// Parse decodes s into a UUID or returns an error if it cannot be parsed. Both
+// the standard UUID forms defined in RFC 4122
+// (xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx and
+// urn:uuid:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx) are decoded. In addition,
+// Parse accepts non-standard strings such as the raw hex encoding
+// xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx and 38 byte "Microsoft style" encodings,
+// e.g. {xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx}. Only the middle 36 bytes are
+// examined in the latter case. Parse should not be used to validate strings as
+// it parses non-standard encodings as indicated above.
func Parse(s string) (UUID, error) {
var uuid UUID
switch len(s) {
@@ -69,7 +73,7 @@ func Parse(s string) (UUID, error) {
// urn:uuid:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx
case 36 + 9:
- if strings.ToLower(s[:9]) != "urn:uuid:" {
+ if !strings.EqualFold(s[:9], "urn:uuid:") {
return uuid, fmt.Errorf("invalid urn prefix: %q", s[:9])
}
s = s[9:]
@@ -101,7 +105,8 @@ func Parse(s string) (UUID, error) {
9, 11,
14, 16,
19, 21,
- 24, 26, 28, 30, 32, 34} {
+ 24, 26, 28, 30, 32, 34,
+ } {
v, ok := xtob(s[x], s[x+1])
if !ok {
return uuid, errors.New("invalid UUID format")
@@ -117,7 +122,7 @@ func ParseBytes(b []byte) (UUID, error) {
switch len(b) {
case 36: // xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx
case 36 + 9: // urn:uuid:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx
- if !bytes.Equal(bytes.ToLower(b[:9]), []byte("urn:uuid:")) {
+ if !bytes.EqualFold(b[:9], []byte("urn:uuid:")) {
return uuid, fmt.Errorf("invalid urn prefix: %q", b[:9])
}
b = b[9:]
@@ -145,7 +150,8 @@ func ParseBytes(b []byte) (UUID, error) {
9, 11,
14, 16,
19, 21,
- 24, 26, 28, 30, 32, 34} {
+ 24, 26, 28, 30, 32, 34,
+ } {
v, ok := xtob(b[x], b[x+1])
if !ok {
return uuid, errors.New("invalid UUID format")
@@ -180,6 +186,59 @@ func Must(uuid UUID, err error) UUID {
return uuid
}
+// Validate returns an error if s is not a properly formatted UUID in one of the following formats:
+// xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx
+// urn:uuid:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx
+// xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
+// {xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx}
+// It returns an error if the format is invalid, otherwise nil.
+func Validate(s string) error {
+ switch len(s) {
+ // Standard UUID format
+ case 36:
+
+ // UUID with "urn:uuid:" prefix
+ case 36 + 9:
+ if !strings.EqualFold(s[:9], "urn:uuid:") {
+ return fmt.Errorf("invalid urn prefix: %q", s[:9])
+ }
+ s = s[9:]
+
+ // UUID enclosed in braces
+ case 36 + 2:
+ if s[0] != '{' || s[len(s)-1] != '}' {
+ return fmt.Errorf("invalid bracketed UUID format")
+ }
+ s = s[1 : len(s)-1]
+
+ // UUID without hyphens
+ case 32:
+ for i := 0; i < len(s); i += 2 {
+ _, ok := xtob(s[i], s[i+1])
+ if !ok {
+ return errors.New("invalid UUID format")
+ }
+ }
+
+ default:
+ return invalidLengthError{len(s)}
+ }
+
+ // Check for standard UUID format
+ if len(s) == 36 {
+ if s[8] != '-' || s[13] != '-' || s[18] != '-' || s[23] != '-' {
+ return errors.New("invalid UUID format")
+ }
+ for _, x := range []int{0, 2, 4, 6, 9, 11, 14, 16, 19, 21, 24, 26, 28, 30, 32, 34} {
+ if _, ok := xtob(s[x], s[x+1]); !ok {
+ return errors.New("invalid UUID format")
+ }
+ }
+ }
+
+ return nil
+}
+
// String returns the string form of uuid, xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx
// , or "" if uuid is invalid.
func (uuid UUID) String() string {
@@ -292,3 +351,15 @@ func DisableRandPool() {
poolMu.Lock()
poolPos = randPoolSize
}
+
+// UUIDs is a slice of UUID types.
+type UUIDs []UUID
+
+// Strings returns a string slice containing the string form of each UUID in uuids.
+func (uuids UUIDs) Strings() []string {
+ var uuidStrs = make([]string, len(uuids))
+ for i, uuid := range uuids {
+ uuidStrs[i] = uuid.String()
+ }
+ return uuidStrs
+}
diff --git a/dependencies/pkg/mod/github.com/google/uuid@v1.3.0/uuid_test.go b/dependencies/pkg/mod/github.com/google/uuid@v1.6.0/uuid_test.go
index e98d0fe..c1c6001 100644
--- a/dependencies/pkg/mod/github.com/google/uuid@v1.3.0/uuid_test.go
+++ b/dependencies/pkg/mod/github.com/google/uuid@v1.6.0/uuid_test.go
@@ -6,6 +6,7 @@ package uuid
import (
"bytes"
+ "errors"
"fmt"
"os"
"runtime"
@@ -73,6 +74,9 @@ var tests = []test{
{"f47ac10b58cc037285670e02b2c3d479", 0, RFC4122, true},
{"f47ac10b58cc037285670e02b2c3d4790", 0, Invalid, false},
{"f47ac10b58cc037285670e02b2c3d47", 0, Invalid, false},
+
+ {"01ee836c-e7c9-619d-929a-525400475911", 6, RFC4122, true},
+ {"018bd12c-58b0-7683-8a5b-8752d0e86651", 7, RFC4122, true},
}
var constants = []struct {
@@ -569,6 +573,67 @@ func TestIsWrongLength(t *testing.T) {
}
}
+func FuzzParse(f *testing.F) {
+ for _, tt := range tests {
+ f.Add(tt.in)
+ f.Add(strings.ToUpper(tt.in))
+ }
+ f.Fuzz(func(t *testing.T, in string) {
+ Parse(in)
+ })
+}
+
+func FuzzParseBytes(f *testing.F) {
+ for _, tt := range tests {
+ f.Add([]byte(tt.in))
+ }
+ f.Fuzz(func(t *testing.T, in []byte) {
+ ParseBytes(in)
+ })
+}
+
+func FuzzFromBytes(f *testing.F) {
+ // Copied from TestFromBytes.
+ f.Add([]byte{
+ 0x7d, 0x44, 0x48, 0x40,
+ 0x9d, 0xc0,
+ 0x11, 0xd1,
+ 0xb2, 0x45,
+ 0x5f, 0xfd, 0xce, 0x74, 0xfa, 0xd2,
+ })
+ f.Fuzz(func(t *testing.T, in []byte) {
+ FromBytes(in)
+ })
+}
+
+// TestValidate checks various scenarios for the Validate function
+func TestValidate(t *testing.T) {
+ testCases := []struct {
+ name string
+ input string
+ expect error
+ }{
+ {"Valid UUID", "123e4567-e89b-12d3-a456-426655440000", nil},
+ {"Valid UUID with URN", "urn:uuid:123e4567-e89b-12d3-a456-426655440000", nil},
+ {"Valid UUID with Braces", "{123e4567-e89b-12d3-a456-426655440000}", nil},
+ {"Valid UUID No Hyphens", "123e4567e89b12d3a456426655440000", nil},
+ {"Invalid UUID", "invalid-uuid", errors.New("invalid UUID length: 12")},
+ {"Invalid Length", "123", fmt.Errorf("invalid UUID length: %d", len("123"))},
+ {"Invalid URN Prefix", "urn:test:123e4567-e89b-12d3-a456-426655440000", fmt.Errorf("invalid urn prefix: %q", "urn:test:")},
+ {"Invalid Brackets", "[123e4567-e89b-12d3-a456-426655440000]", fmt.Errorf("invalid bracketed UUID format")},
+ {"Invalid UUID Format", "12345678gabc1234abcd1234abcd1234", fmt.Errorf("invalid UUID format")},
+ }
+
+ for _, tc := range testCases {
+ t.Run(tc.name, func(t *testing.T) {
+ err := Validate(tc.input)
+ if (err != nil) != (tc.expect != nil) || (err != nil && err.Error() != tc.expect.Error()) {
+ t.Errorf("Validate(%q) = %v, want %v", tc.input, err, tc.expect)
+ }
+ })
+ }
+}
+
var asString = "f47ac10b-58cc-0372-8567-0e02b2c3d479"
var asBytes = []byte(asString)
@@ -700,3 +765,166 @@ func BenchmarkUUID_NewPooled(b *testing.B) {
}
})
}
+
+func BenchmarkUUIDs_Strings(b *testing.B) {
+ uuid1, err := Parse("f47ac10b-58cc-0372-8567-0e02b2c3d479")
+ if err != nil {
+ b.Fatal(err)
+ }
+ uuid2, err := Parse("7d444840-9dc0-11d1-b245-5ffdce74fad2")
+ if err != nil {
+ b.Fatal(err)
+ }
+ uuids := UUIDs{uuid1, uuid2}
+ for i := 0; i < b.N; i++ {
+ uuids.Strings()
+ }
+}
+
+func TestVersion6(t *testing.T) {
+ uuid1, err := NewV6()
+ if err != nil {
+ t.Fatalf("could not create UUID: %v", err)
+ }
+ uuid2, err := NewV6()
+ if err != nil {
+ t.Fatalf("could not create UUID: %v", err)
+ }
+
+ if uuid1 == uuid2 {
+ t.Errorf("%s:duplicate uuid", uuid1)
+ }
+ if v := uuid1.Version(); v != 6 {
+ t.Errorf("%s: version %s expected 6", uuid1, v)
+ }
+ if v := uuid2.Version(); v != 6 {
+ t.Errorf("%s: version %s expected 6", uuid2, v)
+ }
+ n1 := uuid1.NodeID()
+ n2 := uuid2.NodeID()
+ if !bytes.Equal(n1, n2) {
+ t.Errorf("Different nodes %x != %x", n1, n2)
+ }
+ t1 := uuid1.Time()
+ t2 := uuid2.Time()
+ q1 := uuid1.ClockSequence()
+ q2 := uuid2.ClockSequence()
+
+ switch {
+ case t1 == t2 && q1 == q2:
+ t.Error("time stopped")
+ case t1 > t2 && q1 == q2:
+ t.Error("time reversed")
+ case t1 < t2 && q1 != q2:
+ t.Error("clock sequence changed unexpectedly")
+ }
+}
+
+// uuid v7 time is only unix milliseconds, so
+// uuid1.Time() == uuid2.Time() is right, but uuid1 must != uuid2
+func TestVersion7(t *testing.T) {
+ SetRand(nil)
+ m := make(map[string]bool)
+ for x := 1; x < 128; x++ {
+ uuid, err := NewV7()
+ if err != nil {
+ t.Fatalf("could not create UUID: %v", err)
+ }
+ s := uuid.String()
+ if m[s] {
+ t.Errorf("NewV7 returned duplicated UUID %s", s)
+ }
+ m[s] = true
+ if v := uuid.Version(); v != 7 {
+ t.Errorf("UUID of version %s", v)
+ }
+ if uuid.Variant() != RFC4122 {
+ t.Errorf("UUID is variant %d", uuid.Variant())
+ }
+ }
+}
+
+// uuid v7 time is only unix milliseconds, so
+// uuid1.Time() == uuid2.Time() is right, but uuid1 must != uuid2
+func TestVersion7_pooled(t *testing.T) {
+ SetRand(nil)
+ EnableRandPool()
+ defer DisableRandPool()
+
+ m := make(map[string]bool)
+ for x := 1; x < 128; x++ {
+ uuid, err := NewV7()
+ if err != nil {
+ t.Fatalf("could not create UUID: %v", err)
+ }
+ s := uuid.String()
+ if m[s] {
+ t.Errorf("NewV7 returned duplicated UUID %s", s)
+ }
+ m[s] = true
+ if v := uuid.Version(); v != 7 {
+ t.Errorf("UUID of version %s", v)
+ }
+ if uuid.Variant() != RFC4122 {
+ t.Errorf("UUID is variant %d", uuid.Variant())
+ }
+ }
+}
+
+func TestVersion7FromReader(t *testing.T) {
+ myString := "8059ddhdle77cb52"
+ r := bytes.NewReader([]byte(myString))
+ _, err := NewV7FromReader(r)
+ if err != nil {
+ t.Errorf("failed generating UUID from a reader")
+ }
+ _, err = NewV7FromReader(r)
+ if err == nil {
+ t.Errorf("expecting an error as reader has no more bytes. Got uuid. NewV7FromReader may not be using the provided reader")
+ }
+}
+
+func TestVersion7Monotonicity(t *testing.T) {
+ length := 10000
+ u1 := Must(NewV7()).String()
+ for i := 0; i < length; i++ {
+ u2 := Must(NewV7()).String()
+ if u2 <= u1 {
+ t.Errorf("monotonicity failed at #%d: %s(next) < %s(before)", i, u2, u1)
+ break
+ }
+ u1 = u2
+ }
+}
+
+type fakeRand struct{}
+
+func (g fakeRand) Read(bs []byte) (int, error) {
+ for i, _ := range bs {
+ bs[i] = 0x88
+ }
+ return len(bs), nil
+}
+
+func TestVersion7MonotonicityStrict(t *testing.T) {
+ timeNow = func() time.Time {
+ return time.Date(2008, 8, 8, 8, 8, 8, 8, time.UTC)
+ }
+ defer func() {
+ timeNow = time.Now
+ }()
+
+ SetRand(fakeRand{})
+ defer SetRand(nil)
+
+ length := 100000 // > 3906
+ u1 := Must(NewV7()).String()
+ for i := 0; i < length; i++ {
+ u2 := Must(NewV7()).String()
+ if u2 <= u1 {
+ t.Errorf("monotonicity failed at #%d: %s(next) < %s(before)", i, u2, u1)
+ break
+ }
+ u1 = u2
+ }
+}
diff --git a/dependencies/pkg/mod/github.com/google/uuid@v1.3.0/version1.go b/dependencies/pkg/mod/github.com/google/uuid@v1.6.0/version1.go
index 4631096..4631096 100644
--- a/dependencies/pkg/mod/github.com/google/uuid@v1.3.0/version1.go
+++ b/dependencies/pkg/mod/github.com/google/uuid@v1.6.0/version1.go
diff --git a/dependencies/pkg/mod/github.com/google/uuid@v1.3.0/version4.go b/dependencies/pkg/mod/github.com/google/uuid@v1.6.0/version4.go
index 7697802..7697802 100644
--- a/dependencies/pkg/mod/github.com/google/uuid@v1.3.0/version4.go
+++ b/dependencies/pkg/mod/github.com/google/uuid@v1.6.0/version4.go
diff --git a/dependencies/pkg/mod/github.com/google/uuid@v1.6.0/version6.go b/dependencies/pkg/mod/github.com/google/uuid@v1.6.0/version6.go
new file mode 100644
index 0000000..339a959
--- /dev/null
+++ b/dependencies/pkg/mod/github.com/google/uuid@v1.6.0/version6.go
@@ -0,0 +1,56 @@
+// Copyright 2023 Google Inc. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package uuid
+
+import "encoding/binary"
+
+// UUID version 6 is a field-compatible version of UUIDv1, reordered for improved DB locality.
+// It is expected that UUIDv6 will primarily be used in contexts where there are existing v1 UUIDs.
+// Systems that do not involve legacy UUIDv1 SHOULD consider using UUIDv7 instead.
+//
+// see https://datatracker.ietf.org/doc/html/draft-peabody-dispatch-new-uuid-format-03#uuidv6
+//
+// NewV6 returns a Version 6 UUID based on the current NodeID and clock
+// sequence, and the current time. If the NodeID has not been set by SetNodeID
+// or SetNodeInterface then it will be set automatically. If the NodeID cannot
+// be set NewV6 set NodeID is random bits automatically . If clock sequence has not been set by
+// SetClockSequence then it will be set automatically. If GetTime fails to
+// return the current NewV6 returns Nil and an error.
+func NewV6() (UUID, error) {
+ var uuid UUID
+ now, seq, err := GetTime()
+ if err != nil {
+ return uuid, err
+ }
+
+ /*
+ 0 1 2 3
+ 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
+ +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ | time_high |
+ +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ | time_mid | time_low_and_version |
+ +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ |clk_seq_hi_res | clk_seq_low | node (0-1) |
+ +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ | node (2-5) |
+ +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ */
+
+ binary.BigEndian.PutUint64(uuid[0:], uint64(now))
+ binary.BigEndian.PutUint16(uuid[8:], seq)
+
+ uuid[6] = 0x60 | (uuid[6] & 0x0F)
+ uuid[8] = 0x80 | (uuid[8] & 0x3F)
+
+ nodeMu.Lock()
+ if nodeID == zeroID {
+ setNodeInterface("")
+ }
+ copy(uuid[10:], nodeID[:])
+ nodeMu.Unlock()
+
+ return uuid, nil
+}
diff --git a/dependencies/pkg/mod/github.com/google/uuid@v1.6.0/version7.go b/dependencies/pkg/mod/github.com/google/uuid@v1.6.0/version7.go
new file mode 100644
index 0000000..3167b64
--- /dev/null
+++ b/dependencies/pkg/mod/github.com/google/uuid@v1.6.0/version7.go
@@ -0,0 +1,104 @@
+// Copyright 2023 Google Inc. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package uuid
+
+import (
+ "io"
+)
+
+// UUID version 7 features a time-ordered value field derived from the widely
+// implemented and well known Unix Epoch timestamp source,
+// the number of milliseconds seconds since midnight 1 Jan 1970 UTC, leap seconds excluded.
+// As well as improved entropy characteristics over versions 1 or 6.
+//
+// see https://datatracker.ietf.org/doc/html/draft-peabody-dispatch-new-uuid-format-03#name-uuid-version-7
+//
+// Implementations SHOULD utilize UUID version 7 over UUID version 1 and 6 if possible.
+//
+// NewV7 returns a Version 7 UUID based on the current time(Unix Epoch).
+// Uses the randomness pool if it was enabled with EnableRandPool.
+// On error, NewV7 returns Nil and an error
+func NewV7() (UUID, error) {
+ uuid, err := NewRandom()
+ if err != nil {
+ return uuid, err
+ }
+ makeV7(uuid[:])
+ return uuid, nil
+}
+
+// NewV7FromReader returns a Version 7 UUID based on the current time(Unix Epoch).
+// it use NewRandomFromReader fill random bits.
+// On error, NewV7FromReader returns Nil and an error.
+func NewV7FromReader(r io.Reader) (UUID, error) {
+ uuid, err := NewRandomFromReader(r)
+ if err != nil {
+ return uuid, err
+ }
+
+ makeV7(uuid[:])
+ return uuid, nil
+}
+
+// makeV7 fill 48 bits time (uuid[0] - uuid[5]), set version b0111 (uuid[6])
+// uuid[8] already has the right version number (Variant is 10)
+// see function NewV7 and NewV7FromReader
+func makeV7(uuid []byte) {
+ /*
+ 0 1 2 3
+ 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
+ +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ | unix_ts_ms |
+ +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ | unix_ts_ms | ver | rand_a (12 bit seq) |
+ +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ |var| rand_b |
+ +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ | rand_b |
+ +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ */
+ _ = uuid[15] // bounds check
+
+ t, s := getV7Time()
+
+ uuid[0] = byte(t >> 40)
+ uuid[1] = byte(t >> 32)
+ uuid[2] = byte(t >> 24)
+ uuid[3] = byte(t >> 16)
+ uuid[4] = byte(t >> 8)
+ uuid[5] = byte(t)
+
+ uuid[6] = 0x70 | (0x0F & byte(s>>8))
+ uuid[7] = byte(s)
+}
+
+// lastV7time is the last time we returned stored as:
+//
+// 52 bits of time in milliseconds since epoch
+// 12 bits of (fractional nanoseconds) >> 8
+var lastV7time int64
+
+const nanoPerMilli = 1000000
+
+// getV7Time returns the time in milliseconds and nanoseconds / 256.
+// The returned (milli << 12 + seq) is guarenteed to be greater than
+// (milli << 12 + seq) returned by any previous call to getV7Time.
+func getV7Time() (milli, seq int64) {
+ timeMu.Lock()
+ defer timeMu.Unlock()
+
+ nano := timeNow().UnixNano()
+ milli = nano / nanoPerMilli
+ // Sequence number is between 0 and 3906 (nanoPerMilli>>8)
+ seq = (nano - milli*nanoPerMilli) >> 8
+ now := milli<<12 + seq
+ if now <= lastV7time {
+ now = lastV7time + 1
+ milli = now >> 12
+ seq = now & 0xfff
+ }
+ lastV7time = now
+ return milli, seq
+}
diff --git a/dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.17/.codecov.yml b/dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.22/.codecov.yml
index 35cde5c..35cde5c 100644
--- a/dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.17/.codecov.yml
+++ b/dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.22/.codecov.yml
diff --git a/dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.17/.github/FUNDING.yml b/dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.22/.github/FUNDING.yml
index 8f80494..8f80494 100644
--- a/dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.17/.github/FUNDING.yml
+++ b/dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.22/.github/FUNDING.yml
diff --git a/dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.17/.github/workflows/cifuzz.yaml b/dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.22/.github/workflows/cifuzz.yaml
index e198c52..e198c52 100644
--- a/dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.17/.github/workflows/cifuzz.yaml
+++ b/dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.22/.github/workflows/cifuzz.yaml
diff --git a/dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.17/.github/workflows/docker.yaml b/dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.22/.github/workflows/docker.yaml
index 83faeb6..7983de4 100644
--- a/dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.17/.github/workflows/docker.yaml
+++ b/dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.22/.github/workflows/docker.yaml
@@ -17,6 +17,5 @@ jobs:
- name: Run example - simple
run: |
- cd ./_example/simple
- docker build -t simple .
+ docker build -t simple -f ./_example/simple/Dockerfile .
docker run simple | grep 99\ こんにちは世界099
diff --git a/dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.17/.github/workflows/go.yaml b/dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.22/.github/workflows/go.yaml
index 923274b..c96bf31 100644
--- a/dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.17/.github/workflows/go.yaml
+++ b/dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.22/.github/workflows/go.yaml
@@ -14,7 +14,7 @@ jobs:
strategy:
matrix:
os: [ubuntu-latest, macos-latest]
- go: ['1.18', '1.19', '1.20']
+ go: ['1.19', '1.20', '1.21']
fail-fast: false
env:
OS: ${{ matrix.os }}
@@ -64,7 +64,7 @@ jobs:
strategy:
matrix:
- go: ['1.18', '1.19', '1.20']
+ go: ['1.19', '1.20', '1.21']
fail-fast: false
env:
OS: windows-latest
diff --git a/dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.17/.gitignore b/dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.22/.gitignore
index fa0e6b5..fa0e6b5 100644
--- a/dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.17/.gitignore
+++ b/dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.22/.gitignore
diff --git a/dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.17/LICENSE b/dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.22/LICENSE
index ca458bb..ca458bb 100644
--- a/dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.17/LICENSE
+++ b/dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.22/LICENSE
diff --git a/dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.17/README.md b/dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.22/README.md
index 1804a89..1804a89 100644
--- a/dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.17/README.md
+++ b/dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.22/README.md
diff --git a/dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.17/_example/custom_driver_name/Makefile b/dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.22/_example/custom_driver_name/Makefile
index 91fcde6..91fcde6 100644
--- a/dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.17/_example/custom_driver_name/Makefile
+++ b/dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.22/_example/custom_driver_name/Makefile
diff --git a/dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.17/_example/custom_driver_name/main.go b/dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.22/_example/custom_driver_name/main.go
index 3148cae..3148cae 100644
--- a/dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.17/_example/custom_driver_name/main.go
+++ b/dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.22/_example/custom_driver_name/main.go
diff --git a/dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.17/_example/custom_func/main.go b/dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.22/_example/custom_func/main.go
index 85657e6..85657e6 100644
--- a/dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.17/_example/custom_func/main.go
+++ b/dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.22/_example/custom_func/main.go
diff --git a/dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.17/_example/fuzz/fuzz_openexec.go b/dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.22/_example/fuzz/fuzz_openexec.go
index 5326044..5326044 100644
--- a/dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.17/_example/fuzz/fuzz_openexec.go
+++ b/dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.22/_example/fuzz/fuzz_openexec.go
diff --git a/dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.17/_example/hook/hook.go b/dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.22/_example/hook/hook.go
index 6023181..6023181 100644
--- a/dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.17/_example/hook/hook.go
+++ b/dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.22/_example/hook/hook.go
diff --git a/dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.22/_example/json/json.go b/dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.22/_example/json/json.go
new file mode 100644
index 0000000..181934b
--- /dev/null
+++ b/dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.22/_example/json/json.go
@@ -0,0 +1,81 @@
+package main
+
+import (
+ "database/sql"
+ "database/sql/driver"
+ "encoding/json"
+ "fmt"
+ _ "github.com/mattn/go-sqlite3"
+ "log"
+ "os"
+)
+
+type Tag struct {
+ Name string `json:"name"`
+ Country string `json:"country"`
+}
+
+func (t *Tag) Scan(value interface{}) error {
+ return json.Unmarshal([]byte(value.(string)), t)
+}
+
+func (t *Tag) Value() (driver.Value, error) {
+ b, err := json.Marshal(t)
+ return string(b), err
+}
+
+func main() {
+ os.Remove("./foo.db")
+
+ db, err := sql.Open("sqlite3", "./foo.db")
+ if err != nil {
+ log.Fatal(err)
+ }
+ defer db.Close()
+
+ _, err = db.Exec(`create table foo (tag jsonb)`)
+ if err != nil {
+ log.Fatal(err)
+ }
+
+ stmt, err := db.Prepare("insert into foo(tag) values(?)")
+ if err != nil {
+ log.Fatal(err)
+ }
+ defer stmt.Close()
+ _, err = stmt.Exec(`{"name": "mattn", "country": "japan"}`)
+ if err != nil {
+ log.Fatal(err)
+ }
+ _, err = stmt.Exec(`{"name": "michael", "country": "usa"}`)
+ if err != nil {
+ log.Fatal(err)
+ }
+
+ var country string
+ err = db.QueryRow("select tag->>'country' from foo where tag->>'name' = 'mattn'").Scan(&country)
+ if err != nil {
+ log.Fatal(err)
+ }
+ fmt.Println(country)
+
+ var tag Tag
+ err = db.QueryRow("select tag from foo where tag->>'name' = 'mattn'").Scan(&tag)
+ if err != nil {
+ log.Fatal(err)
+ }
+
+ fmt.Println(tag.Name)
+
+ tag.Country = "日本"
+ _, err = db.Exec(`update foo set tag = ? where tag->>'name' == 'mattn'`, &tag)
+ if err != nil {
+ log.Fatal(err)
+ }
+
+ err = db.QueryRow("select tag->>'country' from foo where tag->>'name' = 'mattn'").Scan(&country)
+ if err != nil {
+ log.Fatal(err)
+ }
+ fmt.Println(country)
+}
diff --git a/dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.17/_example/limit/limit.go b/dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.22/_example/limit/limit.go
index bcba819..c1adfe8 100644
--- a/dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.17/_example/limit/limit.go
+++ b/dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.22/_example/limit/limit.go
@@ -10,9 +10,9 @@ import (
"github.com/mattn/go-sqlite3"
)
-func createBulkInsertQuery(n int, start int) (query string, args []interface{}) {
+func createBulkInsertQuery(n int, start int) (query string, args []any) {
values := make([]string, n)
- args = make([]interface{}, n*2)
+ args = make([]any, n*2)
pos := 0
for i := 0; i < n; i++ {
values[i] = "(?, ?)"
@@ -27,7 +27,7 @@ func createBulkInsertQuery(n int, start int) (query string, args []interface{})
return
}
-func bulkInsert(db *sql.DB, query string, args []interface{}) (err error) {
+func bulkInsert(db *sql.DB, query string, args []any) (err error) {
stmt, err := db.Prepare(query)
if err != nil {
return
diff --git a/dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.17/_example/mod_regexp/Makefile b/dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.22/_example/mod_regexp/Makefile
index 1ef69a6..1ef69a6 100644
--- a/dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.17/_example/mod_regexp/Makefile
+++ b/dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.22/_example/mod_regexp/Makefile
diff --git a/dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.17/_example/mod_regexp/extension.go b/dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.22/_example/mod_regexp/extension.go
index 61ceb55..61ceb55 100644
--- a/dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.17/_example/mod_regexp/extension.go
+++ b/dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.22/_example/mod_regexp/extension.go
diff --git a/dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.17/_example/mod_regexp/sqlite3_mod_regexp.c b/dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.22/_example/mod_regexp/sqlite3_mod_regexp.c
index d3ad149..d3ad149 100644
--- a/dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.17/_example/mod_regexp/sqlite3_mod_regexp.c
+++ b/dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.22/_example/mod_regexp/sqlite3_mod_regexp.c
diff --git a/dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.17/_example/mod_vtable/Makefile b/dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.22/_example/mod_vtable/Makefile
index f65a004..f65a004 100644
--- a/dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.17/_example/mod_vtable/Makefile
+++ b/dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.22/_example/mod_vtable/Makefile
diff --git a/dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.17/_example/mod_vtable/extension.go b/dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.22/_example/mod_vtable/extension.go
index f738af6..f738af6 100644
--- a/dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.17/_example/mod_vtable/extension.go
+++ b/dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.22/_example/mod_vtable/extension.go
diff --git a/dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.17/_example/mod_vtable/picojson.h b/dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.22/_example/mod_vtable/picojson.h
index 2142647..2142647 100644
--- a/dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.17/_example/mod_vtable/picojson.h
+++ b/dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.22/_example/mod_vtable/picojson.h
diff --git a/dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.17/_example/mod_vtable/sqlite3_mod_vtable.cc b/dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.22/_example/mod_vtable/sqlite3_mod_vtable.cc
index 4caf484..4caf484 100644
--- a/dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.17/_example/mod_vtable/sqlite3_mod_vtable.cc
+++ b/dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.22/_example/mod_vtable/sqlite3_mod_vtable.cc
diff --git a/dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.17/_example/simple/Dockerfile b/dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.22/_example/simple/Dockerfile
index c19f6e6..8ed0473 100644
--- a/dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.17/_example/simple/Dockerfile
+++ b/dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.22/_example/simple/Dockerfile
@@ -9,7 +9,7 @@
# -----------------------------------------------------------------------------
# Build Stage
# -----------------------------------------------------------------------------
-FROM golang:alpine AS build
+FROM golang:alpine3.18 AS build
# Important:
# Because this is a CGO enabled package, you are required to set it as 1.
@@ -26,7 +26,9 @@ WORKDIR /workspace
COPY . /workspace/
RUN \
+ cd _example/simple && \
go mod init github.com/mattn/sample && \
+ go mod edit -replace=github.com/mattn/go-sqlite3=../.. && \
go mod tidy && \
go install -ldflags='-s -w -extldflags "-static"' ./simple.go
diff --git a/dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.17/_example/simple/simple.go b/dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.22/_example/simple/simple.go
index 0c34791..0c34791 100644
--- a/dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.17/_example/simple/simple.go
+++ b/dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.22/_example/simple/simple.go
diff --git a/dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.17/_example/trace/main.go b/dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.22/_example/trace/main.go
index bef3d15..bef3d15 100644
--- a/dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.17/_example/trace/main.go
+++ b/dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.22/_example/trace/main.go
diff --git a/dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.17/_example/vtable/main.go b/dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.22/_example/vtable/main.go
index aad8dda..aad8dda 100644
--- a/dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.17/_example/vtable/main.go
+++ b/dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.22/_example/vtable/main.go
diff --git a/dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.17/_example/vtable/vtable.go b/dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.22/_example/vtable/vtable.go
index 10d12a9..c65535b 100644
--- a/dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.17/_example/vtable/vtable.go
+++ b/dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.22/_example/vtable/vtable.go
@@ -93,7 +93,7 @@ func (vc *ghRepoCursor) Column(c *sqlite3.SQLiteContext, col int) error {
return nil
}
-func (vc *ghRepoCursor) Filter(idxNum int, idxStr string, vals []interface{}) error {
+func (vc *ghRepoCursor) Filter(idxNum int, idxStr string, vals []any) error {
vc.index = 0
return nil
}
diff --git a/dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.17/_example/vtable_eponymous_only/main.go b/dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.22/_example/vtable_eponymous_only/main.go
index 17b58af..17b58af 100644
--- a/dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.17/_example/vtable_eponymous_only/main.go
+++ b/dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.22/_example/vtable_eponymous_only/main.go
diff --git a/dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.17/_example/vtable_eponymous_only/vtable.go b/dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.22/_example/vtable_eponymous_only/vtable.go
index 49fc0b7..9f22ebc 100644
--- a/dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.17/_example/vtable_eponymous_only/vtable.go
+++ b/dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.22/_example/vtable_eponymous_only/vtable.go
@@ -77,7 +77,7 @@ func (vc *seriesCursor) Column(c *sqlite3.SQLiteContext, col int) error {
return nil
}
-func (vc *seriesCursor) Filter(idxNum int, idxStr string, vals []interface{}) error {
+func (vc *seriesCursor) Filter(idxNum int, idxStr string, vals []any) error {
switch {
case len(vals) < 1:
vc.seriesTable.start = 0
diff --git a/dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.17/backup.go b/dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.22/backup.go
index ecbb469..ecbb469 100644
--- a/dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.17/backup.go
+++ b/dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.22/backup.go
diff --git a/dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.17/backup_test.go b/dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.22/backup_test.go
index 6d857de..b3ad0b5 100644
--- a/dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.17/backup_test.go
+++ b/dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.22/backup_test.go
@@ -3,6 +3,7 @@
// Use of this source code is governed by an MIT-style
// license that can be found in the LICENSE file.
+//go:build cgo
// +build cgo
package sqlite3
diff --git a/dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.17/callback.go b/dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.22/callback.go
index d305691..b794bcd 100644
--- a/dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.17/callback.go
+++ b/dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.22/callback.go
@@ -100,13 +100,13 @@ func preUpdateHookTrampoline(handle unsafe.Pointer, dbHandle uintptr, op int, db
// Use handles to avoid passing Go pointers to C.
type handleVal struct {
db *SQLiteConn
- val interface{}
+ val any
}
var handleLock sync.Mutex
var handleVals = make(map[unsafe.Pointer]handleVal)
-func newHandle(db *SQLiteConn, v interface{}) unsafe.Pointer {
+func newHandle(db *SQLiteConn, v any) unsafe.Pointer {
handleLock.Lock()
defer handleLock.Unlock()
val := handleVal{db: db, val: v}
@@ -124,7 +124,7 @@ func lookupHandleVal(handle unsafe.Pointer) handleVal {
return handleVals[handle]
}
-func lookupHandle(handle unsafe.Pointer) interface{} {
+func lookupHandle(handle unsafe.Pointer) any {
return lookupHandleVal(handle).val
}
@@ -238,7 +238,7 @@ func callbackArg(typ reflect.Type) (callbackArgConverter, error) {
switch typ.Kind() {
case reflect.Interface:
if typ.NumMethod() != 0 {
- return nil, errors.New("the only supported interface type is interface{}")
+ return nil, errors.New("the only supported interface type is any")
}
return callbackArgGeneric, nil
case reflect.Slice:
@@ -360,11 +360,11 @@ func callbackRetGeneric(ctx *C.sqlite3_context, v reflect.Value) error {
}
cb, err := callbackRet(v.Elem().Type())
- if err != nil {
- return err
- }
+ if err != nil {
+ return err
+ }
- return cb(ctx, v.Elem())
+ return cb(ctx, v.Elem())
}
func callbackRet(typ reflect.Type) (callbackRetConverter, error) {
diff --git a/dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.17/callback_test.go b/dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.22/callback_test.go
index b09122a..8163f2f 100644
--- a/dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.17/callback_test.go
+++ b/dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.22/callback_test.go
@@ -3,6 +3,7 @@
// Use of this source code is governed by an MIT-style
// license that can be found in the LICENSE file.
+//go:build cgo
// +build cgo
package sqlite3
@@ -53,7 +54,7 @@ func TestCallbackArgCast(t *testing.T) {
func TestCallbackConverters(t *testing.T) {
tests := []struct {
- v interface{}
+ v any
err bool
}{
// Unfortunately, we can't tell which converter was returned,
@@ -104,7 +105,7 @@ func TestCallbackConverters(t *testing.T) {
}
func TestCallbackReturnAny(t *testing.T) {
- udf := func() interface{} {
+ udf := func() any {
return 1
}
diff --git a/dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.17/convert.go b/dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.22/convert.go
index 0385073..f7a9dcd 100644
--- a/dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.17/convert.go
+++ b/dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.22/convert.go
@@ -23,7 +23,7 @@ var errNilPtr = errors.New("destination pointer is nil") // embedded in descript
// convertAssign copies to dest the value in src, converting it if possible.
// An error is returned if the copy would result in loss of information.
// dest should be a pointer type.
-func convertAssign(dest, src interface{}) error {
+func convertAssign(dest, src any) error {
// Common cases, without reflect.
switch s := src.(type) {
case string:
@@ -55,7 +55,7 @@ func convertAssign(dest, src interface{}) error {
}
*d = string(s)
return nil
- case *interface{}:
+ case *any:
if d == nil {
return errNilPtr
}
@@ -97,7 +97,7 @@ func convertAssign(dest, src interface{}) error {
}
case nil:
switch d := dest.(type) {
- case *interface{}:
+ case *any:
if d == nil {
return errNilPtr
}
@@ -149,7 +149,7 @@ func convertAssign(dest, src interface{}) error {
*d = bv.(bool)
}
return err
- case *interface{}:
+ case *any:
*d = src
return nil
}
@@ -256,7 +256,7 @@ func cloneBytes(b []byte) []byte {
return c
}
-func asString(src interface{}) string {
+func asString(src any) string {
switch v := src.(type) {
case string:
return v
diff --git a/dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.17/doc.go b/dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.22/doc.go
index ac27633..a3bcebb 100644
--- a/dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.17/doc.go
+++ b/dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.22/doc.go
@@ -5,63 +5,63 @@ This works as a driver for database/sql.
Installation
- go get github.com/mattn/go-sqlite3
+ go get github.com/mattn/go-sqlite3
-Supported Types
+# Supported Types
Currently, go-sqlite3 supports the following data types.
- +------------------------------+
- |go | sqlite3 |
- |----------|-------------------|
- |nil | null |
- |int | integer |
- |int64 | integer |
- |float64 | float |
- |bool | integer |
- |[]byte | blob |
- |string | text |
- |time.Time | timestamp/datetime|
- +------------------------------+
-
-SQLite3 Extension
+ +------------------------------+
+ |go | sqlite3 |
+ |----------|-------------------|
+ |nil | null |
+ |int | integer |
+ |int64 | integer |
+ |float64 | float |
+ |bool | integer |
+ |[]byte | blob |
+ |string | text |
+ |time.Time | timestamp/datetime|
+ +------------------------------+
+
+# SQLite3 Extension
You can write your own extension module for sqlite3. For example, below is an
extension for a Regexp matcher operation.
- #include <pcre.h>
- #include <string.h>
- #include <stdio.h>
- #include <sqlite3ext.h>
-
- SQLITE_EXTENSION_INIT1
- static void regexp_func(sqlite3_context *context, int argc, sqlite3_value **argv) {
- if (argc >= 2) {
- const char *target = (const char *)sqlite3_value_text(argv[1]);
- const char *pattern = (const char *)sqlite3_value_text(argv[0]);
- const char* errstr = NULL;
- int erroff = 0;
- int vec[500];
- int n, rc;
- pcre* re = pcre_compile(pattern, 0, &errstr, &erroff, NULL);
- rc = pcre_exec(re, NULL, target, strlen(target), 0, 0, vec, 500);
- if (rc <= 0) {
- sqlite3_result_error(context, errstr, 0);
- return;
- }
- sqlite3_result_int(context, 1);
- }
- }
-
- #ifdef _WIN32
- __declspec(dllexport)
- #endif
- int sqlite3_extension_init(sqlite3 *db, char **errmsg,
- const sqlite3_api_routines *api) {
- SQLITE_EXTENSION_INIT2(api);
- return sqlite3_create_function(db, "regexp", 2, SQLITE_UTF8,
- (void*)db, regexp_func, NULL, NULL);
- }
+ #include <pcre.h>
+ #include <string.h>
+ #include <stdio.h>
+ #include <sqlite3ext.h>
+
+ SQLITE_EXTENSION_INIT1
+ static void regexp_func(sqlite3_context *context, int argc, sqlite3_value **argv) {
+ if (argc >= 2) {
+ const char *target = (const char *)sqlite3_value_text(argv[1]);
+ const char *pattern = (const char *)sqlite3_value_text(argv[0]);
+ const char* errstr = NULL;
+ int erroff = 0;
+ int vec[500];
+ int n, rc;
+ pcre* re = pcre_compile(pattern, 0, &errstr, &erroff, NULL);
+ rc = pcre_exec(re, NULL, target, strlen(target), 0, 0, vec, 500);
+ if (rc <= 0) {
+ sqlite3_result_error(context, errstr, 0);
+ return;
+ }
+ sqlite3_result_int(context, 1);
+ }
+ }
+
+ #ifdef _WIN32
+ __declspec(dllexport)
+ #endif
+ int sqlite3_extension_init(sqlite3 *db, char **errmsg,
+ const sqlite3_api_routines *api) {
+ SQLITE_EXTENSION_INIT2(api);
+ return sqlite3_create_function(db, "regexp", 2, SQLITE_UTF8,
+ (void*)db, regexp_func, NULL, NULL);
+ }
It needs to be built as a so/dll shared library. And you need to register
the extension module like below.
@@ -77,7 +77,7 @@ Then, you can use this extension.
rows, err := db.Query("select text from mytable where name regexp '^golang'")
-Connection Hook
+# Connection Hook
You can hook and inject your code when the connection is established by setting
ConnectHook to get the SQLiteConn.
@@ -95,13 +95,13 @@ You can also use database/sql.Conn.Raw (Go >= 1.13):
conn, err := db.Conn(context.Background())
// if err != nil { ... }
defer conn.Close()
- err = conn.Raw(func (driverConn interface{}) error {
+ err = conn.Raw(func (driverConn any) error {
sqliteConn := driverConn.(*sqlite3.SQLiteConn)
// ... use sqliteConn
})
// if err != nil { ... }
-Go SQlite3 Extensions
+# Go SQlite3 Extensions
If you want to register Go functions as SQLite extension functions
you can make a custom driver by calling RegisterFunction from
@@ -130,6 +130,5 @@ You can then use the custom driver by passing its name to sql.Open.
}
See the documentation of RegisterFunc for more details.
-
*/
package sqlite3
diff --git a/dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.17/error.go b/dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.22/error.go
index 58ab252..58ab252 100644
--- a/dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.17/error.go
+++ b/dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.22/error.go
diff --git a/dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.17/error_test.go b/dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.22/error_test.go
index 3cfad06..0ff14c1 100644
--- a/dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.17/error_test.go
+++ b/dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.22/error_test.go
@@ -3,6 +3,7 @@
// Use of this source code is governed by an MIT-style
// license that can be found in the LICENSE file.
+//go:build cgo
// +build cgo
package sqlite3
diff --git a/dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.17/go.mod b/dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.22/go.mod
index 89788ab..e342dcc 100644
--- a/dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.17/go.mod
+++ b/dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.22/go.mod
@@ -1,6 +1,6 @@
module github.com/mattn/go-sqlite3
-go 1.16
+go 1.19
retract (
[v2.0.0+incompatible, v2.0.6+incompatible] // Accidental; no major changes or features.
diff --git a/dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.22/go.sum b/dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.22/go.sum
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.22/go.sum
diff --git a/dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.17/sqlite3-binding.c b/dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.22/sqlite3-binding.c
index a1d6a28..53d7560 100644
--- a/dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.17/sqlite3-binding.c
+++ b/dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.22/sqlite3-binding.c
@@ -1,7 +1,7 @@
#ifndef USE_LIBSQLITE3
/******************************************************************************
** This file is an amalgamation of many separate C source files from SQLite
-** version 3.42.0. By combining all the individual C code files into this
+** version 3.45.1. By combining all the individual C code files into this
** single large file, the entire code can be compiled as a single translation
** unit. This allows many compilers to do optimizations that would not be
** possible if the files were compiled separately. Performance improvements
@@ -17,6 +17,9 @@
** if you want a wrapper to interface SQLite with your choice of programming
** language. The code for the "sqlite3" command-line shell is also in a
** separate file. This file contains only code for the core SQLite library.
+**
+** The content in this amalgamation comes from Fossil check-in
+** e876e51a0ed5c5b3126f52e532044363a014.
*/
#define SQLITE_CORE 1
#define SQLITE_AMALGAMATION 1
@@ -51,11 +54,11 @@
** used on lines of code that actually
** implement parts of coverage testing.
**
-** OPTIMIZATION-IF-TRUE - This branch is allowed to alway be false
+** OPTIMIZATION-IF-TRUE - This branch is allowed to always be false
** and the correct answer is still obtained,
** though perhaps more slowly.
**
-** OPTIMIZATION-IF-FALSE - This branch is allowed to alway be true
+** OPTIMIZATION-IF-FALSE - This branch is allowed to always be true
** and the correct answer is still obtained,
** though perhaps more slowly.
**
@@ -457,9 +460,9 @@ extern "C" {
** [sqlite3_libversion_number()], [sqlite3_sourceid()],
** [sqlite_version()] and [sqlite_source_id()].
*/
-#define SQLITE_VERSION "3.42.0"
-#define SQLITE_VERSION_NUMBER 3042000
-#define SQLITE_SOURCE_ID "2023-05-16 12:36:15 831d0fb2836b71c9bc51067c49fee4b8f18047814f2ff22d817d25195cf350b0"
+#define SQLITE_VERSION "3.45.1"
+#define SQLITE_VERSION_NUMBER 3045001
+#define SQLITE_SOURCE_ID "2024-01-30 16:01:20 e876e51a0ed5c5b3126f52e532044363a014bc594cfefa87ffb5b82257cc467a"
/*
** CAPI3REF: Run-Time Library Version Numbers
@@ -839,6 +842,7 @@ SQLITE_API int sqlite3_exec(
#define SQLITE_IOERR_ROLLBACK_ATOMIC (SQLITE_IOERR | (31<<8))
#define SQLITE_IOERR_DATA (SQLITE_IOERR | (32<<8))
#define SQLITE_IOERR_CORRUPTFS (SQLITE_IOERR | (33<<8))
+#define SQLITE_IOERR_IN_PAGE (SQLITE_IOERR | (34<<8))
#define SQLITE_LOCKED_SHAREDCACHE (SQLITE_LOCKED | (1<<8))
#define SQLITE_LOCKED_VTAB (SQLITE_LOCKED | (2<<8))
#define SQLITE_BUSY_RECOVERY (SQLITE_BUSY | (1<<8))
@@ -1501,7 +1505,7 @@ struct sqlite3_io_methods {
** by clients within the current process, only within other processes.
**
** <li>[[SQLITE_FCNTL_CKSM_FILE]]
-** The [SQLITE_FCNTL_CKSM_FILE] opcode is for use interally by the
+** The [SQLITE_FCNTL_CKSM_FILE] opcode is for use internally by the
** [checksum VFS shim] only.
**
** <li>[[SQLITE_FCNTL_RESET_CACHE]]
@@ -2437,7 +2441,7 @@ struct sqlite3_mem_methods {
** is stored in each sorted record and the required column values loaded
** from the database as records are returned in sorted order. The default
** value for this option is to never use this optimization. Specifying a
-** negative value for this option restores the default behaviour.
+** negative value for this option restores the default behavior.
** This option is only available if SQLite is compiled with the
** [SQLITE_ENABLE_SORTER_REFERENCES] compile-time option.
**
@@ -2612,7 +2616,7 @@ struct sqlite3_mem_methods {
** database handle, SQLite checks if this will mean that there are now no
** connections at all to the database. If so, it performs a checkpoint
** operation before closing the connection. This option may be used to
-** override this behaviour. The first parameter passed to this operation
+** override this behavior. The first parameter passed to this operation
** is an integer - positive to disable checkpoints-on-close, or zero (the
** default) to enable them, and negative to leave the setting unchanged.
** The second parameter is a pointer to an integer
@@ -2765,7 +2769,7 @@ struct sqlite3_mem_methods {
** the [VACUUM] command will fail with an obscure error when attempting to
** process a table with generated columns and a descending index. This is
** not considered a bug since SQLite versions 3.3.0 and earlier do not support
-** either generated columns or decending indexes.
+** either generated columns or descending indexes.
** </dd>
**
** [[SQLITE_DBCONFIG_STMT_SCANSTATUS]]
@@ -3046,6 +3050,7 @@ SQLITE_API sqlite3_int64 sqlite3_total_changes64(sqlite3*);
**
** ^The [sqlite3_is_interrupted(D)] interface can be used to determine whether
** or not an interrupt is currently in effect for [database connection] D.
+** It returns 1 if an interrupt is currently in effect, or 0 otherwise.
*/
SQLITE_API void sqlite3_interrupt(sqlite3*);
SQLITE_API int sqlite3_is_interrupted(sqlite3*);
@@ -3699,8 +3704,10 @@ SQLITE_API SQLITE_DEPRECATED void *sqlite3_profile(sqlite3*,
** M argument should be the bitwise OR-ed combination of
** zero or more [SQLITE_TRACE] constants.
**
-** ^Each call to either sqlite3_trace() or sqlite3_trace_v2() overrides
-** (cancels) any prior calls to sqlite3_trace() or sqlite3_trace_v2().
+** ^Each call to either sqlite3_trace(D,X,P) or sqlite3_trace_v2(D,M,X,P)
+** overrides (cancels) all prior calls to sqlite3_trace(D,X,P) or
+** sqlite3_trace_v2(D,M,X,P) for the [database connection] D. Each
+** database connection may have at most one trace callback.
**
** ^The X callback is invoked whenever any of the events identified by
** mask M occur. ^The integer return value from the callback is currently
@@ -4069,7 +4076,7 @@ SQLITE_API int sqlite3_open_v2(
** as F) must be one of:
** <ul>
** <li> A database filename pointer created by the SQLite core and
-** passed into the xOpen() method of a VFS implemention, or
+** passed into the xOpen() method of a VFS implementation, or
** <li> A filename obtained from [sqlite3_db_filename()], or
** <li> A new filename constructed using [sqlite3_create_filename()].
** </ul>
@@ -4182,7 +4189,7 @@ SQLITE_API sqlite3_file *sqlite3_database_file_object(const char*);
/*
** CAPI3REF: Create and Destroy VFS Filenames
**
-** These interfces are provided for use by [VFS shim] implementations and
+** These interfaces are provided for use by [VFS shim] implementations and
** are not useful outside of that context.
**
** The sqlite3_create_filename(D,J,W,N,P) allocates memory to hold a version of
@@ -4261,14 +4268,17 @@ SQLITE_API void sqlite3_free_filename(sqlite3_filename);
** </ul>
**
** ^The sqlite3_errmsg() and sqlite3_errmsg16() return English-language
-** text that describes the error, as either UTF-8 or UTF-16 respectively.
+** text that describes the error, as either UTF-8 or UTF-16 respectively,
+** or NULL if no error message is available.
+** (See how SQLite handles [invalid UTF] for exceptions to this rule.)
** ^(Memory to hold the error message string is managed internally.
** The application does not need to worry about freeing the result.
** However, the error string might be overwritten or deallocated by
** subsequent calls to other SQLite interface functions.)^
**
-** ^The sqlite3_errstr() interface returns the English-language text
-** that describes the [result code], as UTF-8.
+** ^The sqlite3_errstr(E) interface returns the English-language text
+** that describes the [result code] E, as UTF-8, or NULL if E is not an
+** result code for which a text error message is available.
** ^(Memory to hold the error message string is managed internally
** and must not be freed by the application)^.
**
@@ -4730,6 +4740,41 @@ SQLITE_API int sqlite3_stmt_readonly(sqlite3_stmt *pStmt);
SQLITE_API int sqlite3_stmt_isexplain(sqlite3_stmt *pStmt);
/*
+** CAPI3REF: Change The EXPLAIN Setting For A Prepared Statement
+** METHOD: sqlite3_stmt
+**
+** The sqlite3_stmt_explain(S,E) interface changes the EXPLAIN
+** setting for [prepared statement] S. If E is zero, then S becomes
+** a normal prepared statement. If E is 1, then S behaves as if
+** its SQL text began with "[EXPLAIN]". If E is 2, then S behaves as if
+** its SQL text began with "[EXPLAIN QUERY PLAN]".
+**
+** Calling sqlite3_stmt_explain(S,E) might cause S to be reprepared.
+** SQLite tries to avoid a reprepare, but a reprepare might be necessary
+** on the first transition into EXPLAIN or EXPLAIN QUERY PLAN mode.
+**
+** Because of the potential need to reprepare, a call to
+** sqlite3_stmt_explain(S,E) will fail with SQLITE_ERROR if S cannot be
+** reprepared because it was created using [sqlite3_prepare()] instead of
+** the newer [sqlite3_prepare_v2()] or [sqlite3_prepare_v3()] interfaces and
+** hence has no saved SQL text with which to reprepare.
+**
+** Changing the explain setting for a prepared statement does not change
+** the original SQL text for the statement. Hence, if the SQL text originally
+** began with EXPLAIN or EXPLAIN QUERY PLAN, but sqlite3_stmt_explain(S,0)
+** is called to convert the statement into an ordinary statement, the EXPLAIN
+** or EXPLAIN QUERY PLAN keywords will still appear in the sqlite3_sql(S)
+** output, even though the statement now acts like a normal SQL statement.
+**
+** This routine returns SQLITE_OK if the explain mode is successfully
+** changed, or an error code if the explain mode could not be changed.
+** The explain mode cannot be changed while a statement is active.
+** Hence, it is good practice to call [sqlite3_reset(S)]
+** immediately prior to calling sqlite3_stmt_explain(S,E).
+*/
+SQLITE_API int sqlite3_stmt_explain(sqlite3_stmt *pStmt, int eMode);
+
+/*
** CAPI3REF: Determine If A Prepared Statement Has Been Reset
** METHOD: sqlite3_stmt
**
@@ -4892,7 +4937,7 @@ typedef struct sqlite3_context sqlite3_context;
** with it may be passed. ^It is called to dispose of the BLOB or string even
** if the call to the bind API fails, except the destructor is not called if
** the third parameter is a NULL pointer or the fourth parameter is negative.
-** ^ (2) The special constant, [SQLITE_STATIC], may be passsed to indicate that
+** ^ (2) The special constant, [SQLITE_STATIC], may be passed to indicate that
** the application remains responsible for disposing of the object. ^In this
** case, the object and the provided pointer to it must remain valid until
** either the prepared statement is finalized or the same SQL parameter is
@@ -5571,20 +5616,33 @@ SQLITE_API int sqlite3_finalize(sqlite3_stmt *pStmt);
** ^The [sqlite3_reset(S)] interface resets the [prepared statement] S
** back to the beginning of its program.
**
-** ^If the most recent call to [sqlite3_step(S)] for the
-** [prepared statement] S returned [SQLITE_ROW] or [SQLITE_DONE],
-** or if [sqlite3_step(S)] has never before been called on S,
-** then [sqlite3_reset(S)] returns [SQLITE_OK].
+** ^The return code from [sqlite3_reset(S)] indicates whether or not
+** the previous evaluation of prepared statement S completed successfully.
+** ^If [sqlite3_step(S)] has never before been called on S or if
+** [sqlite3_step(S)] has not been called since the previous call
+** to [sqlite3_reset(S)], then [sqlite3_reset(S)] will return
+** [SQLITE_OK].
**
** ^If the most recent call to [sqlite3_step(S)] for the
** [prepared statement] S indicated an error, then
** [sqlite3_reset(S)] returns an appropriate [error code].
+** ^The [sqlite3_reset(S)] interface might also return an [error code]
+** if there were no prior errors but the process of resetting
+** the prepared statement caused a new error. ^For example, if an
+** [INSERT] statement with a [RETURNING] clause is only stepped one time,
+** that one call to [sqlite3_step(S)] might return SQLITE_ROW but
+** the overall statement might still fail and the [sqlite3_reset(S)] call
+** might return SQLITE_BUSY if locking constraints prevent the
+** database change from committing. Therefore, it is important that
+** applications check the return code from [sqlite3_reset(S)] even if
+** no prior call to [sqlite3_step(S)] indicated a problem.
**
** ^The [sqlite3_reset(S)] interface does not change the values
** of any [sqlite3_bind_blob|bindings] on the [prepared statement] S.
*/
SQLITE_API int sqlite3_reset(sqlite3_stmt *pStmt);
+
/*
** CAPI3REF: Create Or Redefine SQL Functions
** KEYWORDS: {function creation routines}
@@ -5795,7 +5853,7 @@ SQLITE_API int sqlite3_create_window_function(
** [application-defined SQL function]
** that has side-effects or that could potentially leak sensitive information.
** This will prevent attacks in which an application is tricked
-** into using a database file that has had its schema surreptiously
+** into using a database file that has had its schema surreptitiously
** modified to invoke the application-defined function in ways that are
** harmful.
** <p>
@@ -5831,13 +5889,27 @@ SQLITE_API int sqlite3_create_window_function(
** </dd>
**
** [[SQLITE_SUBTYPE]] <dt>SQLITE_SUBTYPE</dt><dd>
-** The SQLITE_SUBTYPE flag indicates to SQLite that a function may call
+** The SQLITE_SUBTYPE flag indicates to SQLite that a function might call
** [sqlite3_value_subtype()] to inspect the sub-types of its arguments.
-** Specifying this flag makes no difference for scalar or aggregate user
-** functions. However, if it is not specified for a user-defined window
-** function, then any sub-types belonging to arguments passed to the window
-** function may be discarded before the window function is called (i.e.
-** sqlite3_value_subtype() will always return 0).
+** This flag instructs SQLite to omit some corner-case optimizations that
+** might disrupt the operation of the [sqlite3_value_subtype()] function,
+** causing it to return zero rather than the correct subtype().
+** SQL functions that invokes [sqlite3_value_subtype()] should have this
+** property. If the SQLITE_SUBTYPE property is omitted, then the return
+** value from [sqlite3_value_subtype()] might sometimes be zero even though
+** a non-zero subtype was specified by the function argument expression.
+**
+** [[SQLITE_RESULT_SUBTYPE]] <dt>SQLITE_RESULT_SUBTYPE</dt><dd>
+** The SQLITE_RESULT_SUBTYPE flag indicates to SQLite that a function might call
+** [sqlite3_result_subtype()] to cause a sub-type to be associated with its
+** result.
+** Every function that invokes [sqlite3_result_subtype()] should have this
+** property. If it does not, then the call to [sqlite3_result_subtype()]
+** might become a no-op if the function is used as term in an
+** [expression index]. On the other hand, SQL functions that never invoke
+** [sqlite3_result_subtype()] should avoid setting this property, as the
+** purpose of this property is to disable certain optimizations that are
+** incompatible with subtypes.
** </dd>
** </dl>
*/
@@ -5845,6 +5917,7 @@ SQLITE_API int sqlite3_create_window_function(
#define SQLITE_DIRECTONLY 0x000080000
#define SQLITE_SUBTYPE 0x000100000
#define SQLITE_INNOCUOUS 0x000200000
+#define SQLITE_RESULT_SUBTYPE 0x001000000
/*
** CAPI3REF: Deprecated Functions
@@ -6041,6 +6114,12 @@ SQLITE_API int sqlite3_value_encoding(sqlite3_value*);
** information can be used to pass a limited amount of context from
** one SQL function to another. Use the [sqlite3_result_subtype()]
** routine to set the subtype for the return value of an SQL function.
+**
+** Every [application-defined SQL function] that invoke this interface
+** should include the [SQLITE_SUBTYPE] property in the text
+** encoding argument when the function is [sqlite3_create_function|registered].
+** If the [SQLITE_SUBTYPE] property is omitted, then sqlite3_value_subtype()
+** might return zero instead of the upstream subtype in some corner cases.
*/
SQLITE_API unsigned int sqlite3_value_subtype(sqlite3_value*);
@@ -6139,48 +6218,56 @@ SQLITE_API sqlite3 *sqlite3_context_db_handle(sqlite3_context*);
** METHOD: sqlite3_context
**
** These functions may be used by (non-aggregate) SQL functions to
-** associate metadata with argument values. If the same value is passed to
-** multiple invocations of the same SQL function during query execution, under
-** some circumstances the associated metadata may be preserved. An example
-** of where this might be useful is in a regular-expression matching
-** function. The compiled version of the regular expression can be stored as
-** metadata associated with the pattern string.
+** associate auxiliary data with argument values. If the same argument
+** value is passed to multiple invocations of the same SQL function during
+** query execution, under some circumstances the associated auxiliary data
+** might be preserved. An example of where this might be useful is in a
+** regular-expression matching function. The compiled version of the regular
+** expression can be stored as auxiliary data associated with the pattern string.
** Then as long as the pattern string remains the same,
** the compiled regular expression can be reused on multiple
** invocations of the same function.
**
-** ^The sqlite3_get_auxdata(C,N) interface returns a pointer to the metadata
+** ^The sqlite3_get_auxdata(C,N) interface returns a pointer to the auxiliary data
** associated by the sqlite3_set_auxdata(C,N,P,X) function with the Nth argument
** value to the application-defined function. ^N is zero for the left-most
-** function argument. ^If there is no metadata
+** function argument. ^If there is no auxiliary data
** associated with the function argument, the sqlite3_get_auxdata(C,N) interface
** returns a NULL pointer.
**
-** ^The sqlite3_set_auxdata(C,N,P,X) interface saves P as metadata for the N-th
-** argument of the application-defined function. ^Subsequent
+** ^The sqlite3_set_auxdata(C,N,P,X) interface saves P as auxiliary data for the
+** N-th argument of the application-defined function. ^Subsequent
** calls to sqlite3_get_auxdata(C,N) return P from the most recent
-** sqlite3_set_auxdata(C,N,P,X) call if the metadata is still valid or
-** NULL if the metadata has been discarded.
+** sqlite3_set_auxdata(C,N,P,X) call if the auxiliary data is still valid or
+** NULL if the auxiliary data has been discarded.
** ^After each call to sqlite3_set_auxdata(C,N,P,X) where X is not NULL,
** SQLite will invoke the destructor function X with parameter P exactly
-** once, when the metadata is discarded.
-** SQLite is free to discard the metadata at any time, including: <ul>
+** once, when the auxiliary data is discarded.
+** SQLite is free to discard the auxiliary data at any time, including: <ul>
** <li> ^(when the corresponding function parameter changes)^, or
** <li> ^(when [sqlite3_reset()] or [sqlite3_finalize()] is called for the
** SQL statement)^, or
** <li> ^(when sqlite3_set_auxdata() is invoked again on the same
** parameter)^, or
** <li> ^(during the original sqlite3_set_auxdata() call when a memory
-** allocation error occurs.)^ </ul>
+** allocation error occurs.)^
+** <li> ^(during the original sqlite3_set_auxdata() call if the function
+** is evaluated during query planning instead of during query execution,
+** as sometimes happens with [SQLITE_ENABLE_STAT4].)^ </ul>
**
-** Note the last bullet in particular. The destructor X in
+** Note the last two bullets in particular. The destructor X in
** sqlite3_set_auxdata(C,N,P,X) might be called immediately, before the
** sqlite3_set_auxdata() interface even returns. Hence sqlite3_set_auxdata()
** should be called near the end of the function implementation and the
** function implementation should not make any use of P after
-** sqlite3_set_auxdata() has been called.
-**
-** ^(In practice, metadata is preserved between function calls for
+** sqlite3_set_auxdata() has been called. Furthermore, a call to
+** sqlite3_get_auxdata() that occurs immediately after a corresponding call
+** to sqlite3_set_auxdata() might still return NULL if an out-of-memory
+** condition occurred during the sqlite3_set_auxdata() call or if the
+** function is being evaluated during query planning rather than during
+** query execution.
+**
+** ^(In practice, auxiliary data is preserved between function calls for
** function parameters that are compile-time constants, including literal
** values and [parameters] and expressions composed from the same.)^
**
@@ -6190,10 +6277,67 @@ SQLITE_API sqlite3 *sqlite3_context_db_handle(sqlite3_context*);
**
** These routines must be called from the same thread in which
** the SQL function is running.
+**
+** See also: [sqlite3_get_clientdata()] and [sqlite3_set_clientdata()].
*/
SQLITE_API void *sqlite3_get_auxdata(sqlite3_context*, int N);
SQLITE_API void sqlite3_set_auxdata(sqlite3_context*, int N, void*, void (*)(void*));
+/*
+** CAPI3REF: Database Connection Client Data
+** METHOD: sqlite3
+**
+** These functions are used to associate one or more named pointers
+** with a [database connection].
+** A call to sqlite3_set_clientdata(D,N,P,X) causes the pointer P
+** to be attached to [database connection] D using name N. Subsequent
+** calls to sqlite3_get_clientdata(D,N) will return a copy of pointer P
+** or a NULL pointer if there were no prior calls to
+** sqlite3_set_clientdata() with the same values of D and N.
+** Names are compared using strcmp() and are thus case sensitive.
+**
+** If P and X are both non-NULL, then the destructor X is invoked with
+** argument P on the first of the following occurrences:
+** <ul>
+** <li> An out-of-memory error occurs during the call to
+** sqlite3_set_clientdata() which attempts to register pointer P.
+** <li> A subsequent call to sqlite3_set_clientdata(D,N,P,X) is made
+** with the same D and N parameters.
+** <li> The database connection closes. SQLite does not make any guarantees
+** about the order in which destructors are called, only that all
+** destructors will be called exactly once at some point during the
+** database connection closing process.
+** </ul>
+**
+** SQLite does not do anything with client data other than invoke
+** destructors on the client data at the appropriate time. The intended
+** use for client data is to provide a mechanism for wrapper libraries
+** to store additional information about an SQLite database connection.
+**
+** There is no limit (other than available memory) on the number of different
+** client data pointers (with different names) that can be attached to a
+** single database connection. However, the implementation is optimized
+** for the case of having only one or two different client data names.
+** Applications and wrapper libraries are discouraged from using more than
+** one client data name each.
+**
+** There is no way to enumerate the client data pointers
+** associated with a database connection. The N parameter can be thought
+** of as a secret key such that only code that knows the secret key is able
+** to access the associated data.
+**
+** Security Warning: These interfaces should not be exposed in scripting
+** languages or in other circumstances where it might be possible for an
+** an attacker to invoke them. Any agent that can invoke these interfaces
+** can probably also take control of the process.
+**
+** Database connection client data is only available for SQLite
+** version 3.44.0 ([dateof:3.44.0]) and later.
+**
+** See also: [sqlite3_set_auxdata()] and [sqlite3_get_auxdata()].
+*/
+SQLITE_API void *sqlite3_get_clientdata(sqlite3*,const char*);
+SQLITE_API int sqlite3_set_clientdata(sqlite3*, const char*, void*, void(*)(void*));
/*
** CAPI3REF: Constants Defining Special Destructor Behavior
@@ -6395,6 +6539,20 @@ SQLITE_API int sqlite3_result_zeroblob64(sqlite3_context*, sqlite3_uint64 n);
** higher order bits are discarded.
** The number of subtype bytes preserved by SQLite might increase
** in future releases of SQLite.
+**
+** Every [application-defined SQL function] that invokes this interface
+** should include the [SQLITE_RESULT_SUBTYPE] property in its
+** text encoding argument when the SQL function is
+** [sqlite3_create_function|registered]. If the [SQLITE_RESULT_SUBTYPE]
+** property is omitted from the function that invokes sqlite3_result_subtype(),
+** then in some cases the sqlite3_result_subtype() might fail to set
+** the result subtype.
+**
+** If SQLite is compiled with -DSQLITE_STRICT_SUBTYPE=1, then any
+** SQL function that invokes the sqlite3_result_subtype() interface
+** and that does not have the SQLITE_RESULT_SUBTYPE property will raise
+** an error. Future versions of SQLite might enable -DSQLITE_STRICT_SUBTYPE=1
+** by default.
*/
SQLITE_API void sqlite3_result_subtype(sqlite3_context*,unsigned int);
@@ -6826,7 +6984,7 @@ SQLITE_API int sqlite3_db_readonly(sqlite3 *db, const char *zDbName);
SQLITE_API int sqlite3_txn_state(sqlite3*,const char *zSchema);
/*
-** CAPI3REF: Allowed return values from [sqlite3_txn_state()]
+** CAPI3REF: Allowed return values from sqlite3_txn_state()
** KEYWORDS: {transaction state}
**
** These constants define the current transaction state of a database file.
@@ -6958,7 +7116,7 @@ SQLITE_API void *sqlite3_rollback_hook(sqlite3*, void(*)(void *), void*);
** ^Each call to the sqlite3_autovacuum_pages() interface overrides all
** previous invocations for that database connection. ^If the callback
** argument (C) to sqlite3_autovacuum_pages(D,C,P,X) is a NULL pointer,
-** then the autovacuum steps callback is cancelled. The return value
+** then the autovacuum steps callback is canceled. The return value
** from sqlite3_autovacuum_pages() is normally SQLITE_OK, but might
** be some other error code if something goes wrong. The current
** implementation will only return SQLITE_OK or SQLITE_MISUSE, but other
@@ -7477,6 +7635,10 @@ struct sqlite3_module {
/* The methods above are in versions 1 and 2 of the sqlite_module object.
** Those below are for version 3 and greater. */
int (*xShadowName)(const char*);
+ /* The methods above are in versions 1 through 3 of the sqlite_module object.
+ ** Those below are for version 4 and greater. */
+ int (*xIntegrity)(sqlite3_vtab *pVTab, const char *zSchema,
+ const char *zTabName, int mFlags, char **pzErr);
};
/*
@@ -7964,7 +8126,7 @@ SQLITE_API int sqlite3_blob_reopen(sqlite3_blob *, sqlite3_int64);
** code is returned and the transaction rolled back.
**
** Calling this function with an argument that is not a NULL pointer or an
-** open blob handle results in undefined behaviour. ^Calling this routine
+** open blob handle results in undefined behavior. ^Calling this routine
** with a null pointer (such as would be returned by a failed call to
** [sqlite3_blob_open()]) is a harmless no-op. ^Otherwise, if this function
** is passed a valid open blob handle, the values returned by the
@@ -8191,9 +8353,11 @@ SQLITE_API int sqlite3_vfs_unregister(sqlite3_vfs*);
**
** ^(Some systems (for example, Windows 95) do not support the operation
** implemented by sqlite3_mutex_try(). On those systems, sqlite3_mutex_try()
-** will always return SQLITE_BUSY. The SQLite core only ever uses
-** sqlite3_mutex_try() as an optimization so this is acceptable
-** behavior.)^
+** will always return SQLITE_BUSY. In most cases the SQLite core only uses
+** sqlite3_mutex_try() as an optimization, so this is acceptable
+** behavior. The exceptions are unix builds that set the
+** SQLITE_ENABLE_SETLK_TIMEOUT build option. In that case a working
+** sqlite3_mutex_try() is required.)^
**
** ^The sqlite3_mutex_leave() routine exits a mutex that was
** previously entered by the same thread. The behavior
@@ -8444,6 +8608,7 @@ SQLITE_API int sqlite3_test_control(int op, ...);
#define SQLITE_TESTCTRL_PRNG_SAVE 5
#define SQLITE_TESTCTRL_PRNG_RESTORE 6
#define SQLITE_TESTCTRL_PRNG_RESET 7 /* NOT USED */
+#define SQLITE_TESTCTRL_FK_NO_ACTION 7
#define SQLITE_TESTCTRL_BITVEC_TEST 8
#define SQLITE_TESTCTRL_FAULT_INSTALL 9
#define SQLITE_TESTCTRL_BENIGN_MALLOC_HOOKS 10
@@ -8451,6 +8616,7 @@ SQLITE_API int sqlite3_test_control(int op, ...);
#define SQLITE_TESTCTRL_ASSERT 12
#define SQLITE_TESTCTRL_ALWAYS 13
#define SQLITE_TESTCTRL_RESERVE 14 /* NOT USED */
+#define SQLITE_TESTCTRL_JSON_SELFCHECK 14
#define SQLITE_TESTCTRL_OPTIMIZATIONS 15
#define SQLITE_TESTCTRL_ISKEYWORD 16 /* NOT USED */
#define SQLITE_TESTCTRL_SCRATCHMALLOC 17 /* NOT USED */
@@ -8472,7 +8638,8 @@ SQLITE_API int sqlite3_test_control(int op, ...);
#define SQLITE_TESTCTRL_TRACEFLAGS 31
#define SQLITE_TESTCTRL_TUNE 32
#define SQLITE_TESTCTRL_LOGEST 33
-#define SQLITE_TESTCTRL_LAST 33 /* Largest TESTCTRL */
+#define SQLITE_TESTCTRL_USELONGDOUBLE 34
+#define SQLITE_TESTCTRL_LAST 34 /* Largest TESTCTRL */
/*
** CAPI3REF: SQL Keyword Checking
@@ -9928,7 +10095,7 @@ SQLITE_API int sqlite3_vtab_config(sqlite3*, int op, ...);
** [[SQLITE_VTAB_DIRECTONLY]]<dt>SQLITE_VTAB_DIRECTONLY</dt>
** <dd>Calls of the form
** [sqlite3_vtab_config](db,SQLITE_VTAB_DIRECTONLY) from within the
-** the [xConnect] or [xCreate] methods of a [virtual table] implmentation
+** the [xConnect] or [xCreate] methods of a [virtual table] implementation
** prohibits that virtual table from being used from within triggers and
** views.
** </dd>
@@ -10118,7 +10285,7 @@ SQLITE_API int sqlite3_vtab_distinct(sqlite3_index_info*);
** communicated to the xBestIndex method as a
** [SQLITE_INDEX_CONSTRAINT_EQ] constraint.)^ If xBestIndex wants to use
** this constraint, it must set the corresponding
-** aConstraintUsage[].argvIndex to a postive integer. ^(Then, under
+** aConstraintUsage[].argvIndex to a positive integer. ^(Then, under
** the usual mode of handling IN operators, SQLite generates [bytecode]
** that invokes the [xFilter|xFilter() method] once for each value
** on the right-hand side of the IN operator.)^ Thus the virtual table
@@ -10547,7 +10714,7 @@ SQLITE_API int sqlite3_db_cacheflush(sqlite3*);
** When the [sqlite3_blob_write()] API is used to update a blob column,
** the pre-update hook is invoked with SQLITE_DELETE. This is because the
** in this case the new values are not available. In this case, when a
-** callback made with op==SQLITE_DELETE is actuall a write using the
+** callback made with op==SQLITE_DELETE is actually a write using the
** sqlite3_blob_write() API, the [sqlite3_preupdate_blobwrite()] returns
** the index of the column being written. In other cases, where the
** pre-update hook is being invoked for some other reason, including a
@@ -10808,6 +10975,13 @@ SQLITE_API SQLITE_EXPERIMENTAL int sqlite3_snapshot_recover(sqlite3 *db, const c
** SQLITE_SERIALIZE_NOCOPY bit is set but no contiguous copy
** of the database exists.
**
+** After the call, if the SQLITE_SERIALIZE_NOCOPY bit had been set,
+** the returned buffer content will remain accessible and unchanged
+** until either the next write operation on the connection or when
+** the connection is closed, and applications must not modify the
+** buffer. If the bit had been clear, the returned buffer will not
+** be accessed by SQLite after the call.
+**
** A call to sqlite3_serialize(D,S,P,F) might return NULL even if the
** SQLITE_SERIALIZE_NOCOPY bit is omitted from argument F if a memory
** allocation error occurs.
@@ -10856,6 +11030,9 @@ SQLITE_API unsigned char *sqlite3_serialize(
** SQLite will try to increase the buffer size using sqlite3_realloc64()
** if writes on the database cause it to grow larger than M bytes.
**
+** Applications must not modify the buffer P or invalidate it before
+** the database connection D is closed.
+**
** The sqlite3_deserialize() interface will fail with SQLITE_BUSY if the
** database is currently in a read transaction or is involved in a backup
** operation.
@@ -10864,6 +11041,13 @@ SQLITE_API unsigned char *sqlite3_serialize(
** S argument to sqlite3_deserialize(D,S,P,N,M,F) is "temp" then the
** function returns SQLITE_ERROR.
**
+** The deserialized database should not be in [WAL mode]. If the database
+** is in WAL mode, then any attempt to use the database file will result
+** in an [SQLITE_CANTOPEN] error. The application can set the
+** [file format version numbers] (bytes 18 and 19) of the input database P
+** to 0x01 prior to invoking sqlite3_deserialize(D,S,P,N,M,F) to force the
+** database file into rollback mode and work around this limitation.
+**
** If sqlite3_deserialize(D,S,P,N,M,F) fails for any reason and if the
** SQLITE_DESERIALIZE_FREEONCLOSE bit is set in argument F, then
** [sqlite3_free()] is invoked on argument P prior to returning.
@@ -11937,6 +12121,18 @@ SQLITE_API int sqlite3changeset_concat(
/*
+** CAPI3REF: Upgrade the Schema of a Changeset/Patchset
+*/
+SQLITE_API int sqlite3changeset_upgrade(
+ sqlite3 *db,
+ const char *zDb,
+ int nIn, const void *pIn, /* Input changeset */
+ int *pnOut, void **ppOut /* OUT: Inverse of input */
+);
+
+
+
+/*
** CAPI3REF: Changegroup Handle
**
** A changegroup is an object used to combine two or more
@@ -11983,6 +12179,38 @@ typedef struct sqlite3_changegroup sqlite3_changegroup;
SQLITE_API int sqlite3changegroup_new(sqlite3_changegroup **pp);
/*
+** CAPI3REF: Add a Schema to a Changegroup
+** METHOD: sqlite3_changegroup_schema
+**
+** This method may be used to optionally enforce the rule that the changesets
+** added to the changegroup handle must match the schema of database zDb
+** ("main", "temp", or the name of an attached database). If
+** sqlite3changegroup_add() is called to add a changeset that is not compatible
+** with the configured schema, SQLITE_SCHEMA is returned and the changegroup
+** object is left in an undefined state.
+**
+** A changeset schema is considered compatible with the database schema in
+** the same way as for sqlite3changeset_apply(). Specifically, for each
+** table in the changeset, there exists a database table with:
+**
+** <ul>
+** <li> The name identified by the changeset, and
+** <li> at least as many columns as recorded in the changeset, and
+** <li> the primary key columns in the same position as recorded in
+** the changeset.
+** </ul>
+**
+** The output of the changegroup object always has the same schema as the
+** database nominated using this function. In cases where changesets passed
+** to sqlite3changegroup_add() have fewer columns than the corresponding table
+** in the database schema, these are filled in using the default column
+** values from the database schema. This makes it possible to combined
+** changesets that have different numbers of columns for a single table
+** within a changegroup, provided that they are otherwise compatible.
+*/
+SQLITE_API int sqlite3changegroup_schema(sqlite3_changegroup*, sqlite3*, const char *zDb);
+
+/*
** CAPI3REF: Add A Changeset To A Changegroup
** METHOD: sqlite3_changegroup
**
@@ -12050,13 +12278,18 @@ SQLITE_API int sqlite3changegroup_new(sqlite3_changegroup **pp);
** If the new changeset contains changes to a table that is already present
** in the changegroup, then the number of columns and the position of the
** primary key columns for the table must be consistent. If this is not the
-** case, this function fails with SQLITE_SCHEMA. If the input changeset
-** appears to be corrupt and the corruption is detected, SQLITE_CORRUPT is
-** returned. Or, if an out-of-memory condition occurs during processing, this
-** function returns SQLITE_NOMEM. In all cases, if an error occurs the state
-** of the final contents of the changegroup is undefined.
+** case, this function fails with SQLITE_SCHEMA. Except, if the changegroup
+** object has been configured with a database schema using the
+** sqlite3changegroup_schema() API, then it is possible to combine changesets
+** with different numbers of columns for a single table, provided that
+** they are otherwise compatible.
**
-** If no error occurs, SQLITE_OK is returned.
+** If the input changeset appears to be corrupt and the corruption is
+** detected, SQLITE_CORRUPT is returned. Or, if an out-of-memory condition
+** occurs during processing, this function returns SQLITE_NOMEM.
+**
+** In all cases, if an error occurs the state of the final contents of the
+** changegroup is undefined. If no error occurs, SQLITE_OK is returned.
*/
SQLITE_API int sqlite3changegroup_add(sqlite3_changegroup*, int nData, void *pData);
@@ -12321,10 +12554,17 @@ SQLITE_API int sqlite3changeset_apply_v2(
** <li>an insert change if all fields of the conflicting row match
** the row being inserted.
** </ul>
+**
+** <dt>SQLITE_CHANGESETAPPLY_FKNOACTION <dd>
+** If this flag it set, then all foreign key constraints in the target
+** database behave as if they were declared with "ON UPDATE NO ACTION ON
+** DELETE NO ACTION", even if they are actually CASCADE, RESTRICT, SET NULL
+** or SET DEFAULT.
*/
#define SQLITE_CHANGESETAPPLY_NOSAVEPOINT 0x0001
#define SQLITE_CHANGESETAPPLY_INVERT 0x0002
#define SQLITE_CHANGESETAPPLY_IGNORENOOP 0x0004
+#define SQLITE_CHANGESETAPPLY_FKNOACTION 0x0008
/*
** CAPI3REF: Constants Passed To The Conflict Handler
@@ -12890,8 +13130,11 @@ struct Fts5PhraseIter {
** created with the "columnsize=0" option.
**
** xColumnText:
-** This function attempts to retrieve the text of column iCol of the
-** current document. If successful, (*pz) is set to point to a buffer
+** If parameter iCol is less than zero, or greater than or equal to the
+** number of columns in the table, SQLITE_RANGE is returned.
+**
+** Otherwise, this function attempts to retrieve the text of column iCol of
+** the current document. If successful, (*pz) is set to point to a buffer
** containing the text in utf-8 encoding, (*pn) is set to the size in bytes
** (not characters) of the buffer and SQLITE_OK is returned. Otherwise,
** if an error occurs, an SQLite error code is returned and the final values
@@ -12901,8 +13144,10 @@ struct Fts5PhraseIter {
** Returns the number of phrases in the current query expression.
**
** xPhraseSize:
-** Returns the number of tokens in phrase iPhrase of the query. Phrases
-** are numbered starting from zero.
+** If parameter iCol is less than zero, or greater than or equal to the
+** number of phrases in the current query, as returned by xPhraseCount,
+** 0 is returned. Otherwise, this function returns the number of tokens in
+** phrase iPhrase of the query. Phrases are numbered starting from zero.
**
** xInstCount:
** Set *pnInst to the total number of occurrences of all phrases within
@@ -12918,12 +13163,13 @@ struct Fts5PhraseIter {
** Query for the details of phrase match iIdx within the current row.
** Phrase matches are numbered starting from zero, so the iIdx argument
** should be greater than or equal to zero and smaller than the value
-** output by xInstCount().
+** output by xInstCount(). If iIdx is less than zero or greater than
+** or equal to the value returned by xInstCount(), SQLITE_RANGE is returned.
**
-** Usually, output parameter *piPhrase is set to the phrase number, *piCol
+** Otherwise, output parameter *piPhrase is set to the phrase number, *piCol
** to the column in which it occurs and *piOff the token offset of the
-** first token of the phrase. Returns SQLITE_OK if successful, or an error
-** code (i.e. SQLITE_NOMEM) if an error occurs.
+** first token of the phrase. SQLITE_OK is returned if successful, or an
+** error code (i.e. SQLITE_NOMEM) if an error occurs.
**
** This API can be quite slow if used with an FTS5 table created with the
** "detail=none" or "detail=column" option.
@@ -12949,6 +13195,10 @@ struct Fts5PhraseIter {
** Invoking Api.xUserData() returns a copy of the pointer passed as
** the third argument to pUserData.
**
+** If parameter iPhrase is less than zero, or greater than or equal to
+** the number of phrases in the query, as returned by xPhraseCount(),
+** this function returns SQLITE_RANGE.
+**
** If the callback function returns any value other than SQLITE_OK, the
** query is abandoned and the xQueryPhrase function returns immediately.
** If the returned value is SQLITE_DONE, xQueryPhrase returns SQLITE_OK.
@@ -13063,6 +13313,39 @@ struct Fts5PhraseIter {
**
** xPhraseNextColumn()
** See xPhraseFirstColumn above.
+**
+** xQueryToken(pFts5, iPhrase, iToken, ppToken, pnToken)
+** This is used to access token iToken of phrase iPhrase of the current
+** query. Before returning, output parameter *ppToken is set to point
+** to a buffer containing the requested token, and *pnToken to the
+** size of this buffer in bytes.
+**
+** If iPhrase or iToken are less than zero, or if iPhrase is greater than
+** or equal to the number of phrases in the query as reported by
+** xPhraseCount(), or if iToken is equal to or greater than the number of
+** tokens in the phrase, SQLITE_RANGE is returned and *ppToken and *pnToken
+ are both zeroed.
+**
+** The output text is not a copy of the query text that specified the
+** token. It is the output of the tokenizer module. For tokendata=1
+** tables, this includes any embedded 0x00 and trailing data.
+**
+** xInstToken(pFts5, iIdx, iToken, ppToken, pnToken)
+** This is used to access token iToken of phrase hit iIdx within the
+** current row. If iIdx is less than zero or greater than or equal to the
+** value returned by xInstCount(), SQLITE_RANGE is returned. Otherwise,
+** output variable (*ppToken) is set to point to a buffer containing the
+** matching document token, and (*pnToken) to the size of that buffer in
+** bytes. This API is not available if the specified token matches a
+** prefix query term. In that case both output variables are always set
+** to 0.
+**
+** The output text is not a copy of the document text that was tokenized.
+** It is the output of the tokenizer module. For tokendata=1 tables, this
+** includes any embedded 0x00 and trailing data.
+**
+** This API can be quite slow if used with an FTS5 table created with the
+** "detail=none" or "detail=column" option.
*/
struct Fts5ExtensionApi {
int iVersion; /* Currently always set to 3 */
@@ -13100,6 +13383,13 @@ struct Fts5ExtensionApi {
int (*xPhraseFirstColumn)(Fts5Context*, int iPhrase, Fts5PhraseIter*, int*);
void (*xPhraseNextColumn)(Fts5Context*, Fts5PhraseIter*, int *piCol);
+
+ /* Below this point are iVersion>=3 only */
+ int (*xQueryToken)(Fts5Context*,
+ int iPhrase, int iToken,
+ const char **ppToken, int *pnToken
+ );
+ int (*xInstToken)(Fts5Context*, int iIdx, int iToken, const char**, int*);
};
/*
@@ -13294,8 +13584,8 @@ struct Fts5ExtensionApi {
** as separate queries of the FTS index are required for each synonym.
**
** When using methods (2) or (3), it is important that the tokenizer only
-** provide synonyms when tokenizing document text (method (2)) or query
-** text (method (3)), not both. Doing so will not cause any errors, but is
+** provide synonyms when tokenizing document text (method (3)) or query
+** text (method (2)), not both. Doing so will not cause any errors, but is
** inefficient.
*/
typedef struct Fts5Tokenizer Fts5Tokenizer;
@@ -13343,7 +13633,7 @@ struct fts5_api {
int (*xCreateTokenizer)(
fts5_api *pApi,
const char *zName,
- void *pContext,
+ void *pUserData,
fts5_tokenizer *pTokenizer,
void (*xDestroy)(void*)
);
@@ -13352,7 +13642,7 @@ struct fts5_api {
int (*xFindTokenizer)(
fts5_api *pApi,
const char *zName,
- void **ppContext,
+ void **ppUserData,
fts5_tokenizer *pTokenizer
);
@@ -13360,7 +13650,7 @@ struct fts5_api {
int (*xCreateFunction)(
fts5_api *pApi,
const char *zName,
- void *pContext,
+ void *pUserData,
fts5_extension_function xFunction,
void (*xDestroy)(void*)
);
@@ -13471,7 +13761,7 @@ struct fts5_api {
** level of recursion for each term. A stack overflow can result
** if the number of terms is too large. In practice, most SQL
** never has more than 3 or 4 terms. Use a value of 0 to disable
-** any limit on the number of terms in a compount SELECT.
+** any limit on the number of terms in a compound SELECT.
*/
#ifndef SQLITE_MAX_COMPOUND_SELECT
# define SQLITE_MAX_COMPOUND_SELECT 500
@@ -13586,7 +13876,7 @@ struct fts5_api {
** max_page_count macro.
*/
#ifndef SQLITE_MAX_PAGE_COUNT
-# define SQLITE_MAX_PAGE_COUNT 1073741823
+# define SQLITE_MAX_PAGE_COUNT 0xfffffffe /* 4294967294 */
#endif
/*
@@ -13716,6 +14006,29 @@ struct fts5_api {
#endif
/*
+** Enable SQLITE_USE_SEH by default on MSVC builds. Only omit
+** SEH support if the -DSQLITE_OMIT_SEH option is given.
+*/
+#if defined(_MSC_VER) && !defined(SQLITE_OMIT_SEH)
+# define SQLITE_USE_SEH 1
+#else
+# undef SQLITE_USE_SEH
+#endif
+
+/*
+** Enable SQLITE_DIRECT_OVERFLOW_READ, unless the build explicitly
+** disables it using -DSQLITE_DIRECT_OVERFLOW_READ=0
+*/
+#if defined(SQLITE_DIRECT_OVERFLOW_READ) && SQLITE_DIRECT_OVERFLOW_READ+1==1
+ /* Disable if -DSQLITE_DIRECT_OVERFLOW_READ=0 */
+# undef SQLITE_DIRECT_OVERFLOW_READ
+#else
+ /* In all other cases, enable */
+# define SQLITE_DIRECT_OVERFLOW_READ 1
+#endif
+
+
+/*
** The SQLITE_THREADSAFE macro must be defined as 0, 1, or 2.
** 0 means mutexes are permanently disable and the library is never
** threadsafe. 1 means the library is serialized which is the highest
@@ -14574,8 +14887,31 @@ typedef INT16_TYPE LogEst;
** the end of buffer S. This macro returns true if P points to something
** contained within the buffer S.
*/
-#define SQLITE_WITHIN(P,S,E) (((uptr)(P)>=(uptr)(S))&&((uptr)(P)<(uptr)(E)))
+#define SQLITE_WITHIN(P,S,E) (((uptr)(P)>=(uptr)(S))&&((uptr)(P)<(uptr)(E)))
+/*
+** P is one byte past the end of a large buffer. Return true if a span of bytes
+** between S..E crosses the end of that buffer. In other words, return true
+** if the sub-buffer S..E-1 overflows the buffer whose last byte is P-1.
+**
+** S is the start of the span. E is one byte past the end of end of span.
+**
+** P
+** |-----------------| FALSE
+** |-------|
+** S E
+**
+** P
+** |-----------------|
+** |-------| TRUE
+** S E
+**
+** P
+** |-----------------|
+** |-------| FALSE
+** S E
+*/
+#define SQLITE_OVERFLOW(P,S,E) (((uptr)(S)<(uptr)(P))&&((uptr)(E)>(uptr)(P)))
/*
** Macros to determine whether the machine is big or little endian,
@@ -14585,16 +14921,33 @@ typedef INT16_TYPE LogEst;
** using C-preprocessor macros. If that is unsuccessful, or if
** -DSQLITE_BYTEORDER=0 is set, then byte-order is determined
** at run-time.
+**
+** If you are building SQLite on some obscure platform for which the
+** following ifdef magic does not work, you can always include either:
+**
+** -DSQLITE_BYTEORDER=1234
+**
+** or
+**
+** -DSQLITE_BYTEORDER=4321
+**
+** to cause the build to work for little-endian or big-endian processors,
+** respectively.
*/
-#ifndef SQLITE_BYTEORDER
-# if defined(i386) || defined(__i386__) || defined(_M_IX86) || \
+#ifndef SQLITE_BYTEORDER /* Replicate changes at tag-20230904a */
+# if defined(__BYTE_ORDER__) && __BYTE_ORDER__==__ORDER_BIG_ENDIAN__
+# define SQLITE_BYTEORDER 4321
+# elif defined(__BYTE_ORDER__) && __BYTE_ORDER__==__ORDER_LITTLE_ENDIAN__
+# define SQLITE_BYTEORDER 1234
+# elif defined(__BIG_ENDIAN__) && __BIG_ENDIAN__==1
+# define SQLITE_BYTEORDER 4321
+# elif defined(i386) || defined(__i386__) || defined(_M_IX86) || \
defined(__x86_64) || defined(__x86_64__) || defined(_M_X64) || \
defined(_M_AMD64) || defined(_M_ARM) || defined(__x86) || \
defined(__ARMEL__) || defined(__AARCH64EL__) || defined(_M_ARM64)
-# define SQLITE_BYTEORDER 1234
-# elif defined(sparc) || defined(__ppc__) || \
- defined(__ARMEB__) || defined(__AARCH64EB__)
-# define SQLITE_BYTEORDER 4321
+# define SQLITE_BYTEORDER 1234
+# elif defined(sparc) || defined(__ARMEB__) || defined(__AARCH64EB__)
+# define SQLITE_BYTEORDER 4321
# else
# define SQLITE_BYTEORDER 0
# endif
@@ -14809,7 +15162,7 @@ struct BusyHandler {
/*
** Name of table that holds the database schema.
**
-** The PREFERRED names are used whereever possible. But LEGACY is also
+** The PREFERRED names are used wherever possible. But LEGACY is also
** used for backwards compatibility.
**
** 1. Queries can use either the PREFERRED or the LEGACY names
@@ -14918,11 +15271,13 @@ typedef struct Column Column;
typedef struct Cte Cte;
typedef struct CteUse CteUse;
typedef struct Db Db;
+typedef struct DbClientData DbClientData;
typedef struct DbFixer DbFixer;
typedef struct Schema Schema;
typedef struct Expr Expr;
typedef struct ExprList ExprList;
typedef struct FKey FKey;
+typedef struct FpDecode FpDecode;
typedef struct FuncDestructor FuncDestructor;
typedef struct FuncDef FuncDef;
typedef struct FuncDefHash FuncDefHash;
@@ -14941,6 +15296,7 @@ typedef struct Parse Parse;
typedef struct ParseCleanup ParseCleanup;
typedef struct PreUpdate PreUpdate;
typedef struct PrintfArguments PrintfArguments;
+typedef struct RCStr RCStr;
typedef struct RenameToken RenameToken;
typedef struct Returning Returning;
typedef struct RowSet RowSet;
@@ -15554,7 +15910,7 @@ SQLITE_PRIVATE sqlite3_file *sqlite3PagerJrnlFile(Pager*);
SQLITE_PRIVATE const char *sqlite3PagerJournalname(Pager*);
SQLITE_PRIVATE void *sqlite3PagerTempSpace(Pager*);
SQLITE_PRIVATE int sqlite3PagerIsMemdb(Pager*);
-SQLITE_PRIVATE void sqlite3PagerCacheStat(Pager *, int, int, int *);
+SQLITE_PRIVATE void sqlite3PagerCacheStat(Pager *, int, int, u64*);
SQLITE_PRIVATE void sqlite3PagerClearCache(Pager*);
SQLITE_PRIVATE int sqlite3SectorSize(sqlite3_file *);
@@ -15578,6 +15934,10 @@ SQLITE_PRIVATE void sqlite3PagerRefdump(Pager*);
# define enable_simulated_io_errors()
#endif
+#if defined(SQLITE_USE_SEH) && !defined(SQLITE_OMIT_WAL)
+SQLITE_PRIVATE int sqlite3PagerWalSystemErrno(Pager*);
+#endif
+
#endif /* SQLITE_PAGER_H */
/************** End of pager.h ***********************************************/
@@ -15907,9 +16267,7 @@ SQLITE_PRIVATE int sqlite3BtreePrevious(BtCursor*, int flags);
SQLITE_PRIVATE i64 sqlite3BtreeIntegerKey(BtCursor*);
SQLITE_PRIVATE void sqlite3BtreeCursorPin(BtCursor*);
SQLITE_PRIVATE void sqlite3BtreeCursorUnpin(BtCursor*);
-#ifdef SQLITE_ENABLE_OFFSET_SQL_FUNC
SQLITE_PRIVATE i64 sqlite3BtreeOffset(BtCursor*);
-#endif
SQLITE_PRIVATE int sqlite3BtreePayload(BtCursor*, u32 offset, u32 amt, void*);
SQLITE_PRIVATE const void *sqlite3BtreePayloadFetch(BtCursor*, u32 *pAmt);
SQLITE_PRIVATE u32 sqlite3BtreePayloadSize(BtCursor*);
@@ -16139,6 +16497,7 @@ typedef struct VdbeOpList VdbeOpList;
#define P4_INT64 (-13) /* P4 is a 64-bit signed integer */
#define P4_INTARRAY (-14) /* P4 is a vector of 32-bit integers */
#define P4_FUNCCTX (-15) /* P4 is a pointer to an sqlite3_context object */
+#define P4_TABLEREF (-16) /* Like P4_TABLE, but reference counted */
/* Error message codes for OP_Halt */
#define P5_ConstraintNotNull 1
@@ -16354,19 +16713,22 @@ typedef struct VdbeOpList VdbeOpList;
#define OP_VCreate 171
#define OP_VDestroy 172
#define OP_VOpen 173
-#define OP_VInitIn 174 /* synopsis: r[P2]=ValueList(P1,P3) */
-#define OP_VColumn 175 /* synopsis: r[P3]=vcolumn(P2) */
-#define OP_VRename 176
-#define OP_Pagecount 177
-#define OP_MaxPgcnt 178
-#define OP_ClrSubtype 179 /* synopsis: r[P1].subtype = 0 */
-#define OP_FilterAdd 180 /* synopsis: filter(P1) += key(P3@P4) */
-#define OP_Trace 181
-#define OP_CursorHint 182
-#define OP_ReleaseReg 183 /* synopsis: release r[P1@P2] mask P3 */
-#define OP_Noop 184
-#define OP_Explain 185
-#define OP_Abortable 186
+#define OP_VCheck 174
+#define OP_VInitIn 175 /* synopsis: r[P2]=ValueList(P1,P3) */
+#define OP_VColumn 176 /* synopsis: r[P3]=vcolumn(P2) */
+#define OP_VRename 177
+#define OP_Pagecount 178
+#define OP_MaxPgcnt 179
+#define OP_ClrSubtype 180 /* synopsis: r[P1].subtype = 0 */
+#define OP_GetSubtype 181 /* synopsis: r[P2] = r[P1].subtype */
+#define OP_SetSubtype 182 /* synopsis: r[P2].subtype = r[P1] */
+#define OP_FilterAdd 183 /* synopsis: filter(P1) += key(P3@P4) */
+#define OP_Trace 184
+#define OP_CursorHint 185
+#define OP_ReleaseReg 186 /* synopsis: release r[P1@P2] mask P3 */
+#define OP_Noop 187
+#define OP_Explain 188
+#define OP_Abortable 189
/* Properties such as "out2" or "jump" that are specified in
** comments following the "case" for each opcode in the vdbe.c
@@ -16384,7 +16746,7 @@ typedef struct VdbeOpList VdbeOpList;
/* 8 */ 0x01, 0x01, 0x01, 0x01, 0x03, 0x03, 0x01, 0x01,\
/* 16 */ 0x03, 0x03, 0x01, 0x12, 0x01, 0x49, 0x49, 0x49,\
/* 24 */ 0x49, 0x01, 0x49, 0x49, 0x49, 0x49, 0x49, 0x49,\
-/* 32 */ 0x41, 0x01, 0x01, 0x01, 0x41, 0x01, 0x41, 0x41,\
+/* 32 */ 0x41, 0x01, 0x41, 0x41, 0x41, 0x01, 0x41, 0x41,\
/* 40 */ 0x41, 0x41, 0x41, 0x26, 0x26, 0x41, 0x23, 0x0b,\
/* 48 */ 0x01, 0x01, 0x03, 0x03, 0x0b, 0x0b, 0x0b, 0x0b,\
/* 56 */ 0x0b, 0x0b, 0x01, 0x03, 0x03, 0x03, 0x01, 0x41,\
@@ -16396,14 +16758,14 @@ typedef struct VdbeOpList VdbeOpList;
/* 104 */ 0x26, 0x26, 0x26, 0x26, 0x26, 0x26, 0x26, 0x26,\
/* 112 */ 0x40, 0x00, 0x12, 0x40, 0x40, 0x10, 0x40, 0x00,\
/* 120 */ 0x00, 0x00, 0x40, 0x00, 0x40, 0x40, 0x10, 0x10,\
-/* 128 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x50,\
+/* 128 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x40, 0x00, 0x50,\
/* 136 */ 0x00, 0x40, 0x04, 0x04, 0x00, 0x40, 0x50, 0x40,\
/* 144 */ 0x10, 0x00, 0x00, 0x10, 0x00, 0x00, 0x00, 0x00,\
/* 152 */ 0x00, 0x10, 0x00, 0x00, 0x06, 0x10, 0x00, 0x04,\
/* 160 */ 0x1a, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,\
-/* 168 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x40, 0x50, 0x40,\
-/* 176 */ 0x00, 0x10, 0x10, 0x02, 0x00, 0x00, 0x00, 0x00,\
-/* 184 */ 0x00, 0x00, 0x00,}
+/* 168 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x40, 0x10, 0x50,\
+/* 176 */ 0x40, 0x00, 0x10, 0x10, 0x02, 0x12, 0x12, 0x00,\
+/* 184 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,}
/* The resolve3P2Values() routine is able to run faster if it knows
** the value of the largest JUMP opcode. The smaller the maximum
@@ -16578,7 +16940,7 @@ SQLITE_PRIVATE void sqlite3VdbeNoopComment(Vdbe*, const char*, ...);
** The VdbeCoverage macros are used to set a coverage testing point
** for VDBE branch instructions. The coverage testing points are line
** numbers in the sqlite3.c source file. VDBE branch coverage testing
-** only works with an amalagmation build. That's ok since a VDBE branch
+** only works with an amalgamation build. That's ok since a VDBE branch
** coverage build designed for testing the test suite only. No application
** should ever ship with VDBE branch coverage measuring turned on.
**
@@ -16596,7 +16958,7 @@ SQLITE_PRIVATE void sqlite3VdbeNoopComment(Vdbe*, const char*, ...);
** // NULL option is not possible
**
** VdbeCoverageEqNe(v) // Previous OP_Jump is only interested
-** // in distingishing equal and not-equal.
+** // in distinguishing equal and not-equal.
**
** Every VDBE branch operation must be tagged with one of the macros above.
** If not, then when "make test" is run with -DSQLITE_VDBE_COVERAGE and
@@ -16606,7 +16968,7 @@ SQLITE_PRIVATE void sqlite3VdbeNoopComment(Vdbe*, const char*, ...);
** During testing, the test application will invoke
** sqlite3_test_control(SQLITE_TESTCTRL_VDBE_COVERAGE,...) to set a callback
** routine that is invoked as each bytecode branch is taken. The callback
-** contains the sqlite3.c source line number ov the VdbeCoverage macro and
+** contains the sqlite3.c source line number of the VdbeCoverage macro and
** flags to indicate whether or not the branch was taken. The test application
** is responsible for keeping track of this and reporting byte-code branches
** that are never taken.
@@ -16945,7 +17307,7 @@ SQLITE_API int sqlite3_mutex_held(sqlite3_mutex*);
/*
** Default synchronous levels.
**
-** Note that (for historcal reasons) the PAGER_SYNCHRONOUS_* macros differ
+** Note that (for historical reasons) the PAGER_SYNCHRONOUS_* macros differ
** from the SQLITE_DEFAULT_SYNCHRONOUS value by 1.
**
** PAGER_SYNCHRONOUS DEFAULT_SYNCHRONOUS
@@ -16984,7 +17346,7 @@ struct Db {
** An instance of the following structure stores a database schema.
**
** Most Schema objects are associated with a Btree. The exception is
-** the Schema for the TEMP databaes (sqlite3.aDb[1]) which is free-standing.
+** the Schema for the TEMP database (sqlite3.aDb[1]) which is free-standing.
** In shared cache mode, a single Schema object can be shared by multiple
** Btrees that refer to the same underlying BtShared object.
**
@@ -17095,7 +17457,7 @@ struct Lookaside {
LookasideSlot *pInit; /* List of buffers not previously used */
LookasideSlot *pFree; /* List of available buffers */
#ifndef SQLITE_OMIT_TWOSIZE_LOOKASIDE
- LookasideSlot *pSmallInit; /* List of small buffers not prediously used */
+ LookasideSlot *pSmallInit; /* List of small buffers not previously used */
LookasideSlot *pSmallFree; /* List of available small buffers */
void *pMiddle; /* First byte past end of full-size buffers and
** the first byte of LOOKASIDE_SMALL buffers */
@@ -17112,7 +17474,7 @@ struct LookasideSlot {
#define EnableLookaside db->lookaside.bDisable--;\
db->lookaside.sz=db->lookaside.bDisable?0:db->lookaside.szTrue
-/* Size of the smaller allocations in two-size lookside */
+/* Size of the smaller allocations in two-size lookaside */
#ifdef SQLITE_OMIT_TWOSIZE_LOOKASIDE
# define LOOKASIDE_SMALL 0
#else
@@ -17312,6 +17674,7 @@ struct sqlite3 {
i64 nDeferredCons; /* Net deferred constraints this transaction. */
i64 nDeferredImmCons; /* Net deferred immediate constraints */
int *pnBytesFreed; /* If not NULL, increment this in DbFree() */
+ DbClientData *pDbData; /* sqlite3_set_clientdata() content */
#ifdef SQLITE_ENABLE_UNLOCK_NOTIFY
/* The following variables are all protected by the STATIC_MAIN
** mutex, not by sqlite3.mutex. They are used by code in notify.c.
@@ -17394,6 +17757,7 @@ struct sqlite3 {
/* the count using a callback. */
#define SQLITE_CorruptRdOnly HI(0x00002) /* Prohibit writes due to error */
#define SQLITE_ReadUncommit HI(0x00004) /* READ UNCOMMITTED in shared-cache */
+#define SQLITE_FkNoAction HI(0x00008) /* Treat all FK as NO ACTION */
/* Flags used only if debugging */
#ifdef SQLITE_DEBUG
@@ -17451,6 +17815,7 @@ struct sqlite3 {
#define SQLITE_IndexedExpr 0x01000000 /* Pull exprs from index when able */
#define SQLITE_Coroutines 0x02000000 /* Co-routines for subqueries */
#define SQLITE_NullUnusedCols 0x04000000 /* NULL unused columns in subqueries */
+#define SQLITE_OnePass 0x08000000 /* Single-pass DELETE and UPDATE */
#define SQLITE_AllOpts 0xffffffff /* All optimizations */
/*
@@ -17533,6 +17898,7 @@ struct FuncDestructor {
** SQLITE_FUNC_ANYORDER == NC_OrderAgg == SF_OrderByReqd
** SQLITE_FUNC_LENGTH == OPFLAG_LENGTHARG
** SQLITE_FUNC_TYPEOF == OPFLAG_TYPEOFARG
+** SQLITE_FUNC_BYTELEN == OPFLAG_BYTELENARG
** SQLITE_FUNC_CONSTANT == SQLITE_DETERMINISTIC from the API
** SQLITE_FUNC_DIRECT == SQLITE_DIRECTONLY from the API
** SQLITE_FUNC_UNSAFE == SQLITE_INNOCUOUS -- opposite meanings!!!
@@ -17540,7 +17906,7 @@ struct FuncDestructor {
**
** Note that even though SQLITE_FUNC_UNSAFE and SQLITE_INNOCUOUS have the
** same bit value, their meanings are inverted. SQLITE_FUNC_UNSAFE is
-** used internally and if set means tha the function has side effects.
+** used internally and if set means that the function has side effects.
** SQLITE_INNOCUOUS is used by application code and means "not unsafe".
** See multiple instances of tag-20230109-1.
*/
@@ -17551,6 +17917,7 @@ struct FuncDestructor {
#define SQLITE_FUNC_NEEDCOLL 0x0020 /* sqlite3GetFuncCollSeq() might be called*/
#define SQLITE_FUNC_LENGTH 0x0040 /* Built-in length() function */
#define SQLITE_FUNC_TYPEOF 0x0080 /* Built-in typeof() function */
+#define SQLITE_FUNC_BYTELEN 0x00c0 /* Built-in octet_length() function */
#define SQLITE_FUNC_COUNT 0x0100 /* Built-in count(*) aggregate */
/* 0x0200 -- available for reuse */
#define SQLITE_FUNC_UNLIKELY 0x0400 /* Built-in unlikely() function */
@@ -17559,14 +17926,15 @@ struct FuncDestructor {
#define SQLITE_FUNC_SLOCHNG 0x2000 /* "Slow Change". Value constant during a
** single query - might change over time */
#define SQLITE_FUNC_TEST 0x4000 /* Built-in testing functions */
-/* 0x8000 -- available for reuse */
+#define SQLITE_FUNC_RUNONLY 0x8000 /* Cannot be used by valueFromFunction */
#define SQLITE_FUNC_WINDOW 0x00010000 /* Built-in window-only function */
#define SQLITE_FUNC_INTERNAL 0x00040000 /* For use by NestedParse() only */
#define SQLITE_FUNC_DIRECT 0x00080000 /* Not for use in TRIGGERs or VIEWs */
-#define SQLITE_FUNC_SUBTYPE 0x00100000 /* Result likely to have sub-type */
+/* SQLITE_SUBTYPE 0x00100000 // Consumer of subtypes */
#define SQLITE_FUNC_UNSAFE 0x00200000 /* Function has side effects */
#define SQLITE_FUNC_INLINE 0x00400000 /* Functions implemented in-line */
#define SQLITE_FUNC_BUILTIN 0x00800000 /* This is a built-in function */
+/* SQLITE_RESULT_SUBTYPE 0x01000000 // Generator of subtypes */
#define SQLITE_FUNC_ANYORDER 0x08000000 /* count/min/max aggregate */
/* Identifier numbers for each in-line function */
@@ -17658,10 +18026,11 @@ struct FuncDestructor {
#define MFUNCTION(zName, nArg, xPtr, xFunc) \
{nArg, SQLITE_FUNC_BUILTIN|SQLITE_FUNC_CONSTANT|SQLITE_UTF8, \
xPtr, 0, xFunc, 0, 0, 0, #zName, {0} }
-#define JFUNCTION(zName, nArg, iArg, xFunc) \
- {nArg, SQLITE_FUNC_BUILTIN|SQLITE_DETERMINISTIC|\
- SQLITE_FUNC_CONSTANT|SQLITE_UTF8, \
- SQLITE_INT_TO_PTR(iArg), 0, xFunc, 0, 0, 0, #zName, {0} }
+#define JFUNCTION(zName, nArg, bUseCache, bWS, bRS, bJsonB, iArg, xFunc) \
+ {nArg, SQLITE_FUNC_BUILTIN|SQLITE_DETERMINISTIC|SQLITE_FUNC_CONSTANT|\
+ SQLITE_UTF8|((bUseCache)*SQLITE_FUNC_RUNONLY)|\
+ ((bRS)*SQLITE_SUBTYPE)|((bWS)*SQLITE_RESULT_SUBTYPE), \
+ SQLITE_INT_TO_PTR(iArg|((bJsonB)*JSON_BLOB)),0,xFunc,0, 0, 0, #zName, {0} }
#define INLINE_FUNC(zName, nArg, iArg, mFlags) \
{nArg, SQLITE_FUNC_BUILTIN|\
SQLITE_UTF8|SQLITE_FUNC_INLINE|SQLITE_FUNC_CONSTANT|(mFlags), \
@@ -18130,7 +18499,7 @@ struct FKey {
** foreign key.
**
** The OE_Default value is a place holder that means to use whatever
-** conflict resolution algorthm is required from context.
+** conflict resolution algorithm is required from context.
**
** The following symbolic values are used to record which type
** of conflict resolution action to take.
@@ -18296,6 +18665,7 @@ struct Index {
unsigned isCovering:1; /* True if this is a covering index */
unsigned noSkipScan:1; /* Do not try to use skip-scan if true */
unsigned hasStat1:1; /* aiRowLogEst values come from sqlite_stat1 */
+ unsigned bLowQual:1; /* sqlite_stat1 says this is a low-quality index */
unsigned bNoQuery:1; /* Do not use this index to optimize queries */
unsigned bAscKeyBug:1; /* True if the bba7b69f9849b5bf bug applies */
unsigned bHasVCol:1; /* Index references one or more VIRTUAL columns */
@@ -18406,6 +18776,10 @@ struct AggInfo {
FuncDef *pFunc; /* The aggregate function implementation */
int iDistinct; /* Ephemeral table used to enforce DISTINCT */
int iDistAddr; /* Address of OP_OpenEphemeral */
+ int iOBTab; /* Ephemeral table to implement ORDER BY */
+ u8 bOBPayload; /* iOBTab has payload columns separate from key */
+ u8 bOBUnique; /* Enforce uniqueness on iOBTab keys */
+ u8 bUseSubtype; /* Transfer subtype info through sorter */
} *aFunc;
int nFunc; /* Number of entries in aFunc[] */
u32 selId; /* Select to which this AggInfo belongs */
@@ -18544,7 +18918,7 @@ struct Expr {
** TK_REGISTER: register number
** TK_TRIGGER: 1 -> new, 0 -> old
** EP_Unlikely: 134217728 times likelihood
- ** TK_IN: ephemerial table holding RHS
+ ** TK_IN: ephemeral table holding RHS
** TK_SELECT_COLUMN: Number of columns on the LHS
** TK_SELECT: 1st register of result vector */
ynVar iColumn; /* TK_COLUMN: column index. -1 for rowid.
@@ -18590,7 +18964,7 @@ struct Expr {
#define EP_Reduced 0x004000 /* Expr struct EXPR_REDUCEDSIZE bytes only */
#define EP_Win 0x008000 /* Contains window functions */
#define EP_TokenOnly 0x010000 /* Expr struct EXPR_TOKENONLYSIZE bytes only */
- /* 0x020000 // Available for reuse */
+#define EP_FullSize 0x020000 /* Expr structure must remain full sized */
#define EP_IfNullRow 0x040000 /* The TK_IF_NULL_ROW opcode */
#define EP_Unlikely 0x080000 /* unlikely() or likelihood() function */
#define EP_ConstFunc 0x100000 /* A SQLITE_FUNC_CONSTANT or _SLOCHNG function */
@@ -18620,12 +18994,15 @@ struct Expr {
#define ExprClearProperty(E,P) (E)->flags&=~(P)
#define ExprAlwaysTrue(E) (((E)->flags&(EP_OuterON|EP_IsTrue))==EP_IsTrue)
#define ExprAlwaysFalse(E) (((E)->flags&(EP_OuterON|EP_IsFalse))==EP_IsFalse)
+#define ExprIsFullSize(E) (((E)->flags&(EP_Reduced|EP_TokenOnly))==0)
/* Macros used to ensure that the correct members of unions are accessed
** in Expr.
*/
#define ExprUseUToken(E) (((E)->flags&EP_IntValue)==0)
#define ExprUseUValue(E) (((E)->flags&EP_IntValue)!=0)
+#define ExprUseWOfst(E) (((E)->flags&(EP_InnerON|EP_OuterON))==0)
+#define ExprUseWJoin(E) (((E)->flags&(EP_InnerON|EP_OuterON))!=0)
#define ExprUseXList(E) (((E)->flags&EP_xIsSelect)==0)
#define ExprUseXSelect(E) (((E)->flags&EP_xIsSelect)!=0)
#define ExprUseYTab(E) (((E)->flags&(EP_WinFunc|EP_Subrtn))==0)
@@ -18735,6 +19112,7 @@ struct ExprList {
#define ENAME_NAME 0 /* The AS clause of a result set */
#define ENAME_SPAN 1 /* Complete text of the result set expression */
#define ENAME_TAB 2 /* "DB.TABLE.NAME" for the result set */
+#define ENAME_ROWID 3 /* "DB.TABLE._rowid_" for * expansion of rowid */
/*
** An instance of this structure can hold a simple list of identifiers,
@@ -18814,7 +19192,7 @@ struct SrcItem {
unsigned notCte :1; /* This item may not match a CTE */
unsigned isUsing :1; /* u3.pUsing is valid */
unsigned isOn :1; /* u3.pOn was once valid and non-NULL */
- unsigned isSynthUsing :1; /* u3.pUsing is synthensized from NATURAL */
+ unsigned isSynthUsing :1; /* u3.pUsing is synthesized from NATURAL */
unsigned isNestedFrom :1; /* pSelect is a SF_NestedFrom subquery */
} fg;
int iCursor; /* The VDBE cursor number used to access this table */
@@ -18935,6 +19313,7 @@ struct NameContext {
int nRef; /* Number of names resolved by this context */
int nNcErr; /* Number of errors encountered while resolving names */
int ncFlags; /* Zero or more NC_* flags defined below */
+ u32 nNestedSelect; /* Number of nested selects using this NC */
Select *pWinSelect; /* SELECT statement for any window functions */
};
@@ -19343,6 +19722,7 @@ struct Parse {
int *aLabel; /* Space to hold the labels */
ExprList *pConstExpr;/* Constant expressions */
IndexedExpr *pIdxEpr;/* List of expressions used by active indexes */
+ IndexedExpr *pIdxPartExpr; /* Exprs constrained by index WHERE clauses */
Token constraintName;/* Name of the constraint currently being parsed */
yDbMask writeMask; /* Start a write transaction on these databases */
yDbMask cookieMask; /* Bitmask of schema verified databases */
@@ -19350,6 +19730,9 @@ struct Parse {
int regRoot; /* Register holding root page number for new objects */
int nMaxArg; /* Max args passed to user function by sub-program */
int nSelect; /* Number of SELECT stmts. Counter for Select.selId */
+#ifndef SQLITE_OMIT_PROGRESS_CALLBACK
+ u32 nProgressSteps; /* xProgress steps taken during sqlite3_prepare() */
+#endif
#ifndef SQLITE_OMIT_SHARED_CACHE
int nTableLock; /* Number of locks in aTableLock */
TableLock *aTableLock; /* Required table locks for shared-cache mode */
@@ -19363,12 +19746,9 @@ struct Parse {
int addrCrTab; /* Address of OP_CreateBtree on CREATE TABLE */
Returning *pReturning; /* The RETURNING clause */
} u1;
- u32 nQueryLoop; /* Est number of iterations of a query (10*log2(N)) */
u32 oldmask; /* Mask of old.* columns referenced */
u32 newmask; /* Mask of new.* columns referenced */
-#ifndef SQLITE_OMIT_PROGRESS_CALLBACK
- u32 nProgressSteps; /* xProgress steps taken during sqlite3_prepare() */
-#endif
+ LogEst nQueryLoop; /* Est number of iterations of a query (10*log2(N)) */
u8 eTriggerOp; /* TK_UPDATE, TK_INSERT or TK_DELETE */
u8 bReturning; /* Coding a RETURNING trigger */
u8 eOrconf; /* Default ON CONFLICT policy for trigger steps */
@@ -19492,6 +19872,7 @@ struct AuthContext {
#define OPFLAG_ISNOOP 0x40 /* OP_Delete does pre-update-hook only */
#define OPFLAG_LENGTHARG 0x40 /* OP_Column only used for length() */
#define OPFLAG_TYPEOFARG 0x80 /* OP_Column only used for typeof() */
+#define OPFLAG_BYTELENARG 0xc0 /* OP_Column only for octet_length() */
#define OPFLAG_BULKCSR 0x01 /* OP_Open** used to open bulk cursor */
#define OPFLAG_SEEKEQ 0x02 /* OP_Open** cursor uses EQ seek only */
#define OPFLAG_FORDELETE 0x08 /* OP_Open should use BTREE_FORDELETE */
@@ -19613,6 +19994,7 @@ struct Returning {
int iRetCur; /* Transient table holding RETURNING results */
int nRetCol; /* Number of in pReturnEL after expansion */
int iRetReg; /* Register array for holding a row of RETURNING */
+ char zName[40]; /* Name of trigger: "sqlite_returning_%p" */
};
/*
@@ -19634,6 +20016,28 @@ struct sqlite3_str {
#define isMalloced(X) (((X)->printfFlags & SQLITE_PRINTF_MALLOCED)!=0)
+/*
+** The following object is the header for an "RCStr" or "reference-counted
+** string". An RCStr is passed around and used like any other char*
+** that has been dynamically allocated. The important interface
+** differences:
+**
+** 1. RCStr strings are reference counted. They are deallocated
+** when the reference count reaches zero.
+**
+** 2. Use sqlite3RCStrUnref() to free an RCStr string rather than
+** sqlite3_free()
+**
+** 3. Make a (read-only) copy of a read-only RCStr string using
+** sqlite3RCStrRef().
+**
+** "String" is in the name, but an RCStr object can also be used to hold
+** binary data.
+*/
+struct RCStr {
+ u64 nRCRef; /* Number of references */
+ /* Total structure size should be a multiple of 8 bytes for alignment */
+};
/*
** A pointer to this structure is used to communicate information
@@ -19660,7 +20064,7 @@ typedef struct {
/* Tuning parameters are set using SQLITE_TESTCTRL_TUNE and are controlled
** on debug-builds of the CLI using ".testctrl tune ID VALUE". Tuning
** parameters are for temporary use during development, to help find
-** optimial values for parameters in the query planner. The should not
+** optimal values for parameters in the query planner. The should not
** be used on trunk check-ins. They are a temporary mechanism available
** for transient development builds only.
**
@@ -19686,6 +20090,10 @@ struct Sqlite3Config {
u8 bUseCis; /* Use covering indices for full-scans */
u8 bSmallMalloc; /* Avoid large memory allocations if true */
u8 bExtraSchemaChecks; /* Verify type,name,tbl_name in schema */
+ u8 bUseLongDouble; /* Make use of long double */
+#ifdef SQLITE_DEBUG
+ u8 bJsonSelfcheck; /* Double-check JSON parsing */
+#endif
int mxStrlen; /* Maximum string length */
int neverCorrupt; /* Database is always well-formed */
int szLookaside; /* Default lookaside buffer size */
@@ -19772,6 +20180,7 @@ struct Walker {
void (*xSelectCallback2)(Walker*,Select*);/* Second callback for SELECTs */
int walkerDepth; /* Number of subqueries */
u16 eCode; /* A small processing code */
+ u16 mWFlags; /* Use-dependent flags */
union { /* Extra data for callback */
NameContext *pNC; /* Naming context */
int n; /* A counter */
@@ -19811,6 +20220,7 @@ struct DbFixer {
/* Forward declarations */
SQLITE_PRIVATE int sqlite3WalkExpr(Walker*, Expr*);
+SQLITE_PRIVATE int sqlite3WalkExprNN(Walker*, Expr*);
SQLITE_PRIVATE int sqlite3WalkExprList(Walker*, ExprList*);
SQLITE_PRIVATE int sqlite3WalkSelect(Walker*, Select*);
SQLITE_PRIVATE int sqlite3WalkSelectExpr(Walker*, Select*);
@@ -19891,6 +20301,16 @@ struct CteUse {
};
+/* Client data associated with sqlite3_set_clientdata() and
+** sqlite3_get_clientdata().
+*/
+struct DbClientData {
+ DbClientData *pNext; /* Next in a linked list */
+ void *pData; /* The data */
+ void (*xDestructor)(void*); /* Destructor. Might be NULL */
+ char zName[1]; /* Name of this client data. MUST BE LAST */
+};
+
#ifdef SQLITE_DEBUG
/*
** An instance of the TreeView object is used for printing the content of
@@ -20192,6 +20612,20 @@ struct PrintfArguments {
sqlite3_value **apArg; /* The argument values */
};
+/*
+** An instance of this object receives the decoding of a floating point
+** value into an approximate decimal representation.
+*/
+struct FpDecode {
+ char sign; /* '+' or '-' */
+ char isSpecial; /* 1: Infinity 2: NaN */
+ int n; /* Significant digits in the decode */
+ int iDP; /* Location of the decimal point */
+ char *z; /* Start of significant digits */
+ char zBuf[24]; /* Storage for significant digits */
+};
+
+SQLITE_PRIVATE void sqlite3FpDecode(FpDecode*,double,int,int);
SQLITE_PRIVATE char *sqlite3MPrintf(sqlite3*,const char*, ...);
SQLITE_PRIVATE char *sqlite3VMPrintf(sqlite3*,const char*, va_list);
#if defined(SQLITE_DEBUG) || defined(SQLITE_HAVE_OS_TRACE)
@@ -20281,9 +20715,12 @@ SQLITE_PRIVATE void sqlite3PExprAddSelect(Parse*, Expr*, Select*);
SQLITE_PRIVATE Expr *sqlite3ExprAnd(Parse*,Expr*, Expr*);
SQLITE_PRIVATE Expr *sqlite3ExprSimplifiedAndOr(Expr*);
SQLITE_PRIVATE Expr *sqlite3ExprFunction(Parse*,ExprList*, const Token*, int);
+SQLITE_PRIVATE void sqlite3ExprAddFunctionOrderBy(Parse*,Expr*,ExprList*);
+SQLITE_PRIVATE void sqlite3ExprOrderByAggregateError(Parse*,Expr*);
SQLITE_PRIVATE void sqlite3ExprFunctionUsable(Parse*,const Expr*,const FuncDef*);
SQLITE_PRIVATE void sqlite3ExprAssignVarNumber(Parse*, Expr*, u32);
SQLITE_PRIVATE void sqlite3ExprDelete(sqlite3*, Expr*);
+SQLITE_PRIVATE void sqlite3ExprDeleteGeneric(sqlite3*,void*);
SQLITE_PRIVATE void sqlite3ExprDeferredDelete(Parse*, Expr*);
SQLITE_PRIVATE void sqlite3ExprUnmapAndDelete(Parse*, Expr*);
SQLITE_PRIVATE ExprList *sqlite3ExprListAppend(Parse*,ExprList*,Expr*);
@@ -20293,6 +20730,7 @@ SQLITE_PRIVATE void sqlite3ExprListSetSortOrder(ExprList*,int,int);
SQLITE_PRIVATE void sqlite3ExprListSetName(Parse*,ExprList*,const Token*,int);
SQLITE_PRIVATE void sqlite3ExprListSetSpan(Parse*,ExprList*,const char*,const char*);
SQLITE_PRIVATE void sqlite3ExprListDelete(sqlite3*, ExprList*);
+SQLITE_PRIVATE void sqlite3ExprListDeleteGeneric(sqlite3*,void*);
SQLITE_PRIVATE u32 sqlite3ExprListFlags(const ExprList*);
SQLITE_PRIVATE int sqlite3IndexHasDuplicateRootPage(Index*);
SQLITE_PRIVATE int sqlite3Init(sqlite3*, char**);
@@ -20383,6 +20821,7 @@ SQLITE_PRIVATE int sqlite3DbMaskAllZero(yDbMask);
SQLITE_PRIVATE void sqlite3DropTable(Parse*, SrcList*, int, int);
SQLITE_PRIVATE void sqlite3CodeDropTable(Parse*, Table*, int, int);
SQLITE_PRIVATE void sqlite3DeleteTable(sqlite3*, Table*);
+SQLITE_PRIVATE void sqlite3DeleteTableGeneric(sqlite3*, void*);
SQLITE_PRIVATE void sqlite3FreeIndex(sqlite3*, Index*);
#ifndef SQLITE_OMIT_AUTOINCREMENT
SQLITE_PRIVATE void sqlite3AutoincrementBegin(Parse *pParse);
@@ -20419,6 +20858,7 @@ SQLITE_PRIVATE int sqlite3Select(Parse*, Select*, SelectDest*);
SQLITE_PRIVATE Select *sqlite3SelectNew(Parse*,ExprList*,SrcList*,Expr*,ExprList*,
Expr*,ExprList*,u32,Expr*);
SQLITE_PRIVATE void sqlite3SelectDelete(sqlite3*, Select*);
+SQLITE_PRIVATE void sqlite3SelectDeleteGeneric(sqlite3*,void*);
SQLITE_PRIVATE Table *sqlite3SrcListLookup(Parse*, SrcList*);
SQLITE_PRIVATE int sqlite3IsReadOnly(Parse*, Table*, Trigger*);
SQLITE_PRIVATE void sqlite3OpenTable(Parse*, int iCur, int iDb, Table*, int);
@@ -20482,7 +20922,7 @@ SQLITE_PRIVATE int sqlite3ExprCompare(const Parse*,const Expr*,const Expr*, int)
SQLITE_PRIVATE int sqlite3ExprCompareSkip(Expr*,Expr*,int);
SQLITE_PRIVATE int sqlite3ExprListCompare(const ExprList*,const ExprList*, int);
SQLITE_PRIVATE int sqlite3ExprImpliesExpr(const Parse*,const Expr*,const Expr*, int);
-SQLITE_PRIVATE int sqlite3ExprImpliesNonNullRow(Expr*,int);
+SQLITE_PRIVATE int sqlite3ExprImpliesNonNullRow(Expr*,int,int);
SQLITE_PRIVATE void sqlite3AggInfoPersistWalkerInit(Walker*,Parse*);
SQLITE_PRIVATE void sqlite3ExprAnalyzeAggregates(NameContext*, Expr*);
SQLITE_PRIVATE void sqlite3ExprAnalyzeAggList(NameContext*,ExprList*);
@@ -20517,6 +20957,7 @@ SQLITE_PRIVATE int sqlite3ExprIsInteger(const Expr*, int*);
SQLITE_PRIVATE int sqlite3ExprCanBeNull(const Expr*);
SQLITE_PRIVATE int sqlite3ExprNeedsNoAffinityChange(const Expr*, char);
SQLITE_PRIVATE int sqlite3IsRowid(const char*);
+SQLITE_PRIVATE const char *sqlite3RowidAlias(Table *pTab);
SQLITE_PRIVATE void sqlite3GenerateRowDelete(
Parse*,Table*,Trigger*,int,int,int,i16,u8,u8,u8,int);
SQLITE_PRIVATE void sqlite3GenerateRowIndexDelete(Parse*, Table*, int, int, int*, int);
@@ -20631,6 +21072,7 @@ SQLITE_PRIVATE int sqlite3FixSrcList(DbFixer*, SrcList*);
SQLITE_PRIVATE int sqlite3FixSelect(DbFixer*, Select*);
SQLITE_PRIVATE int sqlite3FixExpr(DbFixer*, Expr*);
SQLITE_PRIVATE int sqlite3FixTriggerStep(DbFixer*, TriggerStep*);
+
SQLITE_PRIVATE int sqlite3RealSameAsInt(double,sqlite3_int64);
SQLITE_PRIVATE i64 sqlite3RealToI64(double);
SQLITE_PRIVATE int sqlite3Int64ToText(i64,char*);
@@ -20643,6 +21085,7 @@ SQLITE_PRIVATE int sqlite3Utf16ByteLen(const void *pData, int nChar);
#endif
SQLITE_PRIVATE int sqlite3Utf8CharLen(const char *pData, int nByte);
SQLITE_PRIVATE u32 sqlite3Utf8Read(const u8**);
+SQLITE_PRIVATE int sqlite3Utf8ReadLimited(const u8*, int, u32*);
SQLITE_PRIVATE LogEst sqlite3LogEst(u64);
SQLITE_PRIVATE LogEst sqlite3LogEstAdd(LogEst,LogEst);
SQLITE_PRIVATE LogEst sqlite3LogEstFromDouble(double);
@@ -20735,6 +21178,7 @@ SQLITE_PRIVATE void sqlite3FileSuffix3(const char*, char*);
SQLITE_PRIVATE u8 sqlite3GetBoolean(const char *z,u8);
SQLITE_PRIVATE const void *sqlite3ValueText(sqlite3_value*, u8);
+SQLITE_PRIVATE int sqlite3ValueIsOfClass(const sqlite3_value*, void(*)(void*));
SQLITE_PRIVATE int sqlite3ValueBytes(sqlite3_value*, u8);
SQLITE_PRIVATE void sqlite3ValueSetStr(sqlite3_value*, int, const void *,u8,
void(*)(void*));
@@ -20786,7 +21230,8 @@ SQLITE_PRIVATE int sqlite3MatchEName(
const struct ExprList_item*,
const char*,
const char*,
- const char*
+ const char*,
+ int*
);
SQLITE_PRIVATE Bitmask sqlite3ExprColUsed(Expr*);
SQLITE_PRIVATE u8 sqlite3StrIHash(const char*);
@@ -20842,6 +21287,11 @@ SQLITE_PRIVATE void sqlite3OomClear(sqlite3*);
SQLITE_PRIVATE int sqlite3ApiExit(sqlite3 *db, int);
SQLITE_PRIVATE int sqlite3OpenTempDatabase(Parse *);
+SQLITE_PRIVATE char *sqlite3RCStrRef(char*);
+SQLITE_PRIVATE void sqlite3RCStrUnref(void*);
+SQLITE_PRIVATE char *sqlite3RCStrNew(u64);
+SQLITE_PRIVATE char *sqlite3RCStrResize(char*,u64);
+
SQLITE_PRIVATE void sqlite3StrAccumInit(StrAccum*, sqlite3*, char*, int, int);
SQLITE_PRIVATE int sqlite3StrAccumEnlarge(StrAccum*, i64);
SQLITE_PRIVATE char *sqlite3StrAccumFinish(StrAccum*);
@@ -20982,6 +21432,7 @@ SQLITE_PRIVATE Cte *sqlite3CteNew(Parse*,Token*,ExprList*,Select*,u8);
SQLITE_PRIVATE void sqlite3CteDelete(sqlite3*,Cte*);
SQLITE_PRIVATE With *sqlite3WithAdd(Parse*,With*,Cte*);
SQLITE_PRIVATE void sqlite3WithDelete(sqlite3*,With*);
+SQLITE_PRIVATE void sqlite3WithDeleteGeneric(sqlite3*,void*);
SQLITE_PRIVATE With *sqlite3WithPush(Parse*, With*, u8);
#else
# define sqlite3CteNew(P,T,E,S) ((void*)0)
@@ -21093,6 +21544,7 @@ SQLITE_PRIVATE int sqlite3ExprCheckHeight(Parse*, int);
#define sqlite3SelectExprHeight(x) 0
#define sqlite3ExprCheckHeight(x,y)
#endif
+SQLITE_PRIVATE void sqlite3ExprSetErrorOffset(Expr*,int);
SQLITE_PRIVATE u32 sqlite3Get4byte(const u8*);
SQLITE_PRIVATE void sqlite3Put4byte(u8*, u32);
@@ -21378,9 +21830,6 @@ static const char * const sqlite3azCompileOpt[] = {
#ifdef SQLITE_4_BYTE_ALIGNED_MALLOC
"4_BYTE_ALIGNED_MALLOC",
#endif
-#ifdef SQLITE_64BIT_STATS
- "64BIT_STATS",
-#endif
#ifdef SQLITE_ALLOW_COVERING_INDEX_SCAN
# if SQLITE_ALLOW_COVERING_INDEX_SCAN != 1
"ALLOW_COVERING_INDEX_SCAN=" CTIMEOPT_VAL(SQLITE_ALLOW_COVERING_INDEX_SCAN),
@@ -21676,6 +22125,9 @@ static const char * const sqlite3azCompileOpt[] = {
#ifdef SQLITE_EXPLAIN_ESTIMATED_ROWS
"EXPLAIN_ESTIMATED_ROWS",
#endif
+#ifdef SQLITE_EXTRA_AUTOEXT
+ "EXTRA_AUTOEXT=" CTIMEOPT_VAL(SQLITE_EXTRA_AUTOEXT),
+#endif
#ifdef SQLITE_EXTRA_IFNULLROW
"EXTRA_IFNULLROW",
#endif
@@ -21717,6 +22169,9 @@ static const char * const sqlite3azCompileOpt[] = {
#ifdef SQLITE_INTEGRITY_CHECK_ERROR_MAX
"INTEGRITY_CHECK_ERROR_MAX=" CTIMEOPT_VAL(SQLITE_INTEGRITY_CHECK_ERROR_MAX),
#endif
+#ifdef SQLITE_LEGACY_JSON_VALID
+ "LEGACY_JSON_VALID",
+#endif
#ifdef SQLITE_LIKE_DOESNT_MATCH_BLOBS
"LIKE_DOESNT_MATCH_BLOBS",
#endif
@@ -21954,6 +22409,9 @@ static const char * const sqlite3azCompileOpt[] = {
#ifdef SQLITE_OMIT_SCHEMA_VERSION_PRAGMAS
"OMIT_SCHEMA_VERSION_PRAGMAS",
#endif
+#ifdef SQLITE_OMIT_SEH
+ "OMIT_SEH",
+#endif
#ifdef SQLITE_OMIT_SHARED_CACHE
"OMIT_SHARED_CACHE",
#endif
@@ -22351,6 +22809,10 @@ SQLITE_PRIVATE SQLITE_WSD struct Sqlite3Config sqlite3Config = {
SQLITE_ALLOW_COVERING_INDEX_SCAN, /* bUseCis */
0, /* bSmallMalloc */
1, /* bExtraSchemaChecks */
+ sizeof(LONGDOUBLE_TYPE)>8, /* bUseLongDouble */
+#ifdef SQLITE_DEBUG
+ 0, /* bJsonSelfcheck */
+#endif
0x7ffffffe, /* mxStrlen */
0, /* neverCorrupt */
SQLITE_DEFAULT_LOOKASIDE, /* szLookaside, nLookaside */
@@ -22580,6 +23042,9 @@ typedef struct VdbeSorter VdbeSorter;
/* Elements of the linked list at Vdbe.pAuxData */
typedef struct AuxData AuxData;
+/* A cache of large TEXT or BLOB values in a VdbeCursor */
+typedef struct VdbeTxtBlbCache VdbeTxtBlbCache;
+
/* Types of VDBE cursors */
#define CURTYPE_BTREE 0
#define CURTYPE_SORTER 1
@@ -22611,6 +23076,7 @@ struct VdbeCursor {
Bool useRandomRowid:1; /* Generate new record numbers semi-randomly */
Bool isOrdered:1; /* True if the table is not BTREE_UNORDERED */
Bool noReuse:1; /* OpenEphemeral may not reuse this cursor */
+ Bool colCache:1; /* pCache pointer is initialized and non-NULL */
u16 seekHit; /* See the OP_SeekHit and OP_IfNoHope opcodes */
union { /* pBtx for isEphermeral. pAltMap otherwise */
Btree *pBtx; /* Separate file holding temporary table */
@@ -22651,6 +23117,7 @@ struct VdbeCursor {
#ifdef SQLITE_ENABLE_COLUMN_USED_MASK
u64 maskUsed; /* Mask of columns used by this cursor */
#endif
+ VdbeTxtBlbCache *pCache; /* Cache of large TEXT or BLOB values */
/* 2*nField extra array elements allocated for aType[], beyond the one
** static element declared in the structure. nField total array slots for
@@ -22663,13 +23130,26 @@ struct VdbeCursor {
#define IsNullCursor(P) \
((P)->eCurType==CURTYPE_PSEUDO && (P)->nullRow && (P)->seekResult==0)
-
/*
** A value for VdbeCursor.cacheStatus that means the cache is always invalid.
*/
#define CACHE_STALE 0
/*
+** Large TEXT or BLOB values can be slow to load, so we want to avoid
+** loading them more than once. For that reason, large TEXT and BLOB values
+** can be stored in a cache defined by this object, and attached to the
+** VdbeCursor using the pCache field.
+*/
+struct VdbeTxtBlbCache {
+ char *pCValue; /* A RCStr buffer to hold the value */
+ i64 iOffset; /* File offset of the row being cached */
+ int iCol; /* Column for which the cache is valid */
+ u32 cacheStatus; /* Vdbe.cacheCtr value */
+ u32 colCacheCtr; /* Column cache counter */
+};
+
+/*
** When a sub-program is executed (OP_Program), a structure of this type
** is allocated to store the current value of the program counter, as
** well as the current memory cell array and various other frame specific
@@ -22989,16 +23469,18 @@ struct Vdbe {
u32 nWrite; /* Number of write operations that have occurred */
#endif
u16 nResColumn; /* Number of columns in one row of the result set */
+ u16 nResAlloc; /* Column slots allocated to aColName[] */
u8 errorAction; /* Recovery action to do in case of an error */
u8 minWriteFileFormat; /* Minimum file format for writable database files */
u8 prepFlags; /* SQLITE_PREPARE_* flags */
u8 eVdbeState; /* On of the VDBE_*_STATE values */
bft expired:2; /* 1: recompile VM immediately 2: when convenient */
- bft explain:2; /* True if EXPLAIN present on SQL command */
+ bft explain:2; /* 0: normal, 1: EXPLAIN, 2: EXPLAIN QUERY PLAN */
bft changeCntOn:1; /* True to update the change-counter */
bft usesStmtJournal:1; /* True if uses a statement journal */
bft readOnly:1; /* True for statements that do not write */
bft bIsReader:1; /* True for statements that read */
+ bft haveEqpOps:1; /* Bytecode supports EXPLAIN QUERY PLAN */
yDbMask btreeMask; /* Bitmask of db->aDb[] entries referenced */
yDbMask lockMask; /* Subset of btreeMask that requires a lock */
u32 aCounter[9]; /* Counters used by sqlite3_stmt_status() */
@@ -23045,7 +23527,7 @@ struct PreUpdate {
i64 iKey1; /* First key value passed to hook */
i64 iKey2; /* Second key value passed to hook */
Mem *aNew; /* Array of new.* values */
- Table *pTab; /* Schema object being upated */
+ Table *pTab; /* Schema object being updated */
Index *pPk; /* PK index if pTab is WITHOUT ROWID */
};
@@ -23135,6 +23617,7 @@ SQLITE_PRIVATE int sqlite3VdbeMemSetZeroBlob(Mem*,int);
SQLITE_PRIVATE int sqlite3VdbeMemIsRowSet(const Mem*);
#endif
SQLITE_PRIVATE int sqlite3VdbeMemSetRowSet(Mem*);
+SQLITE_PRIVATE void sqlite3VdbeMemZeroTerminateIfAble(Mem*);
SQLITE_PRIVATE int sqlite3VdbeMemMakeWriteable(Mem*);
SQLITE_PRIVATE int sqlite3VdbeMemStringify(Mem*, u8, u8);
SQLITE_PRIVATE int sqlite3IntFloatCompare(i64,double);
@@ -23582,7 +24065,7 @@ SQLITE_API int sqlite3_db_status(
case SQLITE_DBSTATUS_CACHE_MISS:
case SQLITE_DBSTATUS_CACHE_WRITE:{
int i;
- int nRet = 0;
+ u64 nRet = 0;
assert( SQLITE_DBSTATUS_CACHE_MISS==SQLITE_DBSTATUS_CACHE_HIT+1 );
assert( SQLITE_DBSTATUS_CACHE_WRITE==SQLITE_DBSTATUS_CACHE_HIT+2 );
@@ -23595,7 +24078,7 @@ SQLITE_API int sqlite3_db_status(
*pHighwater = 0; /* IMP: R-42420-56072 */
/* IMP: R-54100-20147 */
/* IMP: R-29431-39229 */
- *pCurrent = nRet;
+ *pCurrent = (int)nRet & 0x7fffffff;
break;
}
@@ -23731,8 +24214,8 @@ struct DateTime {
*/
static int getDigits(const char *zDate, const char *zFormat, ...){
/* The aMx[] array translates the 3rd character of each format
- ** spec into a max size: a b c d e f */
- static const u16 aMx[] = { 12, 14, 24, 31, 59, 9999 };
+ ** spec into a max size: a b c d e f */
+ static const u16 aMx[] = { 12, 14, 24, 31, 59, 14712 };
va_list ap;
int cnt = 0;
char nextC;
@@ -24073,17 +24556,14 @@ static void computeYMD(DateTime *p){
** Compute the Hour, Minute, and Seconds from the julian day number.
*/
static void computeHMS(DateTime *p){
- int s;
+ int day_ms, day_min; /* milliseconds, minutes into the day */
if( p->validHMS ) return;
computeJD(p);
- s = (int)((p->iJD + 43200000) % 86400000);
- p->s = s/1000.0;
- s = (int)p->s;
- p->s -= s;
- p->h = s/3600;
- s -= p->h*3600;
- p->m = s/60;
- p->s += s - p->m*60;
+ day_ms = (int)((p->iJD + 43200000) % 86400000);
+ p->s = (day_ms % 60000)/1000.0;
+ day_min = day_ms/60000;
+ p->m = day_min % 60;
+ p->h = day_min / 60;
p->rawS = 0;
p->validHMS = 1;
}
@@ -24263,6 +24743,25 @@ static const struct {
};
/*
+** If the DateTime p is raw number, try to figure out if it is
+** a julian day number of a unix timestamp. Set the p value
+** appropriately.
+*/
+static void autoAdjustDate(DateTime *p){
+ if( !p->rawS || p->validJD ){
+ p->rawS = 0;
+ }else if( p->s>=-21086676*(i64)10000 /* -4713-11-24 12:00:00 */
+ && p->s<=(25340230*(i64)10000)+799 /* 9999-12-31 23:59:59 */
+ ){
+ double r = p->s*1000.0 + 210866760000000.0;
+ clearYMD_HMS_TZ(p);
+ p->iJD = (sqlite3_int64)(r + 0.5);
+ p->validJD = 1;
+ p->rawS = 0;
+ }
+}
+
+/*
** Process a modifier to a date-time stamp. The modifiers are
** as follows:
**
@@ -24305,19 +24804,8 @@ static int parseModifier(
*/
if( sqlite3_stricmp(z, "auto")==0 ){
if( idx>1 ) return 1; /* IMP: R-33611-57934 */
- if( !p->rawS || p->validJD ){
- rc = 0;
- p->rawS = 0;
- }else if( p->s>=-21086676*(i64)10000 /* -4713-11-24 12:00:00 */
- && p->s<=(25340230*(i64)10000)+799 /* 9999-12-31 23:59:59 */
- ){
- r = p->s*1000.0 + 210866760000000.0;
- clearYMD_HMS_TZ(p);
- p->iJD = (sqlite3_int64)(r + 0.5);
- p->validJD = 1;
- p->rawS = 0;
- rc = 0;
- }
+ autoAdjustDate(p);
+ rc = 0;
}
break;
}
@@ -24483,18 +24971,73 @@ static int parseModifier(
case '9': {
double rRounder;
int i;
- for(n=1; z[n] && z[n]!=':' && !sqlite3Isspace(z[n]); n++){}
+ int Y,M,D,h,m,x;
+ const char *z2 = z;
+ char z0 = z[0];
+ for(n=1; z[n]; n++){
+ if( z[n]==':' ) break;
+ if( sqlite3Isspace(z[n]) ) break;
+ if( z[n]=='-' ){
+ if( n==5 && getDigits(&z[1], "40f", &Y)==1 ) break;
+ if( n==6 && getDigits(&z[1], "50f", &Y)==1 ) break;
+ }
+ }
if( sqlite3AtoF(z, &r, n, SQLITE_UTF8)<=0 ){
- rc = 1;
+ assert( rc==1 );
break;
}
- if( z[n]==':' ){
+ if( z[n]=='-' ){
+ /* A modifier of the form (+|-)YYYY-MM-DD adds or subtracts the
+ ** specified number of years, months, and days. MM is limited to
+ ** the range 0-11 and DD is limited to 0-30.
+ */
+ if( z0!='+' && z0!='-' ) break; /* Must start with +/- */
+ if( n==5 ){
+ if( getDigits(&z[1], "40f-20a-20d", &Y, &M, &D)!=3 ) break;
+ }else{
+ assert( n==6 );
+ if( getDigits(&z[1], "50f-20a-20d", &Y, &M, &D)!=3 ) break;
+ z++;
+ }
+ if( M>=12 ) break; /* M range 0..11 */
+ if( D>=31 ) break; /* D range 0..30 */
+ computeYMD_HMS(p);
+ p->validJD = 0;
+ if( z0=='-' ){
+ p->Y -= Y;
+ p->M -= M;
+ D = -D;
+ }else{
+ p->Y += Y;
+ p->M += M;
+ }
+ x = p->M>0 ? (p->M-1)/12 : (p->M-12)/12;
+ p->Y += x;
+ p->M -= x*12;
+ computeJD(p);
+ p->validHMS = 0;
+ p->validYMD = 0;
+ p->iJD += (i64)D*86400000;
+ if( z[11]==0 ){
+ rc = 0;
+ break;
+ }
+ if( sqlite3Isspace(z[11])
+ && getDigits(&z[12], "20c:20e", &h, &m)==2
+ ){
+ z2 = &z[12];
+ n = 2;
+ }else{
+ break;
+ }
+ }
+ if( z2[n]==':' ){
/* A modifier of the form (+|-)HH:MM:SS.FFF adds (or subtracts) the
** specified number of hours, minutes, seconds, and fractional seconds
** to the time. The ".FFF" may be omitted. The ":SS.FFF" may be
** omitted.
*/
- const char *z2 = z;
+
DateTime tx;
sqlite3_int64 day;
if( !sqlite3Isdigit(*z2) ) z2++;
@@ -24504,7 +25047,7 @@ static int parseModifier(
tx.iJD -= 43200000;
day = tx.iJD/86400000;
tx.iJD -= day*86400000;
- if( z[0]=='-' ) tx.iJD = -tx.iJD;
+ if( z0=='-' ) tx.iJD = -tx.iJD;
computeJD(p);
clearYMD_HMS_TZ(p);
p->iJD += tx.iJD;
@@ -24520,7 +25063,7 @@ static int parseModifier(
if( n>10 || n<3 ) break;
if( sqlite3UpperToLower[(u8)z[n-1]]=='s' ) n--;
computeJD(p);
- rc = 1;
+ assert( rc==1 );
rRounder = r<0 ? -0.5 : +0.5;
for(i=0; i<ArraySize(aXformType); i++){
if( aXformType[i].nName==n
@@ -24529,7 +25072,6 @@ static int parseModifier(
){
switch( i ){
case 4: { /* Special processing to add months */
- int x;
assert( strcmp(aXformType[i].zName,"month")==0 );
computeYMD_HMS(p);
p->M += (int)r;
@@ -24605,6 +25147,12 @@ static int isDate(
}
computeJD(p);
if( p->isError || !validJulianDay(p->iJD) ) return 1;
+ if( argc==1 && p->validYMD && p->D>28 ){
+ /* Make sure a YYYY-MM-DD is normalized.
+ ** Example: 2023-02-31 -> 2023-03-03 */
+ assert( p->validJD );
+ p->validYMD = 0;
+ }
return 0;
}
@@ -24688,7 +25236,7 @@ static void datetimeFunc(
zBuf[16] = '0' + (x.m)%10;
zBuf[17] = ':';
if( x.useSubsec ){
- s = (int)1000.0*x.s;
+ s = (int)(1000.0*x.s + 0.5);
zBuf[18] = '0' + (s/10000)%10;
zBuf[19] = '0' + (s/1000)%10;
zBuf[20] = '.';
@@ -24735,7 +25283,7 @@ static void timeFunc(
zBuf[4] = '0' + (x.m)%10;
zBuf[5] = ':';
if( x.useSubsec ){
- s = (int)1000.0*x.s;
+ s = (int)(1000.0*x.s + 0.5);
zBuf[6] = '0' + (s/10000)%10;
zBuf[7] = '0' + (s/1000)%10;
zBuf[8] = '.';
@@ -24806,7 +25354,7 @@ static void dateFunc(
** %M minute 00-59
** %s seconds since 1970-01-01
** %S seconds 00-59
-** %w day of week 0-6 sunday==0
+** %w day of week 0-6 Sunday==0
** %W week of year 00-53
** %Y year 0000-9999
** %% %
@@ -24832,13 +25380,16 @@ static void strftimeFunc(
computeJD(&x);
computeYMD_HMS(&x);
for(i=j=0; zFmt[i]; i++){
+ char cf;
if( zFmt[i]!='%' ) continue;
if( j<i ) sqlite3_str_append(&sRes, zFmt+j, (int)(i-j));
i++;
j = i + 1;
- switch( zFmt[i] ){
- case 'd': {
- sqlite3_str_appendf(&sRes, "%02d", x.D);
+ cf = zFmt[i];
+ switch( cf ){
+ case 'd': /* Fall thru */
+ case 'e': {
+ sqlite3_str_appendf(&sRes, cf=='d' ? "%02d" : "%2d", x.D);
break;
}
case 'f': {
@@ -24847,8 +25398,21 @@ static void strftimeFunc(
sqlite3_str_appendf(&sRes, "%06.3f", s);
break;
}
- case 'H': {
- sqlite3_str_appendf(&sRes, "%02d", x.h);
+ case 'F': {
+ sqlite3_str_appendf(&sRes, "%04d-%02d-%02d", x.Y, x.M, x.D);
+ break;
+ }
+ case 'H':
+ case 'k': {
+ sqlite3_str_appendf(&sRes, cf=='H' ? "%02d" : "%2d", x.h);
+ break;
+ }
+ case 'I': /* Fall thru */
+ case 'l': {
+ int h = x.h;
+ if( h>12 ) h -= 12;
+ if( h==0 ) h = 12;
+ sqlite3_str_appendf(&sRes, cf=='I' ? "%02d" : "%2d", h);
break;
}
case 'W': /* Fall thru */
@@ -24860,7 +25424,7 @@ static void strftimeFunc(
y.D = 1;
computeJD(&y);
nDay = (int)((x.iJD-y.iJD+43200000)/86400000);
- if( zFmt[i]=='W' ){
+ if( cf=='W' ){
int wd; /* 0=Monday, 1=Tuesday, ... 6=Sunday */
wd = (int)(((x.iJD+43200000)/86400000)%7);
sqlite3_str_appendf(&sRes,"%02d",(nDay+7-wd)/7);
@@ -24881,6 +25445,19 @@ static void strftimeFunc(
sqlite3_str_appendf(&sRes,"%02d",x.m);
break;
}
+ case 'p': /* Fall thru */
+ case 'P': {
+ if( x.h>=12 ){
+ sqlite3_str_append(&sRes, cf=='p' ? "PM" : "pm", 2);
+ }else{
+ sqlite3_str_append(&sRes, cf=='p' ? "AM" : "am", 2);
+ }
+ break;
+ }
+ case 'R': {
+ sqlite3_str_appendf(&sRes, "%02d:%02d", x.h, x.m);
+ break;
+ }
case 's': {
if( x.useSubsec ){
sqlite3_str_appendf(&sRes,"%.3f",
@@ -24895,9 +25472,15 @@ static void strftimeFunc(
sqlite3_str_appendf(&sRes,"%02d",(int)x.s);
break;
}
+ case 'T': {
+ sqlite3_str_appendf(&sRes,"%02d:%02d:%02d", x.h, x.m, (int)x.s);
+ break;
+ }
+ case 'u': /* Fall thru */
case 'w': {
- sqlite3_str_appendchar(&sRes, 1,
- (char)(((x.iJD+129600000)/86400000) % 7) + '0');
+ char c = (char)(((x.iJD+129600000)/86400000) % 7) + '0';
+ if( c=='0' && cf=='u' ) c = '7';
+ sqlite3_str_appendchar(&sRes, 1, c);
break;
}
case 'Y': {
@@ -24947,6 +25530,117 @@ static void cdateFunc(
}
/*
+** timediff(DATE1, DATE2)
+**
+** Return the amount of time that must be added to DATE2 in order to
+** convert it into DATE2. The time difference format is:
+**
+** +YYYY-MM-DD HH:MM:SS.SSS
+**
+** The initial "+" becomes "-" if DATE1 occurs before DATE2. For
+** date/time values A and B, the following invariant should hold:
+**
+** datetime(A) == (datetime(B, timediff(A,B))
+**
+** Both DATE arguments must be either a julian day number, or an
+** ISO-8601 string. The unix timestamps are not supported by this
+** routine.
+*/
+static void timediffFunc(
+ sqlite3_context *context,
+ int NotUsed1,
+ sqlite3_value **argv
+){
+ char sign;
+ int Y, M;
+ DateTime d1, d2;
+ sqlite3_str sRes;
+ UNUSED_PARAMETER(NotUsed1);
+ if( isDate(context, 1, &argv[0], &d1) ) return;
+ if( isDate(context, 1, &argv[1], &d2) ) return;
+ computeYMD_HMS(&d1);
+ computeYMD_HMS(&d2);
+ if( d1.iJD>=d2.iJD ){
+ sign = '+';
+ Y = d1.Y - d2.Y;
+ if( Y ){
+ d2.Y = d1.Y;
+ d2.validJD = 0;
+ computeJD(&d2);
+ }
+ M = d1.M - d2.M;
+ if( M<0 ){
+ Y--;
+ M += 12;
+ }
+ if( M!=0 ){
+ d2.M = d1.M;
+ d2.validJD = 0;
+ computeJD(&d2);
+ }
+ while( d1.iJD<d2.iJD ){
+ M--;
+ if( M<0 ){
+ M = 11;
+ Y--;
+ }
+ d2.M--;
+ if( d2.M<1 ){
+ d2.M = 12;
+ d2.Y--;
+ }
+ d2.validJD = 0;
+ computeJD(&d2);
+ }
+ d1.iJD -= d2.iJD;
+ d1.iJD += (u64)1486995408 * (u64)100000;
+ }else /* d1<d2 */{
+ sign = '-';
+ Y = d2.Y - d1.Y;
+ if( Y ){
+ d2.Y = d1.Y;
+ d2.validJD = 0;
+ computeJD(&d2);
+ }
+ M = d2.M - d1.M;
+ if( M<0 ){
+ Y--;
+ M += 12;
+ }
+ if( M!=0 ){
+ d2.M = d1.M;
+ d2.validJD = 0;
+ computeJD(&d2);
+ }
+ while( d1.iJD>d2.iJD ){
+ M--;
+ if( M<0 ){
+ M = 11;
+ Y--;
+ }
+ d2.M++;
+ if( d2.M>12 ){
+ d2.M = 1;
+ d2.Y++;
+ }
+ d2.validJD = 0;
+ computeJD(&d2);
+ }
+ d1.iJD = d2.iJD - d1.iJD;
+ d1.iJD += (u64)1486995408 * (u64)100000;
+ }
+ d1.validYMD = 0;
+ d1.validHMS = 0;
+ d1.validTZ = 0;
+ computeYMD_HMS(&d1);
+ sqlite3StrAccumInit(&sRes, 0, 0, 0, 100);
+ sqlite3_str_appendf(&sRes, "%c%04d-%02d-%02d %02d:%02d:%06.3f",
+ sign, Y, M, d1.D-1, d1.h, d1.m, d1.s);
+ sqlite3ResultStrAccum(context, &sRes);
+}
+
+
+/*
** current_timestamp()
**
** This function returns the same value as datetime('now').
@@ -25020,6 +25714,7 @@ SQLITE_PRIVATE void sqlite3RegisterDateTimeFunctions(void){
PURE_DATE(time, -1, 0, 0, timeFunc ),
PURE_DATE(datetime, -1, 0, 0, datetimeFunc ),
PURE_DATE(strftime, -1, 0, 0, strftimeFunc ),
+ PURE_DATE(timediff, 2, 0, 0, timediffFunc ),
DFUNCTION(current_time, 0, 0, 0, ctimeFunc ),
DFUNCTION(current_timestamp, 0, 0, 0, ctimestampFunc),
DFUNCTION(current_date, 0, 0, 0, cdateFunc ),
@@ -25173,7 +25868,7 @@ SQLITE_PRIVATE int sqlite3OsFileControl(sqlite3_file *id, int op, void *pArg){
/* Faults are not injected into COMMIT_PHASETWO because, assuming SQLite
** is using a regular VFS, it is called after the corresponding
** transaction has been committed. Injecting a fault at this point
- ** confuses the test scripts - the COMMIT comand returns SQLITE_NOMEM
+ ** confuses the test scripts - the COMMIT command returns SQLITE_NOMEM
** but the transaction is committed anyway.
**
** The core must call OsFileControl() though, not OsFileControlHint(),
@@ -25794,7 +26489,7 @@ static void *sqlite3MemMalloc(int nByte){
** or sqlite3MemRealloc().
**
** For this low-level routine, we already know that pPrior!=0 since
-** cases where pPrior==0 will have been intecepted and dealt with
+** cases where pPrior==0 will have been intercepted and dealt with
** by higher-level routines.
*/
static void sqlite3MemFree(void *pPrior){
@@ -25882,7 +26577,7 @@ static int sqlite3MemInit(void *NotUsed){
return SQLITE_OK;
}
len = sizeof(cpuCount);
- /* One usually wants to use hw.acctivecpu for MT decisions, but not here */
+ /* One usually wants to use hw.activecpu for MT decisions, but not here */
sysctlbyname("hw.ncpu", &cpuCount, &len, NULL, 0);
if( cpuCount>1 ){
/* defer MT decisions to system malloc */
@@ -27874,7 +28569,7 @@ static void checkMutexFree(sqlite3_mutex *p){
assert( SQLITE_MUTEX_FAST<2 );
assert( SQLITE_MUTEX_WARNONCONTENTION<2 );
-#if SQLITE_ENABLE_API_ARMOR
+#ifdef SQLITE_ENABLE_API_ARMOR
if( ((CheckMutex*)p)->iType<2 )
#endif
{
@@ -28349,7 +29044,7 @@ SQLITE_PRIVATE sqlite3_mutex_methods const *sqlite3DefaultMutex(void){
/*
** The sqlite3_mutex.id, sqlite3_mutex.nRef, and sqlite3_mutex.owner fields
-** are necessary under two condidtions: (1) Debug builds and (2) using
+** are necessary under two conditions: (1) Debug builds and (2) using
** home-grown mutexes. Encapsulate these conditions into a single #define.
*/
#if defined(SQLITE_DEBUG) || defined(SQLITE_HOMEGROWN_RECURSIVE_MUTEX)
@@ -28546,7 +29241,7 @@ static sqlite3_mutex *pthreadMutexAlloc(int iType){
*/
static void pthreadMutexFree(sqlite3_mutex *p){
assert( p->nRef==0 );
-#if SQLITE_ENABLE_API_ARMOR
+#ifdef SQLITE_ENABLE_API_ARMOR
if( p->id==SQLITE_MUTEX_FAST || p->id==SQLITE_MUTEX_RECURSIVE )
#endif
{
@@ -28850,7 +29545,7 @@ struct sqlite3_mutex {
CRITICAL_SECTION mutex; /* Mutex controlling the lock */
int id; /* Mutex type */
#ifdef SQLITE_DEBUG
- volatile int nRef; /* Number of enterances */
+ volatile int nRef; /* Number of entrances */
volatile DWORD owner; /* Thread holding this mutex */
volatile LONG trace; /* True to trace changes */
#endif
@@ -28899,7 +29594,7 @@ SQLITE_PRIVATE void sqlite3MemoryBarrier(void){
SQLITE_MEMORY_BARRIER;
#elif defined(__GNUC__)
__sync_synchronize();
-#elif MSVC_VERSION>=1300
+#elif MSVC_VERSION>=1400
_ReadWriteBarrier();
#elif defined(MemoryBarrier)
MemoryBarrier();
@@ -30110,7 +30805,7 @@ SQLITE_PRIVATE int sqlite3ApiExit(sqlite3* db, int rc){
if( db->mallocFailed || rc ){
return apiHandleError(db, rc);
}
- return rc & db->errMask;
+ return 0;
}
/************** End of malloc.c **********************************************/
@@ -30222,57 +30917,6 @@ static const et_info fmtinfo[] = {
** %!S Like %S but prefer the zName over the zAlias
*/
-/* Floating point constants used for rounding */
-static const double arRound[] = {
- 5.0e-01, 5.0e-02, 5.0e-03, 5.0e-04, 5.0e-05,
- 5.0e-06, 5.0e-07, 5.0e-08, 5.0e-09, 5.0e-10,
-};
-
-/*
-** If SQLITE_OMIT_FLOATING_POINT is defined, then none of the floating point
-** conversions will work.
-*/
-#ifndef SQLITE_OMIT_FLOATING_POINT
-/*
-** "*val" is a double such that 0.1 <= *val < 10.0
-** Return the ascii code for the leading digit of *val, then
-** multiply "*val" by 10.0 to renormalize.
-**
-** Example:
-** input: *val = 3.14159
-** output: *val = 1.4159 function return = '3'
-**
-** The counter *cnt is incremented each time. After counter exceeds
-** 16 (the number of significant digits in a 64-bit float) '0' is
-** always returned.
-*/
-static char et_getdigit(LONGDOUBLE_TYPE *val, int *cnt){
- int digit;
- LONGDOUBLE_TYPE d;
- if( (*cnt)<=0 ) return '0';
- (*cnt)--;
- digit = (int)*val;
- d = digit;
- digit += '0';
- *val = (*val - d)*10.0;
- return (char)digit;
-}
-#endif /* SQLITE_OMIT_FLOATING_POINT */
-
-#ifndef SQLITE_OMIT_FLOATING_POINT
-/*
-** "*val" is a u64. *msd is a divisor used to extract the
-** most significant digit of *val. Extract that most significant
-** digit and return it.
-*/
-static char et_getdigit_int(u64 *val, u64 *msd){
- u64 x = (*val)/(*msd);
- *val -= x*(*msd);
- if( *msd>=10 ) *msd /= 10;
- return '0' + (char)(x & 15);
-}
-#endif /* SQLITE_OMIT_FLOATING_POINT */
-
/*
** Set the StrAccum object to an error mode.
*/
@@ -30364,20 +31008,15 @@ SQLITE_API void sqlite3_str_vappendf(
u8 bArgList; /* True for SQLITE_PRINTF_SQLFUNC */
char prefix; /* Prefix character. "+" or "-" or " " or '\0'. */
sqlite_uint64 longvalue; /* Value for integer types */
- LONGDOUBLE_TYPE realvalue; /* Value for real types */
- sqlite_uint64 msd; /* Divisor to get most-significant-digit
- ** of longvalue */
+ double realvalue; /* Value for real types */
const et_info *infop; /* Pointer to the appropriate info structure */
char *zOut; /* Rendering buffer */
int nOut; /* Size of the rendering buffer */
char *zExtra = 0; /* Malloced memory used by some conversion */
-#ifndef SQLITE_OMIT_FLOATING_POINT
- int exp, e2; /* exponent of real numbers */
- int nsd; /* Number of significant digits returned */
- double rounder; /* Used for rounding floating point values */
+ int exp, e2; /* exponent of real numbers */
etByte flag_dp; /* True if decimal point should be shown */
etByte flag_rtz; /* True if trailing zeros should be removed */
-#endif
+
PrintfArguments *pArgList = 0; /* Arguments for SQLITE_PRINTF_SQLFUNC */
char buf[etBUFSIZE]; /* Conversion buffer */
@@ -30652,95 +31291,62 @@ SQLITE_API void sqlite3_str_vappendf(
break;
case etFLOAT:
case etEXP:
- case etGENERIC:
+ case etGENERIC: {
+ FpDecode s;
+ int iRound;
+ int j;
+
if( bArgList ){
realvalue = getDoubleArg(pArgList);
}else{
realvalue = va_arg(ap,double);
}
-#ifdef SQLITE_OMIT_FLOATING_POINT
- length = 0;
-#else
if( precision<0 ) precision = 6; /* Set default precision */
#ifdef SQLITE_FP_PRECISION_LIMIT
if( precision>SQLITE_FP_PRECISION_LIMIT ){
precision = SQLITE_FP_PRECISION_LIMIT;
}
#endif
- if( realvalue<0.0 ){
- realvalue = -realvalue;
- prefix = '-';
+ if( xtype==etFLOAT ){
+ iRound = -precision;
+ }else if( xtype==etGENERIC ){
+ iRound = precision;
}else{
- prefix = flag_prefix;
+ iRound = precision+1;
}
- exp = 0;
- if( xtype==etGENERIC && precision>0 ) precision--;
- testcase( precision>0xfff );
- if( realvalue<1.0e+16
- && realvalue==(LONGDOUBLE_TYPE)(longvalue = (u64)realvalue)
- ){
- /* Number is a pure integer that can be represented as u64 */
- for(msd=1; msd*10<=longvalue; msd *= 10, exp++){}
- if( exp>precision && xtype!=etFLOAT ){
- u64 rnd = msd/2;
- int kk = precision;
- while( kk-- > 0 ){ rnd /= 10; }
- longvalue += rnd;
- }
- }else{
- msd = 0;
- longvalue = 0; /* To prevent a compiler warning */
- idx = precision & 0xfff;
- rounder = arRound[idx%10];
- while( idx>=10 ){ rounder *= 1.0e-10; idx -= 10; }
- if( xtype==etFLOAT ){
- double rx = (double)realvalue;
- sqlite3_uint64 u;
- int ex;
- memcpy(&u, &rx, sizeof(u));
- ex = -1023 + (int)((u>>52)&0x7ff);
- if( precision+(ex/3) < 15 ) rounder += realvalue*3e-16;
- realvalue += rounder;
- }
- if( sqlite3IsNaN((double)realvalue) ){
- if( flag_zeropad ){
- bufpt = "null";
- length = 4;
+ sqlite3FpDecode(&s, realvalue, iRound, flag_altform2 ? 26 : 16);
+ if( s.isSpecial ){
+ if( s.isSpecial==2 ){
+ bufpt = flag_zeropad ? "null" : "NaN";
+ length = sqlite3Strlen30(bufpt);
+ break;
+ }else if( flag_zeropad ){
+ s.z[0] = '9';
+ s.iDP = 1000;
+ s.n = 1;
+ }else{
+ memcpy(buf, "-Inf", 5);
+ bufpt = buf;
+ if( s.sign=='-' ){
+ /* no-op */
+ }else if( flag_prefix ){
+ buf[0] = flag_prefix;
}else{
- bufpt = "NaN";
- length = 3;
+ bufpt++;
}
+ length = sqlite3Strlen30(bufpt);
break;
}
-
- /* Normalize realvalue to within 10.0 > realvalue >= 1.0 */
- if( ALWAYS(realvalue>0.0) ){
- LONGDOUBLE_TYPE scale = 1.0;
- while( realvalue>=1e100*scale && exp<=350){ scale*=1e100;exp+=100;}
- while( realvalue>=1e10*scale && exp<=350 ){ scale*=1e10; exp+=10; }
- while( realvalue>=10.0*scale && exp<=350 ){ scale *= 10.0; exp++; }
- realvalue /= scale;
- while( realvalue<1e-8 ){ realvalue *= 1e8; exp-=8; }
- while( realvalue<1.0 ){ realvalue *= 10.0; exp--; }
- if( exp>350 ){
- if( flag_zeropad ){
- realvalue = 9.0;
- exp = 999;
- }else{
- bufpt = buf;
- buf[0] = prefix;
- memcpy(buf+(prefix!=0),"Inf",4);
- length = 3+(prefix!=0);
- break;
- }
- }
- if( xtype!=etFLOAT ){
- realvalue += rounder;
- if( realvalue>=10.0 ){ realvalue *= 0.1; exp++; }
- }
- }
+ }
+ if( s.sign=='-' ){
+ prefix = '-';
+ }else{
+ prefix = flag_prefix;
}
+ exp = s.iDP-1;
+ if( xtype==etGENERIC && precision>0 ) precision--;
+
/*
** If the field type is etGENERIC, then convert to either etEXP
** or etFLOAT, as appropriate.
@@ -30759,9 +31365,8 @@ SQLITE_API void sqlite3_str_vappendf(
if( xtype==etEXP ){
e2 = 0;
}else{
- e2 = exp;
+ e2 = s.iDP - 1;
}
- nsd = 16 + flag_altform2*10;
bufpt = buf;
{
i64 szBufNeeded; /* Size of a temporary buffer needed */
@@ -30779,16 +31384,12 @@ SQLITE_API void sqlite3_str_vappendf(
*(bufpt++) = prefix;
}
/* Digits prior to the decimal point */
+ j = 0;
if( e2<0 ){
*(bufpt++) = '0';
- }else if( msd>0 ){
- for(; e2>=0; e2--){
- *(bufpt++) = et_getdigit_int(&longvalue,&msd);
- if( cThousand && (e2%3)==0 && e2>1 ) *(bufpt++) = ',';
- }
}else{
for(; e2>=0; e2--){
- *(bufpt++) = et_getdigit(&realvalue,&nsd);
+ *(bufpt++) = j<s.n ? s.z[j++] : '0';
if( cThousand && (e2%3)==0 && e2>1 ) *(bufpt++) = ',';
}
}
@@ -30798,19 +31399,12 @@ SQLITE_API void sqlite3_str_vappendf(
}
/* "0" digits after the decimal point but before the first
** significant digit of the number */
- for(e2++; e2<0; precision--, e2++){
- assert( precision>0 );
+ for(e2++; e2<0 && precision>0; precision--, e2++){
*(bufpt++) = '0';
}
/* Significant digits after the decimal point */
- if( msd>0 ){
- while( (precision--)>0 ){
- *(bufpt++) = et_getdigit_int(&longvalue,&msd);
- }
- }else{
- while( (precision--)>0 ){
- *(bufpt++) = et_getdigit(&realvalue,&nsd);
- }
+ while( (precision--)>0 ){
+ *(bufpt++) = j<s.n ? s.z[j++] : '0';
}
/* Remove trailing zeros and the "." if no digits follow the "." */
if( flag_rtz && flag_dp ){
@@ -30826,6 +31420,7 @@ SQLITE_API void sqlite3_str_vappendf(
}
/* Add the "eNNN" suffix */
if( xtype==etEXP ){
+ exp = s.iDP - 1;
*(bufpt++) = aDigits[infop->charset];
if( exp<0 ){
*(bufpt++) = '-'; exp = -exp;
@@ -30859,8 +31454,8 @@ SQLITE_API void sqlite3_str_vappendf(
while( nPad-- ) bufpt[i++] = '0';
length = width;
}
-#endif /* !defined(SQLITE_OMIT_FLOATING_POINT) */
break;
+ }
case etSIZE:
if( !bArgList ){
*(va_arg(ap,int*)) = pAccum->nChar;
@@ -31584,6 +32179,75 @@ SQLITE_API void sqlite3_str_appendf(StrAccum *p, const char *zFormat, ...){
va_end(ap);
}
+
+/*****************************************************************************
+** Reference counted string/blob storage
+*****************************************************************************/
+
+/*
+** Increase the reference count of the string by one.
+**
+** The input parameter is returned.
+*/
+SQLITE_PRIVATE char *sqlite3RCStrRef(char *z){
+ RCStr *p = (RCStr*)z;
+ assert( p!=0 );
+ p--;
+ p->nRCRef++;
+ return z;
+}
+
+/*
+** Decrease the reference count by one. Free the string when the
+** reference count reaches zero.
+*/
+SQLITE_PRIVATE void sqlite3RCStrUnref(void *z){
+ RCStr *p = (RCStr*)z;
+ assert( p!=0 );
+ p--;
+ assert( p->nRCRef>0 );
+ if( p->nRCRef>=2 ){
+ p->nRCRef--;
+ }else{
+ sqlite3_free(p);
+ }
+}
+
+/*
+** Create a new string that is capable of holding N bytes of text, not counting
+** the zero byte at the end. The string is uninitialized.
+**
+** The reference count is initially 1. Call sqlite3RCStrUnref() to free the
+** newly allocated string.
+**
+** This routine returns 0 on an OOM.
+*/
+SQLITE_PRIVATE char *sqlite3RCStrNew(u64 N){
+ RCStr *p = sqlite3_malloc64( N + sizeof(*p) + 1 );
+ if( p==0 ) return 0;
+ p->nRCRef = 1;
+ return (char*)&p[1];
+}
+
+/*
+** Change the size of the string so that it is able to hold N bytes.
+** The string might be reallocated, so return the new allocation.
+*/
+SQLITE_PRIVATE char *sqlite3RCStrResize(char *z, u64 N){
+ RCStr *p = (RCStr*)z;
+ RCStr *pNew;
+ assert( p!=0 );
+ p--;
+ assert( p->nRCRef==1 );
+ pNew = sqlite3_realloc64(p, N+sizeof(RCStr)+1);
+ if( pNew==0 ){
+ sqlite3_free(p);
+ return 0;
+ }else{
+ return (char*)&pNew[1];
+ }
+}
+
/************** End of printf.c **********************************************/
/************** Begin file treeview.c ****************************************/
/*
@@ -32000,6 +32664,7 @@ SQLITE_PRIVATE void sqlite3TreeViewWindow(TreeView *pView, const Window *pWin, u
sqlite3TreeViewItem(pView, "FILTER", 1);
sqlite3TreeViewExpr(pView, pWin->pFilter, 0);
sqlite3TreeViewPop(&pView);
+ if( pWin->eFrmType==TK_FILTER ) return;
}
sqlite3TreeViewPush(&pView, more);
if( pWin->zName ){
@@ -32009,7 +32674,7 @@ SQLITE_PRIVATE void sqlite3TreeViewWindow(TreeView *pView, const Window *pWin, u
}
if( pWin->zBase ) nElement++;
if( pWin->pOrderBy ) nElement++;
- if( pWin->eFrmType ) nElement++;
+ if( pWin->eFrmType!=0 && pWin->eFrmType!=TK_FILTER ) nElement++;
if( pWin->eExclude ) nElement++;
if( pWin->zBase ){
sqlite3TreeViewPush(&pView, (--nElement)>0);
@@ -32022,7 +32687,7 @@ SQLITE_PRIVATE void sqlite3TreeViewWindow(TreeView *pView, const Window *pWin, u
if( pWin->pOrderBy ){
sqlite3TreeViewExprList(pView, pWin->pOrderBy, (--nElement)>0, "ORDER-BY");
}
- if( pWin->eFrmType ){
+ if( pWin->eFrmType!=0 && pWin->eFrmType!=TK_FILTER ){
char zBuf[30];
const char *zFrmType = "ROWS";
if( pWin->eFrmType==TK_RANGE ) zFrmType = "RANGE";
@@ -32231,7 +32896,8 @@ SQLITE_PRIVATE void sqlite3TreeViewExpr(TreeView *pView, const Expr *pExpr, u8 m
};
assert( pExpr->op2==TK_IS || pExpr->op2==TK_ISNOT );
assert( pExpr->pRight );
- assert( sqlite3ExprSkipCollate(pExpr->pRight)->op==TK_TRUEFALSE );
+ assert( sqlite3ExprSkipCollateAndLikely(pExpr->pRight)->op
+ == TK_TRUEFALSE );
x = (pExpr->op2==TK_ISNOT)*2 + sqlite3ExprTruthValue(pExpr->pRight);
zUniOp = azOp[x];
break;
@@ -32269,7 +32935,7 @@ SQLITE_PRIVATE void sqlite3TreeViewExpr(TreeView *pView, const Expr *pExpr, u8 m
assert( ExprUseXList(pExpr) );
pFarg = pExpr->x.pList;
#ifndef SQLITE_OMIT_WINDOWFUNC
- pWin = ExprHasProperty(pExpr, EP_WinFunc) ? pExpr->y.pWin : 0;
+ pWin = IsWindowFunc(pExpr) ? pExpr->y.pWin : 0;
#else
pWin = 0;
#endif
@@ -32295,7 +32961,13 @@ SQLITE_PRIVATE void sqlite3TreeViewExpr(TreeView *pView, const Expr *pExpr, u8 m
sqlite3TreeViewLine(pView, "FUNCTION %Q%s", pExpr->u.zToken, zFlgs);
}
if( pFarg ){
- sqlite3TreeViewExprList(pView, pFarg, pWin!=0, 0);
+ sqlite3TreeViewExprList(pView, pFarg, pWin!=0 || pExpr->pLeft, 0);
+ if( pExpr->pLeft ){
+ Expr *pOB = pExpr->pLeft;
+ assert( pOB->op==TK_ORDER );
+ assert( ExprUseXList(pOB) );
+ sqlite3TreeViewExprList(pView, pOB->x.pList, pWin!=0, "ORDERBY");
+ }
}
#ifndef SQLITE_OMIT_WINDOWFUNC
if( pWin ){
@@ -32304,6 +32976,10 @@ SQLITE_PRIVATE void sqlite3TreeViewExpr(TreeView *pView, const Expr *pExpr, u8 m
#endif
break;
}
+ case TK_ORDER: {
+ sqlite3TreeViewExprList(pView, pExpr->x.pList, 0, "ORDERBY");
+ break;
+ }
#ifndef SQLITE_OMIT_SUBQUERY
case TK_EXISTS: {
assert( ExprUseXSelect(pExpr) );
@@ -32357,7 +33033,7 @@ SQLITE_PRIVATE void sqlite3TreeViewExpr(TreeView *pView, const Expr *pExpr, u8 m
assert( pExpr->x.pList->nExpr==2 );
pY = pExpr->x.pList->a[0].pExpr;
pZ = pExpr->x.pList->a[1].pExpr;
- sqlite3TreeViewLine(pView, "BETWEEN");
+ sqlite3TreeViewLine(pView, "BETWEEN%s", zFlgs);
sqlite3TreeViewExpr(pView, pX, 1);
sqlite3TreeViewExpr(pView, pY, 1);
sqlite3TreeViewExpr(pView, pZ, 0);
@@ -33492,7 +34168,38 @@ SQLITE_PRIVATE u32 sqlite3Utf8Read(
return c;
}
-
+/*
+** Read a single UTF8 character out of buffer z[], but reading no
+** more than n characters from the buffer. z[] is not zero-terminated.
+**
+** Return the number of bytes used to construct the character.
+**
+** Invalid UTF8 might generate a strange result. No effort is made
+** to detect invalid UTF8.
+**
+** At most 4 bytes will be read out of z[]. The return value will always
+** be between 1 and 4.
+*/
+SQLITE_PRIVATE int sqlite3Utf8ReadLimited(
+ const u8 *z,
+ int n,
+ u32 *piOut
+){
+ u32 c;
+ int i = 1;
+ assert( n>0 );
+ c = z[0];
+ if( c>=0xc0 ){
+ c = sqlite3Utf8Trans1[c-0xc0];
+ if( n>4 ) n = 4;
+ while( i<n && (z[i] & 0xc0)==0x80 ){
+ c = (c<<6) + (0x3f & z[i]);
+ i++;
+ }
+ }
+ *piOut = c;
+ return i;
+}
/*
@@ -33890,7 +34597,7 @@ SQLITE_PRIVATE void sqlite3UtfSelfTest(void){
/*
** Calls to sqlite3FaultSim() are used to simulate a failure during testing,
** or to bypass normal error detection during testing in order to let
-** execute proceed futher downstream.
+** execute proceed further downstream.
**
** In deployment, sqlite3FaultSim() *always* return SQLITE_OK (0). The
** sqlite3FaultSim() function only returns non-zero during testing.
@@ -34007,6 +34714,23 @@ SQLITE_PRIVATE void sqlite3ErrorClear(sqlite3 *db){
*/
SQLITE_PRIVATE void sqlite3SystemError(sqlite3 *db, int rc){
if( rc==SQLITE_IOERR_NOMEM ) return;
+#if defined(SQLITE_USE_SEH) && !defined(SQLITE_OMIT_WAL)
+ if( rc==SQLITE_IOERR_IN_PAGE ){
+ int ii;
+ int iErr;
+ sqlite3BtreeEnterAll(db);
+ for(ii=0; ii<db->nDb; ii++){
+ if( db->aDb[ii].pBt ){
+ iErr = sqlite3PagerWalSystemErrno(sqlite3BtreePager(db->aDb[ii].pBt));
+ if( iErr ){
+ db->iSysErrno = iErr;
+ }
+ }
+ }
+ sqlite3BtreeLeaveAll(db);
+ return;
+ }
+#endif
rc &= 0xff;
if( rc==SQLITE_CANTOPEN || rc==SQLITE_IOERR ){
db->iSysErrno = sqlite3OsGetLastError(db->pVfs);
@@ -34051,12 +34775,16 @@ SQLITE_PRIVATE void sqlite3ProgressCheck(Parse *p){
p->rc = SQLITE_INTERRUPT;
}
#ifndef SQLITE_OMIT_PROGRESS_CALLBACK
- if( db->xProgress && (++p->nProgressSteps)>=db->nProgressOps ){
- if( db->xProgress(db->pProgressArg) ){
- p->nErr++;
- p->rc = SQLITE_INTERRUPT;
+ if( db->xProgress ){
+ if( p->rc==SQLITE_INTERRUPT ){
+ p->nProgressSteps = 0;
+ }else if( (++p->nProgressSteps)>=db->nProgressOps ){
+ if( db->xProgress(db->pProgressArg) ){
+ p->nErr++;
+ p->rc = SQLITE_INTERRUPT;
+ }
+ p->nProgressSteps = 0;
}
- p->nProgressSteps = 0;
}
#endif
}
@@ -34252,43 +34980,40 @@ SQLITE_PRIVATE u8 sqlite3StrIHash(const char *z){
return h;
}
-/*
-** Compute 10 to the E-th power. Examples: E==1 results in 10.
-** E==2 results in 100. E==50 results in 1.0e50.
+/* Double-Double multiplication. (x[0],x[1]) *= (y,yy)
**
-** This routine only works for values of E between 1 and 341.
+** Reference:
+** T. J. Dekker, "A Floating-Point Technique for Extending the
+** Available Precision". 1971-07-26.
*/
-static LONGDOUBLE_TYPE sqlite3Pow10(int E){
-#if defined(_MSC_VER)
- static const LONGDOUBLE_TYPE x[] = {
- 1.0e+001L,
- 1.0e+002L,
- 1.0e+004L,
- 1.0e+008L,
- 1.0e+016L,
- 1.0e+032L,
- 1.0e+064L,
- 1.0e+128L,
- 1.0e+256L
- };
- LONGDOUBLE_TYPE r = 1.0;
- int i;
- assert( E>=0 && E<=307 );
- for(i=0; E!=0; i++, E >>=1){
- if( E & 1 ) r *= x[i];
- }
- return r;
-#else
- LONGDOUBLE_TYPE x = 10.0;
- LONGDOUBLE_TYPE r = 1.0;
- while(1){
- if( E & 1 ) r *= x;
- E >>= 1;
- if( E==0 ) break;
- x *= x;
- }
- return r;
-#endif
+static void dekkerMul2(volatile double *x, double y, double yy){
+ /*
+ ** The "volatile" keywords on parameter x[] and on local variables
+ ** below are needed force intermediate results to be truncated to
+ ** binary64 rather than be carried around in an extended-precision
+ ** format. The truncation is necessary for the Dekker algorithm to
+ ** work. Intel x86 floating point might omit the truncation without
+ ** the use of volatile.
+ */
+ volatile double tx, ty, p, q, c, cc;
+ double hx, hy;
+ u64 m;
+ memcpy(&m, (void*)&x[0], 8);
+ m &= 0xfffffffffc000000LL;
+ memcpy(&hx, &m, 8);
+ tx = x[0] - hx;
+ memcpy(&m, &y, 8);
+ m &= 0xfffffffffc000000LL;
+ memcpy(&hy, &m, 8);
+ ty = y - hy;
+ p = hx*hy;
+ q = hx*ty + tx*hy;
+ c = p+q;
+ cc = p - c + q + tx*ty;
+ cc = x[0]*yy + x[1]*y + cc;
+ x[0] = c + cc;
+ x[1] = c - x[0];
+ x[1] += cc;
}
/*
@@ -34329,12 +35054,11 @@ SQLITE_PRIVATE int sqlite3AtoF(const char *z, double *pResult, int length, u8 en
const char *zEnd;
/* sign * significand * (10 ^ (esign * exponent)) */
int sign = 1; /* sign of significand */
- i64 s = 0; /* significand */
+ u64 s = 0; /* significand */
int d = 0; /* adjust exponent for shifting decimal point */
int esign = 1; /* sign of exponent */
int e = 0; /* exponent */
int eValid = 1; /* True exponent is either not used or is well-formed */
- double result;
int nDigit = 0; /* Number of digits processed */
int eType = 1; /* 1: pure integer, 2+: fractional -1 or less: bad UTF16 */
@@ -34374,7 +35098,7 @@ SQLITE_PRIVATE int sqlite3AtoF(const char *z, double *pResult, int length, u8 en
while( z<zEnd && sqlite3Isdigit(*z) ){
s = s*10 + (*z - '0');
z+=incr; nDigit++;
- if( s>=((LARGEST_INT64-9)/10) ){
+ if( s>=((LARGEST_UINT64-9)/10) ){
/* skip non-significant significand digits
** (increase exponent by d to shift decimal left) */
while( z<zEnd && sqlite3Isdigit(*z) ){ z+=incr; d++; }
@@ -34389,7 +35113,7 @@ SQLITE_PRIVATE int sqlite3AtoF(const char *z, double *pResult, int length, u8 en
/* copy digits from after decimal to significand
** (decrease exponent by d to shift decimal right) */
while( z<zEnd && sqlite3Isdigit(*z) ){
- if( s<((LARGEST_INT64-9)/10) ){
+ if( s<((LARGEST_UINT64-9)/10) ){
s = s*10 + (*z - '0');
d--;
nDigit++;
@@ -34429,79 +35153,89 @@ SQLITE_PRIVATE int sqlite3AtoF(const char *z, double *pResult, int length, u8 en
while( z<zEnd && sqlite3Isspace(*z) ) z+=incr;
do_atof_calc:
- /* adjust exponent by d, and update sign */
- e = (e*esign) + d;
- if( e<0 ) {
- esign = -1;
- e *= -1;
- } else {
- esign = 1;
+ /* Zero is a special case */
+ if( s==0 ){
+ *pResult = sign<0 ? -0.0 : +0.0;
+ goto atof_return;
}
- if( s==0 ) {
- /* In the IEEE 754 standard, zero is signed. */
- result = sign<0 ? -(double)0 : (double)0;
- } else {
- /* Attempt to reduce exponent.
- **
- ** Branches that are not required for the correct answer but which only
- ** help to obtain the correct answer faster are marked with special
- ** comments, as a hint to the mutation tester.
- */
- while( e>0 ){ /*OPTIMIZATION-IF-TRUE*/
- if( esign>0 ){
- if( s>=(LARGEST_INT64/10) ) break; /*OPTIMIZATION-IF-FALSE*/
- s *= 10;
- }else{
- if( s%10!=0 ) break; /*OPTIMIZATION-IF-FALSE*/
- s /= 10;
- }
- e--;
- }
+ /* adjust exponent by d, and update sign */
+ e = (e*esign) + d;
- /* adjust the sign of significand */
- s = sign<0 ? -s : s;
+ /* Try to adjust the exponent to make it smaller */
+ while( e>0 && s<(LARGEST_UINT64/10) ){
+ s *= 10;
+ e--;
+ }
+ while( e<0 && (s%10)==0 ){
+ s /= 10;
+ e++;
+ }
- if( e==0 ){ /*OPTIMIZATION-IF-TRUE*/
- result = (double)s;
+ if( e==0 ){
+ *pResult = s;
+ }else if( sqlite3Config.bUseLongDouble ){
+ LONGDOUBLE_TYPE r = (LONGDOUBLE_TYPE)s;
+ if( e>0 ){
+ while( e>=100 ){ e-=100; r *= 1.0e+100L; }
+ while( e>=10 ){ e-=10; r *= 1.0e+10L; }
+ while( e>=1 ){ e-=1; r *= 1.0e+01L; }
}else{
- /* attempt to handle extremely small/large numbers better */
- if( e>307 ){ /*OPTIMIZATION-IF-TRUE*/
- if( e<342 ){ /*OPTIMIZATION-IF-TRUE*/
- LONGDOUBLE_TYPE scale = sqlite3Pow10(e-308);
- if( esign<0 ){
- result = s / scale;
- result /= 1.0e+308;
- }else{
- result = s * scale;
- result *= 1.0e+308;
- }
- }else{ assert( e>=342 );
- if( esign<0 ){
- result = 0.0*s;
- }else{
+ while( e<=-100 ){ e+=100; r *= 1.0e-100L; }
+ while( e<=-10 ){ e+=10; r *= 1.0e-10L; }
+ while( e<=-1 ){ e+=1; r *= 1.0e-01L; }
+ }
+ assert( r>=0.0 );
+ if( r>+1.7976931348623157081452742373e+308L ){
#ifdef INFINITY
- result = INFINITY*s;
+ *pResult = +INFINITY;
#else
- result = 1e308*1e308*s; /* Infinity */
+ *pResult = 1.0e308*10.0;
#endif
- }
- }
- }else{
- LONGDOUBLE_TYPE scale = sqlite3Pow10(e);
- if( esign<0 ){
- result = s / scale;
- }else{
- result = s * scale;
- }
+ }else{
+ *pResult = (double)r;
+ }
+ }else{
+ double rr[2];
+ u64 s2;
+ rr[0] = (double)s;
+ s2 = (u64)rr[0];
+ rr[1] = s>=s2 ? (double)(s - s2) : -(double)(s2 - s);
+ if( e>0 ){
+ while( e>=100 ){
+ e -= 100;
+ dekkerMul2(rr, 1.0e+100, -1.5902891109759918046e+83);
+ }
+ while( e>=10 ){
+ e -= 10;
+ dekkerMul2(rr, 1.0e+10, 0.0);
+ }
+ while( e>=1 ){
+ e -= 1;
+ dekkerMul2(rr, 1.0e+01, 0.0);
+ }
+ }else{
+ while( e<=-100 ){
+ e += 100;
+ dekkerMul2(rr, 1.0e-100, -1.99918998026028836196e-117);
+ }
+ while( e<=-10 ){
+ e += 10;
+ dekkerMul2(rr, 1.0e-10, -3.6432197315497741579e-27);
+ }
+ while( e<=-1 ){
+ e += 1;
+ dekkerMul2(rr, 1.0e-01, -5.5511151231257827021e-18);
}
}
+ *pResult = rr[0]+rr[1];
+ if( sqlite3IsNaN(*pResult) ) *pResult = 1e300*1e300;
}
+ if( sign<0 ) *pResult = -*pResult;
+ assert( !sqlite3IsNaN(*pResult) );
- /* store the result */
- *pResult = result;
-
- /* return true if number and no extra non-whitespace chracters after */
+atof_return:
+ /* return true if number and no extra non-whitespace characters after */
if( z==zEnd && nDigit>0 && eValid && eType>0 ){
return eType;
}else if( eType>=2 && (eType==3 || eValid) && nDigit>0 ){
@@ -34637,7 +35371,7 @@ SQLITE_PRIVATE int sqlite3Atoi64(const char *zNum, i64 *pNum, int length, u8 enc
/* This test and assignment is needed only to suppress UB warnings
** from clang and -fsanitize=undefined. This test and assignment make
** the code a little larger and slower, and no harm comes from omitting
- ** them, but we must appaise the undefined-behavior pharisees. */
+ ** them, but we must appease the undefined-behavior pharisees. */
*pNum = neg ? SMALLEST_INT64 : LARGEST_INT64;
}else if( neg ){
*pNum = -(i64)u;
@@ -34715,7 +35449,9 @@ SQLITE_PRIVATE int sqlite3DecOrHexToI64(const char *z, i64 *pOut){
}else
#endif /* SQLITE_OMIT_HEX_INTEGER */
{
- return sqlite3Atoi64(z, pOut, sqlite3Strlen30(z), SQLITE_UTF8);
+ int n = (int)(0x3fffffff&strspn(z,"+- \n\t0123456789"));
+ if( z[n] ) n++;
+ return sqlite3Atoi64(z, pOut, n, SQLITE_UTF8);
}
}
@@ -34795,6 +35531,153 @@ SQLITE_PRIVATE int sqlite3Atoi(const char *z){
}
/*
+** Decode a floating-point value into an approximate decimal
+** representation.
+**
+** Round the decimal representation to n significant digits if
+** n is positive. Or round to -n signficant digits after the
+** decimal point if n is negative. No rounding is performed if
+** n is zero.
+**
+** The significant digits of the decimal representation are
+** stored in p->z[] which is a often (but not always) a pointer
+** into the middle of p->zBuf[]. There are p->n significant digits.
+** The p->z[] array is *not* zero-terminated.
+*/
+SQLITE_PRIVATE void sqlite3FpDecode(FpDecode *p, double r, int iRound, int mxRound){
+ int i;
+ u64 v;
+ int e, exp = 0;
+ p->isSpecial = 0;
+ p->z = p->zBuf;
+
+ /* Convert negative numbers to positive. Deal with Infinity, 0.0, and
+ ** NaN. */
+ if( r<0.0 ){
+ p->sign = '-';
+ r = -r;
+ }else if( r==0.0 ){
+ p->sign = '+';
+ p->n = 1;
+ p->iDP = 1;
+ p->z = "0";
+ return;
+ }else{
+ p->sign = '+';
+ }
+ memcpy(&v,&r,8);
+ e = v>>52;
+ if( (e&0x7ff)==0x7ff ){
+ p->isSpecial = 1 + (v!=0x7ff0000000000000LL);
+ p->n = 0;
+ p->iDP = 0;
+ return;
+ }
+
+ /* Multiply r by powers of ten until it lands somewhere in between
+ ** 1.0e+19 and 1.0e+17.
+ */
+ if( sqlite3Config.bUseLongDouble ){
+ LONGDOUBLE_TYPE rr = r;
+ if( rr>=1.0e+19 ){
+ while( rr>=1.0e+119L ){ exp+=100; rr *= 1.0e-100L; }
+ while( rr>=1.0e+29L ){ exp+=10; rr *= 1.0e-10L; }
+ while( rr>=1.0e+19L ){ exp++; rr *= 1.0e-1L; }
+ }else{
+ while( rr<1.0e-97L ){ exp-=100; rr *= 1.0e+100L; }
+ while( rr<1.0e+07L ){ exp-=10; rr *= 1.0e+10L; }
+ while( rr<1.0e+17L ){ exp--; rr *= 1.0e+1L; }
+ }
+ v = (u64)rr;
+ }else{
+ /* If high-precision floating point is not available using "long double",
+ ** then use Dekker-style double-double computation to increase the
+ ** precision.
+ **
+ ** The error terms on constants like 1.0e+100 computed using the
+ ** decimal extension, for example as follows:
+ **
+ ** SELECT decimal_exp(decimal_sub('1.0e+100',decimal(1.0e+100)));
+ */
+ double rr[2];
+ rr[0] = r;
+ rr[1] = 0.0;
+ if( rr[0]>9.223372036854774784e+18 ){
+ while( rr[0]>9.223372036854774784e+118 ){
+ exp += 100;
+ dekkerMul2(rr, 1.0e-100, -1.99918998026028836196e-117);
+ }
+ while( rr[0]>9.223372036854774784e+28 ){
+ exp += 10;
+ dekkerMul2(rr, 1.0e-10, -3.6432197315497741579e-27);
+ }
+ while( rr[0]>9.223372036854774784e+18 ){
+ exp += 1;
+ dekkerMul2(rr, 1.0e-01, -5.5511151231257827021e-18);
+ }
+ }else{
+ while( rr[0]<9.223372036854774784e-83 ){
+ exp -= 100;
+ dekkerMul2(rr, 1.0e+100, -1.5902891109759918046e+83);
+ }
+ while( rr[0]<9.223372036854774784e+07 ){
+ exp -= 10;
+ dekkerMul2(rr, 1.0e+10, 0.0);
+ }
+ while( rr[0]<9.22337203685477478e+17 ){
+ exp -= 1;
+ dekkerMul2(rr, 1.0e+01, 0.0);
+ }
+ }
+ v = rr[1]<0.0 ? (u64)rr[0]-(u64)(-rr[1]) : (u64)rr[0]+(u64)rr[1];
+ }
+
+
+ /* Extract significant digits. */
+ i = sizeof(p->zBuf)-1;
+ assert( v>0 );
+ while( v ){ p->zBuf[i--] = (v%10) + '0'; v /= 10; }
+ assert( i>=0 && i<sizeof(p->zBuf)-1 );
+ p->n = sizeof(p->zBuf) - 1 - i;
+ assert( p->n>0 );
+ assert( p->n<sizeof(p->zBuf) );
+ p->iDP = p->n + exp;
+ if( iRound<0 ){
+ iRound = p->iDP - iRound;
+ if( iRound==0 && p->zBuf[i+1]>='5' ){
+ iRound = 1;
+ p->zBuf[i--] = '0';
+ p->n++;
+ p->iDP++;
+ }
+ }
+ if( iRound>0 && (iRound<p->n || p->n>mxRound) ){
+ char *z = &p->zBuf[i+1];
+ if( iRound>mxRound ) iRound = mxRound;
+ p->n = iRound;
+ if( z[iRound]>='5' ){
+ int j = iRound-1;
+ while( 1 /*exit-by-break*/ ){
+ z[j]++;
+ if( z[j]<='9' ) break;
+ z[j] = '0';
+ if( j==0 ){
+ p->z[i--] = '1';
+ p->n++;
+ p->iDP++;
+ break;
+ }else{
+ j--;
+ }
+ }
+ }
+ }
+ p->z = &p->zBuf[i+1];
+ assert( i+p->n < sizeof(p->zBuf) );
+ while( ALWAYS(p->n>0) && p->z[p->n-1]=='0' ){ p->n--; }
+}
+
+/*
** Try to convert z into an unsigned 32-bit integer. Return true on
** success and false if there is an error.
**
@@ -35057,121 +35940,32 @@ SQLITE_PRIVATE u8 sqlite3GetVarint(const unsigned char *p, u64 *v){
** this function assumes the single-byte case has already been handled.
*/
SQLITE_PRIVATE u8 sqlite3GetVarint32(const unsigned char *p, u32 *v){
- u32 a,b;
+ u64 v64;
+ u8 n;
- /* The 1-byte case. Overwhelmingly the most common. Handled inline
- ** by the getVarin32() macro */
- a = *p;
- /* a: p0 (unmasked) */
-#ifndef getVarint32
- if (!(a&0x80))
- {
- /* Values between 0 and 127 */
- *v = a;
- return 1;
- }
-#endif
+ /* Assume that the single-byte case has already been handled by
+ ** the getVarint32() macro */
+ assert( (p[0] & 0x80)!=0 );
- /* The 2-byte case */
- p++;
- b = *p;
- /* b: p1 (unmasked) */
- if (!(b&0x80))
- {
- /* Values between 128 and 16383 */
- a &= 0x7f;
- a = a<<7;
- *v = a | b;
+ if( (p[1] & 0x80)==0 ){
+ /* This is the two-byte case */
+ *v = ((p[0]&0x7f)<<7) | p[1];
return 2;
}
-
- /* The 3-byte case */
- p++;
- a = a<<14;
- a |= *p;
- /* a: p0<<14 | p2 (unmasked) */
- if (!(a&0x80))
- {
- /* Values between 16384 and 2097151 */
- a &= (0x7f<<14)|(0x7f);
- b &= 0x7f;
- b = b<<7;
- *v = a | b;
+ if( (p[2] & 0x80)==0 ){
+ /* This is the three-byte case */
+ *v = ((p[0]&0x7f)<<14) | ((p[1]&0x7f)<<7) | p[2];
return 3;
}
-
- /* A 32-bit varint is used to store size information in btrees.
- ** Objects are rarely larger than 2MiB limit of a 3-byte varint.
- ** A 3-byte varint is sufficient, for example, to record the size
- ** of a 1048569-byte BLOB or string.
- **
- ** We only unroll the first 1-, 2-, and 3- byte cases. The very
- ** rare larger cases can be handled by the slower 64-bit varint
- ** routine.
- */
-#if 1
- {
- u64 v64;
- u8 n;
-
- n = sqlite3GetVarint(p-2, &v64);
- assert( n>3 && n<=9 );
- if( (v64 & SQLITE_MAX_U32)!=v64 ){
- *v = 0xffffffff;
- }else{
- *v = (u32)v64;
- }
- return n;
- }
-
-#else
- /* For following code (kept for historical record only) shows an
- ** unrolling for the 3- and 4-byte varint cases. This code is
- ** slightly faster, but it is also larger and much harder to test.
- */
- p++;
- b = b<<14;
- b |= *p;
- /* b: p1<<14 | p3 (unmasked) */
- if (!(b&0x80))
- {
- /* Values between 2097152 and 268435455 */
- b &= (0x7f<<14)|(0x7f);
- a &= (0x7f<<14)|(0x7f);
- a = a<<7;
- *v = a | b;
- return 4;
- }
-
- p++;
- a = a<<14;
- a |= *p;
- /* a: p0<<28 | p2<<14 | p4 (unmasked) */
- if (!(a&0x80))
- {
- /* Values between 268435456 and 34359738367 */
- a &= SLOT_4_2_0;
- b &= SLOT_4_2_0;
- b = b<<7;
- *v = a | b;
- return 5;
- }
-
- /* We can only reach this point when reading a corrupt database
- ** file. In that case we are not in any hurry. Use the (relatively
- ** slow) general-purpose sqlite3GetVarint() routine to extract the
- ** value. */
- {
- u64 v64;
- u8 n;
-
- p -= 4;
- n = sqlite3GetVarint(p, &v64);
- assert( n>5 && n<=9 );
+ /* four or more bytes */
+ n = sqlite3GetVarint(p, &v64);
+ assert( n>3 && n<=9 );
+ if( (v64 & SQLITE_MAX_U32)!=v64 ){
+ *v = 0xffffffff;
+ }else{
*v = (u32)v64;
- return n;
}
-#endif
+ return n;
}
/*
@@ -35322,7 +36116,7 @@ SQLITE_PRIVATE int sqlite3SafetyCheckSickOrOk(sqlite3 *db){
}
/*
-** Attempt to add, substract, or multiply the 64-bit signed value iB against
+** Attempt to add, subtract, or multiply the 64-bit signed value iB against
** the other 64-bit signed integer at *pA and store the result in *pA.
** Return 0 on success. Or if the operation would have resulted in an
** overflow, leave *pA unchanged and return 1.
@@ -35635,7 +36429,7 @@ SQLITE_PRIVATE int sqlite3VListNameToNum(VList *pIn, const char *zName, int nNam
#define SQLITE_HWTIME_H
/*
-** The following routine only works on pentium-class (or newer) processors.
+** The following routine only works on Pentium-class (or newer) processors.
** It uses the RDTSC opcode to read the cycle count value out of the
** processor and returns that value. This can be used for high-res
** profiling.
@@ -35807,7 +36601,7 @@ static void insertElement(
}
-/* Resize the hash table so that it cantains "new_size" buckets.
+/* Resize the hash table so that it contains "new_size" buckets.
**
** The hash table might fail to resize if sqlite3_malloc() fails or
** if the new size is the same as the prior size.
@@ -36167,19 +36961,22 @@ SQLITE_PRIVATE const char *sqlite3OpcodeName(int i){
/* 171 */ "VCreate" OpHelp(""),
/* 172 */ "VDestroy" OpHelp(""),
/* 173 */ "VOpen" OpHelp(""),
- /* 174 */ "VInitIn" OpHelp("r[P2]=ValueList(P1,P3)"),
- /* 175 */ "VColumn" OpHelp("r[P3]=vcolumn(P2)"),
- /* 176 */ "VRename" OpHelp(""),
- /* 177 */ "Pagecount" OpHelp(""),
- /* 178 */ "MaxPgcnt" OpHelp(""),
- /* 179 */ "ClrSubtype" OpHelp("r[P1].subtype = 0"),
- /* 180 */ "FilterAdd" OpHelp("filter(P1) += key(P3@P4)"),
- /* 181 */ "Trace" OpHelp(""),
- /* 182 */ "CursorHint" OpHelp(""),
- /* 183 */ "ReleaseReg" OpHelp("release r[P1@P2] mask P3"),
- /* 184 */ "Noop" OpHelp(""),
- /* 185 */ "Explain" OpHelp(""),
- /* 186 */ "Abortable" OpHelp(""),
+ /* 174 */ "VCheck" OpHelp(""),
+ /* 175 */ "VInitIn" OpHelp("r[P2]=ValueList(P1,P3)"),
+ /* 176 */ "VColumn" OpHelp("r[P3]=vcolumn(P2)"),
+ /* 177 */ "VRename" OpHelp(""),
+ /* 178 */ "Pagecount" OpHelp(""),
+ /* 179 */ "MaxPgcnt" OpHelp(""),
+ /* 180 */ "ClrSubtype" OpHelp("r[P1].subtype = 0"),
+ /* 181 */ "GetSubtype" OpHelp("r[P2] = r[P1].subtype"),
+ /* 182 */ "SetSubtype" OpHelp("r[P2].subtype = r[P1]"),
+ /* 183 */ "FilterAdd" OpHelp("filter(P1) += key(P3@P4)"),
+ /* 184 */ "Trace" OpHelp(""),
+ /* 185 */ "CursorHint" OpHelp(""),
+ /* 186 */ "ReleaseReg" OpHelp("release r[P1@P2] mask P3"),
+ /* 187 */ "Noop" OpHelp(""),
+ /* 188 */ "Explain" OpHelp(""),
+ /* 189 */ "Abortable" OpHelp(""),
};
return azName[i];
}
@@ -37193,7 +37990,7 @@ SQLITE_PRIVATE int sqlite3KvvfsInit(void){
** This source file is organized into divisions where the logic for various
** subfunctions is contained within the appropriate division. PLEASE
** KEEP THE STRUCTURE OF THIS FILE INTACT. New code should be placed
-** in the correct division and should be clearly labeled.
+** in the correct division and should be clearly labelled.
**
** The layout of divisions is as follows:
**
@@ -37780,7 +38577,7 @@ static int robustFchown(int fd, uid_t uid, gid_t gid){
/*
** This is the xSetSystemCall() method of sqlite3_vfs for all of the
-** "unix" VFSes. Return SQLITE_OK opon successfully updating the
+** "unix" VFSes. Return SQLITE_OK upon successfully updating the
** system call pointer, or SQLITE_NOTFOUND if there is no configurable
** system call named zName.
*/
@@ -38302,7 +39099,7 @@ static void vxworksReleaseFileId(struct vxworksFileId *pId){
** If you close a file descriptor that points to a file that has locks,
** all locks on that file that are owned by the current process are
** released. To work around this problem, each unixInodeInfo object
-** maintains a count of the number of pending locks on tha inode.
+** maintains a count of the number of pending locks on the inode.
** When an attempt is made to close an unixFile, if there are
** other unixFile open on the same inode that are holding locks, the call
** to close() the file descriptor is deferred until all of the locks clear.
@@ -38316,7 +39113,7 @@ static void vxworksReleaseFileId(struct vxworksFileId *pId){
** not posix compliant. Under LinuxThreads, a lock created by thread
** A cannot be modified or overridden by a different thread B.
** Only thread A can modify the lock. Locking behavior is correct
-** if the appliation uses the newer Native Posix Thread Library (NPTL)
+** if the application uses the newer Native Posix Thread Library (NPTL)
** on linux - with NPTL a lock created by thread A can override locks
** in thread B. But there is no way to know at compile-time which
** threading library is being used. So there is no way to know at
@@ -38518,7 +39315,7 @@ static void storeLastErrno(unixFile *pFile, int error){
}
/*
-** Close all file descriptors accumuated in the unixInodeInfo->pUnused list.
+** Close all file descriptors accumulated in the unixInodeInfo->pUnused list.
*/
static void closePendingFds(unixFile *pFile){
unixInodeInfo *pInode = pFile->pInode;
@@ -38881,7 +39678,7 @@ static int unixLock(sqlite3_file *id, int eFileLock){
** slightly in order to be compatible with Windows95 systems simultaneously
** accessing the same database file, in case that is ever required.
**
- ** Symbols defined in os.h indentify the 'pending byte' and the 'reserved
+ ** Symbols defined in os.h identify the 'pending byte' and the 'reserved
** byte', each single bytes at well known offsets, and the 'shared byte
** range', a range of 510 bytes at a well known offset.
**
@@ -38889,7 +39686,7 @@ static int unixLock(sqlite3_file *id, int eFileLock){
** byte'. If this is successful, 'shared byte range' is read-locked
** and the lock on the 'pending byte' released. (Legacy note: When
** SQLite was first developed, Windows95 systems were still very common,
- ** and Widnows95 lacks a shared-lock capability. So on Windows95, a
+ ** and Windows95 lacks a shared-lock capability. So on Windows95, a
** single randomly selected by from the 'shared byte range' is locked.
** Windows95 is now pretty much extinct, but this work-around for the
** lack of shared-locks on Windows95 lives on, for backwards
@@ -38910,7 +39707,7 @@ static int unixLock(sqlite3_file *id, int eFileLock){
** obtaining a write-lock on the 'pending byte'. This ensures that no new
** SHARED locks can be obtained, but existing SHARED locks are allowed to
** persist. If the call to this function fails to obtain the EXCLUSIVE
- ** lock in this case, it holds the PENDING lock intead. The client may
+ ** lock in this case, it holds the PENDING lock instead. The client may
** then re-attempt the EXCLUSIVE lock later on, after existing SHARED
** locks have cleared.
*/
@@ -38938,7 +39735,7 @@ static int unixLock(sqlite3_file *id, int eFileLock){
/* Make sure the locking sequence is correct.
** (1) We never move from unlocked to anything higher than shared lock.
- ** (2) SQLite never explicitly requests a pendig lock.
+ ** (2) SQLite never explicitly requests a pending lock.
** (3) A shared lock is always held when a reserve lock is requested.
*/
assert( pFile->eFileLock!=NO_LOCK || eFileLock==SHARED_LOCK );
@@ -40156,7 +40953,7 @@ static int afpLock(sqlite3_file *id, int eFileLock){
/* Make sure the locking sequence is correct
** (1) We never move from unlocked to anything higher than shared lock.
- ** (2) SQLite never explicitly requests a pendig lock.
+ ** (2) SQLite never explicitly requests a pending lock.
** (3) A shared lock is always held when a reserve lock is requested.
*/
assert( pFile->eFileLock!=NO_LOCK || eFileLock==SHARED_LOCK );
@@ -40272,7 +41069,7 @@ static int afpLock(sqlite3_file *id, int eFileLock){
if( !(failed = afpSetLock(context->dbPath, pFile, SHARED_FIRST +
pInode->sharedByte, 1, 0)) ){
int failed2 = SQLITE_OK;
- /* now attemmpt to get the exclusive lock range */
+ /* now attempt to get the exclusive lock range */
failed = afpSetLock(context->dbPath, pFile, SHARED_FIRST,
SHARED_SIZE, 1);
if( failed && (failed2 = afpSetLock(context->dbPath, pFile,
@@ -40321,9 +41118,6 @@ static int afpUnlock(sqlite3_file *id, int eFileLock) {
unixInodeInfo *pInode;
afpLockingContext *context = (afpLockingContext *) pFile->lockingContext;
int skipShared = 0;
-#ifdef SQLITE_TEST
- int h = pFile->h;
-#endif
assert( pFile );
OSTRACE(("UNLOCK %d %d was %d(%d,%d) pid=%d (afp)\n", pFile->h, eFileLock,
@@ -40339,9 +41133,6 @@ static int afpUnlock(sqlite3_file *id, int eFileLock) {
assert( pInode->nShared!=0 );
if( pFile->eFileLock>SHARED_LOCK ){
assert( pInode->eFileLock==pFile->eFileLock );
- SimulateIOErrorBenign(1);
- SimulateIOError( h=(-1) )
- SimulateIOErrorBenign(0);
#ifdef SQLITE_DEBUG
/* When reducing a lock such that other processes can start
@@ -40390,9 +41181,6 @@ static int afpUnlock(sqlite3_file *id, int eFileLock) {
unsigned long long sharedLockByte = SHARED_FIRST+pInode->sharedByte;
pInode->nShared--;
if( pInode->nShared==0 ){
- SimulateIOErrorBenign(1);
- SimulateIOError( h=(-1) )
- SimulateIOErrorBenign(0);
if( !skipShared ){
rc = afpSetLock(context->dbPath, pFile, sharedLockByte, 1, 0);
}
@@ -40567,7 +41355,7 @@ static int unixRead(
#endif
#if SQLITE_MAX_MMAP_SIZE>0
- /* Deal with as much of this read request as possible by transfering
+ /* Deal with as much of this read request as possible by transferring
** data from the memory mapping using memcpy(). */
if( offset<pFile->mmapSize ){
if( offset+amt <= pFile->mmapSize ){
@@ -40719,7 +41507,7 @@ static int unixWrite(
#endif
#if defined(SQLITE_MMAP_READWRITE) && SQLITE_MAX_MMAP_SIZE>0
- /* Deal with as much of this write request as possible by transfering
+ /* Deal with as much of this write request as possible by transferring
** data from the memory mapping using memcpy(). */
if( offset<pFile->mmapSize ){
if( offset+amt <= pFile->mmapSize ){
@@ -40841,7 +41629,7 @@ static int full_fsync(int fd, int fullSync, int dataOnly){
/* If we compiled with the SQLITE_NO_SYNC flag, then syncing is a
** no-op. But go ahead and call fstat() to validate the file
** descriptor as we need a method to provoke a failure during
- ** coverate testing.
+ ** coverage testing.
*/
#ifdef SQLITE_NO_SYNC
{
@@ -41234,7 +42022,13 @@ static int unixFileControl(sqlite3_file *id, int op, void *pArg){
#ifdef SQLITE_ENABLE_SETLK_TIMEOUT
case SQLITE_FCNTL_LOCK_TIMEOUT: {
int iOld = pFile->iBusyTimeout;
+#if SQLITE_ENABLE_SETLK_TIMEOUT==1
pFile->iBusyTimeout = *(int*)pArg;
+#elif SQLITE_ENABLE_SETLK_TIMEOUT==2
+ pFile->iBusyTimeout = !!(*(int*)pArg);
+#else
+# error "SQLITE_ENABLE_SETLK_TIMEOUT must be set to 1 or 2"
+#endif
*(int*)pArg = iOld;
return SQLITE_OK;
}
@@ -41487,6 +42281,25 @@ static int unixGetpagesize(void){
** Either unixShmNode.pShmMutex must be held or unixShmNode.nRef==0 and
** unixMutexHeld() is true when reading or writing any other field
** in this structure.
+**
+** aLock[SQLITE_SHM_NLOCK]:
+** This array records the various locks held by clients on each of the
+** SQLITE_SHM_NLOCK slots. If the aLock[] entry is set to 0, then no
+** locks are held by the process on this slot. If it is set to -1, then
+** some client holds an EXCLUSIVE lock on the locking slot. If the aLock[]
+** value is set to a positive value, then it is the number of shared
+** locks currently held on the slot.
+**
+** aMutex[SQLITE_SHM_NLOCK]:
+** Normally, when SQLITE_ENABLE_SETLK_TIMEOUT is not defined, mutex
+** pShmMutex is used to protect the aLock[] array and the right to
+** call fcntl() on unixShmNode.hShm to obtain or release locks.
+**
+** If SQLITE_ENABLE_SETLK_TIMEOUT is defined though, we use an array
+** of mutexes - one for each locking slot. To read or write locking
+** slot aLock[iSlot], the caller must hold the corresponding mutex
+** aMutex[iSlot]. Similarly, to call fcntl() to obtain or release a
+** lock corresponding to slot iSlot, mutex aMutex[iSlot] must be held.
*/
struct unixShmNode {
unixInodeInfo *pInode; /* unixInodeInfo that owns this SHM node */
@@ -41500,10 +42313,11 @@ struct unixShmNode {
char **apRegion; /* Array of mapped shared-memory regions */
int nRef; /* Number of unixShm objects pointing to this */
unixShm *pFirst; /* All unixShm objects pointing to this */
+#ifdef SQLITE_ENABLE_SETLK_TIMEOUT
+ sqlite3_mutex *aMutex[SQLITE_SHM_NLOCK];
+#endif
int aLock[SQLITE_SHM_NLOCK]; /* # shared locks on slot, -1==excl lock */
#ifdef SQLITE_DEBUG
- u8 exclMask; /* Mask of exclusive locks held */
- u8 sharedMask; /* Mask of shared locks held */
u8 nextShmId; /* Next available unixShm.id value */
#endif
};
@@ -41586,16 +42400,35 @@ static int unixShmSystemLock(
struct flock f; /* The posix advisory locking structure */
int rc = SQLITE_OK; /* Result code form fcntl() */
- /* Access to the unixShmNode object is serialized by the caller */
pShmNode = pFile->pInode->pShmNode;
- assert( pShmNode->nRef==0 || sqlite3_mutex_held(pShmNode->pShmMutex) );
- assert( pShmNode->nRef>0 || unixMutexHeld() );
+
+ /* Assert that the parameters are within expected range and that the
+ ** correct mutex or mutexes are held. */
+ assert( pShmNode->nRef>=0 );
+ assert( (ofst==UNIX_SHM_DMS && n==1)
+ || (ofst>=UNIX_SHM_BASE && ofst+n<=(UNIX_SHM_BASE+SQLITE_SHM_NLOCK))
+ );
+ if( ofst==UNIX_SHM_DMS ){
+ assert( pShmNode->nRef>0 || unixMutexHeld() );
+ assert( pShmNode->nRef==0 || sqlite3_mutex_held(pShmNode->pShmMutex) );
+ }else{
+#ifdef SQLITE_ENABLE_SETLK_TIMEOUT
+ int ii;
+ for(ii=ofst-UNIX_SHM_BASE; ii<ofst-UNIX_SHM_BASE+n; ii++){
+ assert( sqlite3_mutex_held(pShmNode->aMutex[ii]) );
+ }
+#else
+ assert( sqlite3_mutex_held(pShmNode->pShmMutex) );
+ assert( pShmNode->nRef>0 );
+#endif
+ }
/* Shared locks never span more than one byte */
assert( n==1 || lockType!=F_RDLCK );
/* Locks are within range */
assert( n>=1 && n<=SQLITE_SHM_NLOCK );
+ assert( ofst>=UNIX_SHM_BASE && ofst<=(UNIX_SHM_DMS+SQLITE_SHM_NLOCK) );
if( pShmNode->hShm>=0 ){
int res;
@@ -41606,7 +42439,7 @@ static int unixShmSystemLock(
f.l_len = n;
res = osSetPosixAdvisoryLock(pShmNode->hShm, &f, pFile);
if( res==-1 ){
-#ifdef SQLITE_ENABLE_SETLK_TIMEOUT
+#if defined(SQLITE_ENABLE_SETLK_TIMEOUT) && SQLITE_ENABLE_SETLK_TIMEOUT==1
rc = (pFile->iBusyTimeout ? SQLITE_BUSY_TIMEOUT : SQLITE_BUSY);
#else
rc = SQLITE_BUSY;
@@ -41614,39 +42447,28 @@ static int unixShmSystemLock(
}
}
- /* Update the global lock state and do debug tracing */
+ /* Do debug tracing */
#ifdef SQLITE_DEBUG
- { u16 mask;
OSTRACE(("SHM-LOCK "));
- mask = ofst>31 ? 0xffff : (1<<(ofst+n)) - (1<<ofst);
if( rc==SQLITE_OK ){
if( lockType==F_UNLCK ){
- OSTRACE(("unlock %d ok", ofst));
- pShmNode->exclMask &= ~mask;
- pShmNode->sharedMask &= ~mask;
+ OSTRACE(("unlock %d..%d ok\n", ofst, ofst+n-1));
}else if( lockType==F_RDLCK ){
- OSTRACE(("read-lock %d ok", ofst));
- pShmNode->exclMask &= ~mask;
- pShmNode->sharedMask |= mask;
+ OSTRACE(("read-lock %d..%d ok\n", ofst, ofst+n-1));
}else{
assert( lockType==F_WRLCK );
- OSTRACE(("write-lock %d ok", ofst));
- pShmNode->exclMask |= mask;
- pShmNode->sharedMask &= ~mask;
+ OSTRACE(("write-lock %d..%d ok\n", ofst, ofst+n-1));
}
}else{
if( lockType==F_UNLCK ){
- OSTRACE(("unlock %d failed", ofst));
+ OSTRACE(("unlock %d..%d failed\n", ofst, ofst+n-1));
}else if( lockType==F_RDLCK ){
- OSTRACE(("read-lock failed"));
+ OSTRACE(("read-lock %d..%d failed\n", ofst, ofst+n-1));
}else{
assert( lockType==F_WRLCK );
- OSTRACE(("write-lock %d failed", ofst));
+ OSTRACE(("write-lock %d..%d failed\n", ofst, ofst+n-1));
}
}
- OSTRACE((" - afterwards %03x,%03x\n",
- pShmNode->sharedMask, pShmNode->exclMask));
- }
#endif
return rc;
@@ -41683,6 +42505,11 @@ static void unixShmPurge(unixFile *pFd){
int i;
assert( p->pInode==pFd->pInode );
sqlite3_mutex_free(p->pShmMutex);
+#ifdef SQLITE_ENABLE_SETLK_TIMEOUT
+ for(i=0; i<SQLITE_SHM_NLOCK; i++){
+ sqlite3_mutex_free(p->aMutex[i]);
+ }
+#endif
for(i=0; i<p->nRegion; i+=nShmPerMap){
if( p->hShm>=0 ){
osMunmap(p->apRegion[i], p->szRegion);
@@ -41742,7 +42569,20 @@ static int unixLockSharedMemory(unixFile *pDbFd, unixShmNode *pShmNode){
pShmNode->isUnlocked = 1;
rc = SQLITE_READONLY_CANTINIT;
}else{
+#ifdef SQLITE_ENABLE_SETLK_TIMEOUT
+ /* Do not use a blocking lock here. If the lock cannot be obtained
+ ** immediately, it means some other connection is truncating the
+ ** *-shm file. And after it has done so, it will not release its
+ ** lock, but only downgrade it to a shared lock. So no point in
+ ** blocking here. The call below to obtain the shared DMS lock may
+ ** use a blocking lock. */
+ int iSaveTimeout = pDbFd->iBusyTimeout;
+ pDbFd->iBusyTimeout = 0;
+#endif
rc = unixShmSystemLock(pDbFd, F_WRLCK, UNIX_SHM_DMS, 1);
+#ifdef SQLITE_ENABLE_SETLK_TIMEOUT
+ pDbFd->iBusyTimeout = iSaveTimeout;
+#endif
/* The first connection to attach must truncate the -shm file. We
** truncate to 3 bytes (an arbitrary small number, less than the
** -shm header size) rather than 0 as a system debugging aid, to
@@ -41863,6 +42703,18 @@ static int unixOpenSharedMemory(unixFile *pDbFd){
rc = SQLITE_NOMEM_BKPT;
goto shm_open_err;
}
+#ifdef SQLITE_ENABLE_SETLK_TIMEOUT
+ {
+ int ii;
+ for(ii=0; ii<SQLITE_SHM_NLOCK; ii++){
+ pShmNode->aMutex[ii] = sqlite3_mutex_alloc(SQLITE_MUTEX_FAST);
+ if( pShmNode->aMutex[ii]==0 ){
+ rc = SQLITE_NOMEM_BKPT;
+ goto shm_open_err;
+ }
+ }
+ }
+#endif
}
if( pInode->bProcessLock==0 ){
@@ -42084,9 +42936,11 @@ shmpage_out:
*/
#ifdef SQLITE_DEBUG
static int assertLockingArrayOk(unixShmNode *pShmNode){
+#ifdef SQLITE_ENABLE_SETLK_TIMEOUT
+ return 1;
+#else
unixShm *pX;
int aLock[SQLITE_SHM_NLOCK];
- assert( sqlite3_mutex_held(pShmNode->pShmMutex) );
memset(aLock, 0, sizeof(aLock));
for(pX=pShmNode->pFirst; pX; pX=pX->pNext){
@@ -42104,13 +42958,14 @@ static int assertLockingArrayOk(unixShmNode *pShmNode){
assert( 0==memcmp(pShmNode->aLock, aLock, sizeof(aLock)) );
return (memcmp(pShmNode->aLock, aLock, sizeof(aLock))==0);
+#endif
}
#endif
/*
** Change the lock state for a shared-memory segment.
**
-** Note that the relationship between SHAREd and EXCLUSIVE locks is a little
+** Note that the relationship between SHARED and EXCLUSIVE locks is a little
** different here than in posix. In xShmLock(), one can go from unlocked
** to shared and back or from unlocked to exclusive and back. But one may
** not go from shared to exclusive or from exclusive to shared.
@@ -42125,7 +42980,7 @@ static int unixShmLock(
unixShm *p; /* The shared memory being locked */
unixShmNode *pShmNode; /* The underlying file iNode */
int rc = SQLITE_OK; /* Result code */
- u16 mask; /* Mask of locks to take or release */
+ u16 mask = (1<<(ofst+n)) - (1<<ofst); /* Mask of locks to take or release */
int *aLock;
p = pDbFd->pShm;
@@ -42160,88 +43015,151 @@ static int unixShmLock(
** It is not permitted to block on the RECOVER lock.
*/
#ifdef SQLITE_ENABLE_SETLK_TIMEOUT
- assert( (flags & SQLITE_SHM_UNLOCK) || pDbFd->iBusyTimeout==0 || (
- (ofst!=2) /* not RECOVER */
- && (ofst!=1 || (p->exclMask|p->sharedMask)==0)
- && (ofst!=0 || (p->exclMask|p->sharedMask)<3)
- && (ofst<3 || (p->exclMask|p->sharedMask)<(1<<ofst))
- ));
+ {
+ u16 lockMask = (p->exclMask|p->sharedMask);
+ assert( (flags & SQLITE_SHM_UNLOCK) || pDbFd->iBusyTimeout==0 || (
+ (ofst!=2) /* not RECOVER */
+ && (ofst!=1 || lockMask==0 || lockMask==2)
+ && (ofst!=0 || lockMask<3)
+ && (ofst<3 || lockMask<(1<<ofst))
+ ));
+ }
#endif
- mask = (1<<(ofst+n)) - (1<<ofst);
- assert( n>1 || mask==(1<<ofst) );
- sqlite3_mutex_enter(pShmNode->pShmMutex);
- assert( assertLockingArrayOk(pShmNode) );
- if( flags & SQLITE_SHM_UNLOCK ){
- if( (p->exclMask|p->sharedMask) & mask ){
- int ii;
- int bUnlock = 1;
+ /* Check if there is any work to do. There are three cases:
+ **
+ ** a) An unlock operation where there are locks to unlock,
+ ** b) An shared lock where the requested lock is not already held
+ ** c) An exclusive lock where the requested lock is not already held
+ **
+ ** The SQLite core never requests an exclusive lock that it already holds.
+ ** This is assert()ed below.
+ */
+ assert( flags!=(SQLITE_SHM_EXCLUSIVE|SQLITE_SHM_LOCK)
+ || 0==(p->exclMask & mask)
+ );
+ if( ((flags & SQLITE_SHM_UNLOCK) && ((p->exclMask|p->sharedMask) & mask))
+ || (flags==(SQLITE_SHM_SHARED|SQLITE_SHM_LOCK) && 0==(p->sharedMask & mask))
+ || (flags==(SQLITE_SHM_EXCLUSIVE|SQLITE_SHM_LOCK))
+ ){
- for(ii=ofst; ii<ofst+n; ii++){
- if( aLock[ii]>((p->sharedMask & (1<<ii)) ? 1 : 0) ){
- bUnlock = 0;
- }
+ /* Take the required mutexes. In SETLK_TIMEOUT mode (blocking locks), if
+ ** this is an attempt on an exclusive lock use sqlite3_mutex_try(). If any
+ ** other thread is holding this mutex, then it is either holding or about
+ ** to hold a lock exclusive to the one being requested, and we may
+ ** therefore return SQLITE_BUSY to the caller.
+ **
+ ** Doing this prevents some deadlock scenarios. For example, thread 1 may
+ ** be a checkpointer blocked waiting on the WRITER lock. And thread 2
+ ** may be a normal SQL client upgrading to a write transaction. In this
+ ** case thread 2 does a non-blocking request for the WRITER lock. But -
+ ** if it were to use sqlite3_mutex_enter() then it would effectively
+ ** become a (doomed) blocking request, as thread 2 would block until thread
+ ** 1 obtained WRITER and released the mutex. Since thread 2 already holds
+ ** a lock on a read-locking slot at this point, this breaks the
+ ** anti-deadlock rules (see above). */
+#ifdef SQLITE_ENABLE_SETLK_TIMEOUT
+ int iMutex;
+ for(iMutex=ofst; iMutex<ofst+n; iMutex++){
+ if( flags==(SQLITE_SHM_LOCK|SQLITE_SHM_EXCLUSIVE) ){
+ rc = sqlite3_mutex_try(pShmNode->aMutex[iMutex]);
+ if( rc!=SQLITE_OK ) goto leave_shmnode_mutexes;
+ }else{
+ sqlite3_mutex_enter(pShmNode->aMutex[iMutex]);
}
+ }
+#else
+ sqlite3_mutex_enter(pShmNode->pShmMutex);
+#endif
- if( bUnlock ){
- rc = unixShmSystemLock(pDbFd, F_UNLCK, ofst+UNIX_SHM_BASE, n);
- if( rc==SQLITE_OK ){
- memset(&aLock[ofst], 0, sizeof(int)*n);
+ if( ALWAYS(rc==SQLITE_OK) ){
+ if( flags & SQLITE_SHM_UNLOCK ){
+ /* Case (a) - unlock. */
+ int bUnlock = 1;
+ assert( (p->exclMask & p->sharedMask)==0 );
+ assert( !(flags & SQLITE_SHM_EXCLUSIVE) || (p->exclMask & mask)==mask );
+ assert( !(flags & SQLITE_SHM_SHARED) || (p->sharedMask & mask)==mask );
+
+ /* If this is a SHARED lock being unlocked, it is possible that other
+ ** clients within this process are holding the same SHARED lock. In
+ ** this case, set bUnlock to 0 so that the posix lock is not removed
+ ** from the file-descriptor below. */
+ if( flags & SQLITE_SHM_SHARED ){
+ assert( n==1 );
+ assert( aLock[ofst]>=1 );
+ if( aLock[ofst]>1 ){
+ bUnlock = 0;
+ aLock[ofst]--;
+ p->sharedMask &= ~mask;
+ }
}
- }else if( ALWAYS(p->sharedMask & (1<<ofst)) ){
- assert( n==1 && aLock[ofst]>1 );
- aLock[ofst]--;
- }
- /* Undo the local locks */
- if( rc==SQLITE_OK ){
- p->exclMask &= ~mask;
- p->sharedMask &= ~mask;
- }
- }
- }else if( flags & SQLITE_SHM_SHARED ){
- assert( n==1 );
- assert( (p->exclMask & (1<<ofst))==0 );
- if( (p->sharedMask & mask)==0 ){
- if( aLock[ofst]<0 ){
- rc = SQLITE_BUSY;
- }else if( aLock[ofst]==0 ){
- rc = unixShmSystemLock(pDbFd, F_RDLCK, ofst+UNIX_SHM_BASE, n);
- }
+ if( bUnlock ){
+ rc = unixShmSystemLock(pDbFd, F_UNLCK, ofst+UNIX_SHM_BASE, n);
+ if( rc==SQLITE_OK ){
+ memset(&aLock[ofst], 0, sizeof(int)*n);
+ p->sharedMask &= ~mask;
+ p->exclMask &= ~mask;
+ }
+ }
+ }else if( flags & SQLITE_SHM_SHARED ){
+ /* Case (b) - a shared lock. */
- /* Get the local shared locks */
- if( rc==SQLITE_OK ){
- p->sharedMask |= mask;
- aLock[ofst]++;
- }
- }
- }else{
- /* Make sure no sibling connections hold locks that will block this
- ** lock. If any do, return SQLITE_BUSY right away. */
- int ii;
- for(ii=ofst; ii<ofst+n; ii++){
- assert( (p->sharedMask & mask)==0 );
- if( ALWAYS((p->exclMask & (1<<ii))==0) && aLock[ii] ){
- rc = SQLITE_BUSY;
- break;
- }
- }
+ if( aLock[ofst]<0 ){
+ /* An exclusive lock is held by some other connection. BUSY. */
+ rc = SQLITE_BUSY;
+ }else if( aLock[ofst]==0 ){
+ rc = unixShmSystemLock(pDbFd, F_RDLCK, ofst+UNIX_SHM_BASE, n);
+ }
- /* Get the exclusive locks at the system level. Then if successful
- ** also update the in-memory values. */
- if( rc==SQLITE_OK ){
- rc = unixShmSystemLock(pDbFd, F_WRLCK, ofst+UNIX_SHM_BASE, n);
- if( rc==SQLITE_OK ){
+ /* Get the local shared locks */
+ if( rc==SQLITE_OK ){
+ p->sharedMask |= mask;
+ aLock[ofst]++;
+ }
+ }else{
+ /* Case (c) - an exclusive lock. */
+ int ii;
+
+ assert( flags==(SQLITE_SHM_LOCK|SQLITE_SHM_EXCLUSIVE) );
assert( (p->sharedMask & mask)==0 );
- p->exclMask |= mask;
+ assert( (p->exclMask & mask)==0 );
+
+ /* Make sure no sibling connections hold locks that will block this
+ ** lock. If any do, return SQLITE_BUSY right away. */
for(ii=ofst; ii<ofst+n; ii++){
- aLock[ii] = -1;
+ if( aLock[ii] ){
+ rc = SQLITE_BUSY;
+ break;
+ }
+ }
+
+ /* Get the exclusive locks at the system level. Then if successful
+ ** also update the in-memory values. */
+ if( rc==SQLITE_OK ){
+ rc = unixShmSystemLock(pDbFd, F_WRLCK, ofst+UNIX_SHM_BASE, n);
+ if( rc==SQLITE_OK ){
+ p->exclMask |= mask;
+ for(ii=ofst; ii<ofst+n; ii++){
+ aLock[ii] = -1;
+ }
+ }
}
}
+ assert( assertLockingArrayOk(pShmNode) );
}
+
+ /* Drop the mutexes acquired above. */
+#ifdef SQLITE_ENABLE_SETLK_TIMEOUT
+ leave_shmnode_mutexes:
+ for(iMutex--; iMutex>=ofst; iMutex--){
+ sqlite3_mutex_leave(pShmNode->aMutex[iMutex]);
+ }
+#else
+ sqlite3_mutex_leave(pShmNode->pShmMutex);
+#endif
}
- assert( assertLockingArrayOk(pShmNode) );
- sqlite3_mutex_leave(pShmNode->pShmMutex);
+
OSTRACE(("SHM-LOCK shmid-%d, pid-%d got %03x,%03x\n",
p->id, osGetpid(0), p->sharedMask, p->exclMask));
return rc;
@@ -42491,11 +43409,16 @@ static int unixFetch(sqlite3_file *fd, i64 iOff, int nAmt, void **pp){
#if SQLITE_MAX_MMAP_SIZE>0
if( pFd->mmapSizeMax>0 ){
+ /* Ensure that there is always at least a 256 byte buffer of addressable
+ ** memory following the returned page. If the database is corrupt,
+ ** SQLite may overread the page slightly (in practice only a few bytes,
+ ** but 256 is safe, round, number). */
+ const int nEofBuffer = 256;
if( pFd->pMapRegion==0 ){
int rc = unixMapfile(pFd, -1);
if( rc!=SQLITE_OK ) return rc;
}
- if( pFd->mmapSize >= iOff+nAmt ){
+ if( pFd->mmapSize >= (iOff+nAmt+nEofBuffer) ){
*pp = &((u8 *)pFd->pMapRegion)[iOff];
pFd->nFetchOut++;
}
@@ -43886,12 +44809,17 @@ static int unixRandomness(sqlite3_vfs *NotUsed, int nBuf, char *zBuf){
** than the argument.
*/
static int unixSleep(sqlite3_vfs *NotUsed, int microseconds){
-#if OS_VXWORKS || _POSIX_C_SOURCE >= 199309L
+#if !defined(HAVE_NANOSLEEP) || HAVE_NANOSLEEP+0
struct timespec sp;
-
sp.tv_sec = microseconds / 1000000;
sp.tv_nsec = (microseconds % 1000000) * 1000;
+
+ /* Almost all modern unix systems support nanosleep(). But if you are
+ ** compiling for one of the rare exceptions, you can use
+ ** -DHAVE_NANOSLEEP=0 (perhaps in conjuction with -DHAVE_USLEEP if
+ ** usleep() is available) in order to bypass the use of nanosleep() */
nanosleep(&sp, NULL);
+
UNUSED_PARAMETER(NotUsed);
return microseconds;
#elif defined(HAVE_USLEEP) && HAVE_USLEEP
@@ -46481,7 +47409,7 @@ static struct win_syscall {
/*
** This is the xSetSystemCall() method of sqlite3_vfs for all of the
-** "win32" VFSes. Return SQLITE_OK opon successfully updating the
+** "win32" VFSes. Return SQLITE_OK upon successfully updating the
** system call pointer, or SQLITE_NOTFOUND if there is no configurable
** system call named zName.
*/
@@ -48061,7 +48989,7 @@ static int winRead(
pFile->h, pBuf, amt, offset, pFile->locktype));
#if SQLITE_MAX_MMAP_SIZE>0
- /* Deal with as much of this read request as possible by transfering
+ /* Deal with as much of this read request as possible by transferring
** data from the memory mapping using memcpy(). */
if( offset<pFile->mmapSize ){
if( offset+amt <= pFile->mmapSize ){
@@ -48139,7 +49067,7 @@ static int winWrite(
pFile->h, pBuf, amt, offset, pFile->locktype));
#if defined(SQLITE_MMAP_READWRITE) && SQLITE_MAX_MMAP_SIZE>0
- /* Deal with as much of this write request as possible by transfering
+ /* Deal with as much of this write request as possible by transferring
** data from the memory mapping using memcpy(). */
if( offset<pFile->mmapSize ){
if( offset+amt <= pFile->mmapSize ){
@@ -48249,7 +49177,7 @@ static int winTruncate(sqlite3_file *id, sqlite3_int64 nByte){
** all references to memory-mapped content are closed. That is doable,
** but involves adding a few branches in the common write code path which
** could slow down normal operations slightly. Hence, we have decided for
- ** now to simply make trancations a no-op if there are pending reads. We
+ ** now to simply make transactions a no-op if there are pending reads. We
** can maybe revisit this decision in the future.
*/
return SQLITE_OK;
@@ -48308,7 +49236,7 @@ static int winTruncate(sqlite3_file *id, sqlite3_int64 nByte){
#ifdef SQLITE_TEST
/*
** Count the number of fullsyncs and normal syncs. This is used to test
-** that syncs and fullsyncs are occuring at the right times.
+** that syncs and fullsyncs are occurring at the right times.
*/
SQLITE_API int sqlite3_sync_count = 0;
SQLITE_API int sqlite3_fullsync_count = 0;
@@ -48665,7 +49593,7 @@ static int winLock(sqlite3_file *id, int locktype){
*/
if( locktype==EXCLUSIVE_LOCK && res ){
assert( pFile->locktype>=SHARED_LOCK );
- res = winUnlockReadLock(pFile);
+ (void)winUnlockReadLock(pFile);
res = winLockFile(&pFile->h, SQLITE_LOCKFILE_FLAGS, SHARED_FIRST, 0,
SHARED_SIZE, 0);
if( res ){
@@ -49843,6 +50771,11 @@ static int winFetch(sqlite3_file *fd, i64 iOff, int nAmt, void **pp){
#if SQLITE_MAX_MMAP_SIZE>0
if( pFd->mmapSizeMax>0 ){
+ /* Ensure that there is always at least a 256 byte buffer of addressable
+ ** memory following the returned page. If the database is corrupt,
+ ** SQLite may overread the page slightly (in practice only a few bytes,
+ ** but 256 is safe, round, number). */
+ const int nEofBuffer = 256;
if( pFd->pMapRegion==0 ){
int rc = winMapfile(pFd, -1);
if( rc!=SQLITE_OK ){
@@ -49851,7 +50784,7 @@ static int winFetch(sqlite3_file *fd, i64 iOff, int nAmt, void **pp){
return rc;
}
}
- if( pFd->mmapSize >= iOff+nAmt ){
+ if( pFd->mmapSize >= (iOff+nAmt+nEofBuffer) ){
assert( pFd->pMapRegion!=0 );
*pp = &((u8 *)pFd->pMapRegion)[iOff];
pFd->nFetchOut++;
@@ -50069,6 +51002,7 @@ static int winGetTempname(sqlite3_vfs *pVfs, char **pzBuf){
"ABCDEFGHIJKLMNOPQRSTUVWXYZ"
"0123456789";
size_t i, j;
+ DWORD pid;
int nPre = sqlite3Strlen30(SQLITE_TEMP_FILE_PREFIX);
int nMax, nBuf, nDir, nLen;
char *zBuf;
@@ -50281,7 +51215,10 @@ static int winGetTempname(sqlite3_vfs *pVfs, char **pzBuf){
j = sqlite3Strlen30(zBuf);
sqlite3_randomness(15, &zBuf[j]);
+ pid = osGetCurrentProcessId();
for(i=0; i<15; i++, j++){
+ zBuf[j] += pid & 0xff;
+ pid >>= 8;
zBuf[j] = (char)zChars[ ((unsigned char)zBuf[j])%(sizeof(zChars)-1) ];
}
zBuf[j] = 0;
@@ -52646,7 +53583,7 @@ SQLITE_PRIVATE int sqlite3BitvecSet(Bitvec *p, u32 i){
h = BITVEC_HASH(i++);
/* if there wasn't a hash collision, and this doesn't */
/* completely fill the hash, then just add it without */
- /* worring about sub-dividing and re-hashing. */
+ /* worrying about sub-dividing and re-hashing. */
if( !p->u.aHash[h] ){
if (p->nSet<(BITVEC_NINT-1)) {
goto bitvec_set_end;
@@ -52979,7 +53916,7 @@ struct PCache {
** Return 1 if pPg is on the dirty list for pCache. Return 0 if not.
** This routine runs inside of assert() statements only.
*/
-#ifdef SQLITE_DEBUG
+#if defined(SQLITE_ENABLE_EXPENSIVE_ASSERT)
static int pageOnDirtyList(PCache *pCache, PgHdr *pPg){
PgHdr *p;
for(p=pCache->pDirty; p; p=p->pDirtyNext){
@@ -52987,6 +53924,16 @@ static int pageOnDirtyList(PCache *pCache, PgHdr *pPg){
}
return 0;
}
+static int pageNotOnDirtyList(PCache *pCache, PgHdr *pPg){
+ PgHdr *p;
+ for(p=pCache->pDirty; p; p=p->pDirtyNext){
+ if( p==pPg ) return 0;
+ }
+ return 1;
+}
+#else
+# define pageOnDirtyList(A,B) 1
+# define pageNotOnDirtyList(A,B) 1
#endif
/*
@@ -53007,7 +53954,7 @@ SQLITE_PRIVATE int sqlite3PcachePageSanity(PgHdr *pPg){
assert( pCache!=0 ); /* Every page has an associated PCache */
if( pPg->flags & PGHDR_CLEAN ){
assert( (pPg->flags & PGHDR_DIRTY)==0 );/* Cannot be both CLEAN and DIRTY */
- assert( !pageOnDirtyList(pCache, pPg) );/* CLEAN pages not on dirty list */
+ assert( pageNotOnDirtyList(pCache, pPg) );/* CLEAN pages not on dirtylist */
}else{
assert( (pPg->flags & PGHDR_DIRTY)!=0 );/* If not CLEAN must be DIRTY */
assert( pPg->pDirtyNext==0 || pPg->pDirtyNext->pDirtyPrev==pPg );
@@ -53143,7 +54090,7 @@ static int numberOfCachePages(PCache *p){
return p->szCache;
}else{
i64 n;
- /* IMPLEMANTATION-OF: R-59858-46238 If the argument N is negative, then the
+ /* IMPLEMENTATION-OF: R-59858-46238 If the argument N is negative, then the
** number of cache pages is adjusted to be a number of pages that would
** use approximately abs(N*1024) bytes of memory based on the current
** page size. */
@@ -53631,7 +54578,7 @@ static PgHdr *pcacheMergeDirtyList(PgHdr *pA, PgHdr *pB){
}
/*
-** Sort the list of pages in accending order by pgno. Pages are
+** Sort the list of pages in ascending order by pgno. Pages are
** connected by pDirty pointers. The pDirtyPrev pointers are
** corrupted by this sort.
**
@@ -53871,7 +54818,7 @@ SQLITE_PRIVATE void sqlite3PcacheIterateDirty(PCache *pCache, void (*xIter)(PgHd
** If N is positive, then N pages worth of memory are allocated using a single
** sqlite3Malloc() call and that memory is used for the first N pages allocated.
** Or if N is negative, then -1024*N bytes of memory are allocated and used
-** for as many pages as can be accomodated.
+** for as many pages as can be accommodated.
**
** Only one of (2) or (3) can be used. Once the memory available to (2) or
** (3) is exhausted, subsequent allocations fail over to the general-purpose
@@ -53905,7 +54852,7 @@ typedef struct PGroup PGroup;
** in memory directly after the associated page data, if the database is
** corrupt, code at the b-tree layer may overread the page buffer and
** read part of this structure before the corruption is detected. This
-** can cause a valgrind error if the unitialized gap is accessed. Using u16
+** can cause a valgrind error if the uninitialized gap is accessed. Using u16
** ensures there is no such gap, and therefore no bytes of uninitialized
** memory in the structure.
**
@@ -55125,7 +56072,7 @@ SQLITE_PRIVATE void sqlite3PcacheStats(
** The TEST primitive includes a "batch" number. The TEST primitive
** will only see elements that were inserted before the last change
** in the batch number. In other words, if an INSERT occurs between
-** two TESTs where the TESTs have the same batch nubmer, then the
+** two TESTs where the TESTs have the same batch number, then the
** value added by the INSERT will not be visible to the second TEST.
** The initial batch number is zero, so if the very first TEST contains
** a non-zero batch number, it will see all prior INSERTs.
@@ -55657,6 +56604,7 @@ SQLITE_PRIVATE int sqlite3RowSetTest(RowSet *pRowSet, int iBatch, sqlite3_int64
# define sqlite3WalFramesize(z) 0
# define sqlite3WalFindFrame(x,y,z) 0
# define sqlite3WalFile(x) 0
+# undef SQLITE_USE_SEH
#else
#define WAL_SAVEPOINT_NDATA 4
@@ -55763,6 +56711,10 @@ SQLITE_PRIVATE int sqlite3WalWriteLock(Wal *pWal, int bLock);
SQLITE_PRIVATE void sqlite3WalDb(Wal *pWal, sqlite3 *db);
#endif
+#ifdef SQLITE_USE_SEH
+SQLITE_PRIVATE int sqlite3WalSystemErrno(Wal*);
+#endif
+
#endif /* ifndef SQLITE_OMIT_WAL */
#endif /* SQLITE_WAL_H */
@@ -56048,7 +57000,7 @@ int sqlite3PagerTrace=1; /* True to enable tracing */
** outstanding transactions have been abandoned, the pager is able to
** transition back to OPEN state, discarding the contents of the
** page-cache and any other in-memory state at the same time. Everything
-** is reloaded from disk (and, if necessary, hot-journal rollback peformed)
+** is reloaded from disk (and, if necessary, hot-journal rollback performed)
** when a read-transaction is next opened on the pager (transitioning
** the pager into READER state). At that point the system has recovered
** from the error.
@@ -56435,7 +57387,7 @@ struct Pager {
char *zJournal; /* Name of the journal file */
int (*xBusyHandler)(void*); /* Function to call when busy */
void *pBusyHandlerArg; /* Context argument for xBusyHandler */
- int aStat[4]; /* Total cache hits, misses, writes, spills */
+ u32 aStat[4]; /* Total cache hits, misses, writes, spills */
#ifdef SQLITE_TEST
int nRead; /* Database pages read */
#endif
@@ -56565,9 +57517,8 @@ SQLITE_PRIVATE int sqlite3PagerDirectReadOk(Pager *pPager, Pgno pgno){
#ifndef SQLITE_OMIT_WAL
if( pPager->pWal ){
u32 iRead = 0;
- int rc;
- rc = sqlite3WalFindFrame(pPager->pWal, pgno, &iRead);
- return (rc==SQLITE_OK && iRead==0);
+ (void)sqlite3WalFindFrame(pPager->pWal, pgno, &iRead);
+ return iRead==0;
}
#endif
return 1;
@@ -57239,9 +58190,32 @@ static int writeJournalHdr(Pager *pPager){
memset(zHeader, 0, sizeof(aJournalMagic)+4);
}
+
+
/* The random check-hash initializer */
- sqlite3_randomness(sizeof(pPager->cksumInit), &pPager->cksumInit);
+ if( pPager->journalMode!=PAGER_JOURNALMODE_MEMORY ){
+ sqlite3_randomness(sizeof(pPager->cksumInit), &pPager->cksumInit);
+ }
+#ifdef SQLITE_DEBUG
+ else{
+ /* The Pager.cksumInit variable is usually randomized above to protect
+ ** against there being existing records in the journal file. This is
+ ** dangerous, as following a crash they may be mistaken for records
+ ** written by the current transaction and rolled back into the database
+ ** file, causing corruption. The following assert statements verify
+ ** that this is not required in "journal_mode=memory" mode, as in that
+ ** case the journal file is always 0 bytes in size at this point.
+ ** It is advantageous to avoid the sqlite3_randomness() call if possible
+ ** as it takes the global PRNG mutex. */
+ i64 sz = 0;
+ sqlite3OsFileSize(pPager->jfd, &sz);
+ assert( sz==0 );
+ assert( pPager->journalOff==journalHdrOffset(pPager) );
+ assert( sqlite3JournalIsInMemory(pPager->jfd) );
+ }
+#endif
put32bits(&zHeader[sizeof(aJournalMagic)+4], pPager->cksumInit);
+
/* The initial database size */
put32bits(&zHeader[sizeof(aJournalMagic)+8], pPager->dbOrigSize);
/* The assumed sector size for this process */
@@ -57421,7 +58395,7 @@ static int readJournalHdr(
** + 4 bytes: super-journal name checksum.
** + 8 bytes: aJournalMagic[].
**
-** The super-journal page checksum is the sum of the bytes in thesuper-journal
+** The super-journal page checksum is the sum of the bytes in the super-journal
** name, where each byte is interpreted as a signed 8-bit integer.
**
** If zSuper is a NULL pointer (occurs for a single database transaction),
@@ -57474,7 +58448,7 @@ static int writeSuperJournal(Pager *pPager, const char *zSuper){
}
pPager->journalOff += (nSuper+20);
- /* If the pager is in peristent-journal mode, then the physical
+ /* If the pager is in persistent-journal mode, then the physical
** journal-file may extend past the end of the super-journal name
** and 8 bytes of magic data just written to the file. This is
** dangerous because the code to rollback a hot-journal file
@@ -57644,7 +58618,7 @@ static void pager_unlock(Pager *pPager){
/*
** This function is called whenever an IOERR or FULL error that requires
-** the pager to transition into the ERROR state may ahve occurred.
+** the pager to transition into the ERROR state may have occurred.
** The first argument is a pointer to the pager structure, the second
** the error-code about to be returned by a pager API function. The
** value returned is a copy of the second argument to this function.
@@ -57885,6 +58859,9 @@ static int pager_end_transaction(Pager *pPager, int hasSuper, int bCommit){
return (rc==SQLITE_OK?rc2:rc);
}
+/* Forward reference */
+static int pager_playback(Pager *pPager, int isHot);
+
/*
** Execute a rollback if a transaction is active and unlock the
** database file.
@@ -57913,13 +58890,28 @@ static void pagerUnlockAndRollback(Pager *pPager){
assert( pPager->eState==PAGER_READER );
pager_end_transaction(pPager, 0, 0);
}
+ }else if( pPager->eState==PAGER_ERROR
+ && pPager->journalMode==PAGER_JOURNALMODE_MEMORY
+ && isOpen(pPager->jfd)
+ ){
+ /* Special case for a ROLLBACK due to I/O error with an in-memory
+ ** journal: We have to rollback immediately, before the journal is
+ ** closed, because once it is closed, all content is forgotten. */
+ int errCode = pPager->errCode;
+ u8 eLock = pPager->eLock;
+ pPager->eState = PAGER_OPEN;
+ pPager->errCode = SQLITE_OK;
+ pPager->eLock = EXCLUSIVE_LOCK;
+ pager_playback(pPager, 1);
+ pPager->errCode = errCode;
+ pPager->eLock = eLock;
}
pager_unlock(pPager);
}
/*
** Parameter aData must point to a buffer of pPager->pageSize bytes
-** of data. Compute and return a checksum based ont the contents of the
+** of data. Compute and return a checksum based on the contents of the
** page of data and the current value of pPager->cksumInit.
**
** This is not a real checksum. It is really just the sum of the
@@ -58885,7 +59877,7 @@ static int pagerWalFrames(
assert( pPager->pWal );
assert( pList );
#ifdef SQLITE_DEBUG
- /* Verify that the page list is in accending order */
+ /* Verify that the page list is in ascending order */
for(p=pList; p && p->pDirty; p=p->pDirty){
assert( p->pgno < p->pDirty->pgno );
}
@@ -59016,7 +60008,7 @@ static int pagerPagecount(Pager *pPager, Pgno *pnPage){
#ifndef SQLITE_OMIT_WAL
/*
** Check if the *-wal file that corresponds to the database opened by pPager
-** exists if the database is not empy, or verify that the *-wal file does
+** exists if the database is not empty, or verify that the *-wal file does
** not exist (by deleting it) if the database file is empty.
**
** If the database is not empty and the *-wal file exists, open the pager
@@ -60426,11 +61418,7 @@ SQLITE_PRIVATE int sqlite3PagerOpen(
int rc = SQLITE_OK; /* Return code */
int tempFile = 0; /* True for temp files (incl. in-memory files) */
int memDb = 0; /* True if this is an in-memory file */
-#ifndef SQLITE_OMIT_DESERIALIZE
int memJM = 0; /* Memory journal mode */
-#else
-# define memJM 0
-#endif
int readOnly = 0; /* True if this is a read-only file */
int journalFileSize; /* Bytes to allocate for each journal fd */
char *zPathname = 0; /* Full path to database file */
@@ -60549,12 +61537,13 @@ SQLITE_PRIVATE int sqlite3PagerOpen(
** specific formatting and order of the various filenames, so if the format
** changes here, be sure to change it there as well.
*/
+ assert( SQLITE_PTRSIZE==sizeof(Pager*) );
pPtr = (u8 *)sqlite3MallocZero(
ROUND8(sizeof(*pPager)) + /* Pager structure */
ROUND8(pcacheSize) + /* PCache object */
ROUND8(pVfs->szOsFile) + /* The main db file */
journalFileSize * 2 + /* The two journal files */
- sizeof(pPager) + /* Space to hold a pointer */
+ SQLITE_PTRSIZE + /* Space to hold a pointer */
4 + /* Database prefix */
nPathname + 1 + /* database filename */
nUriByte + /* query parameters */
@@ -60575,7 +61564,7 @@ SQLITE_PRIVATE int sqlite3PagerOpen(
pPager->sjfd = (sqlite3_file*)pPtr; pPtr += journalFileSize;
pPager->jfd = (sqlite3_file*)pPtr; pPtr += journalFileSize;
assert( EIGHT_BYTE_ALIGNMENT(pPager->jfd) );
- memcpy(pPtr, &pPager, sizeof(pPager)); pPtr += sizeof(pPager);
+ memcpy(pPtr, &pPager, SQLITE_PTRSIZE); pPtr += SQLITE_PTRSIZE;
/* Fill in the Pager.zFilename and pPager.zQueryParam fields */
pPtr += 4; /* Skip zero prefix */
@@ -60629,9 +61618,7 @@ SQLITE_PRIVATE int sqlite3PagerOpen(
int fout = 0; /* VFS flags returned by xOpen() */
rc = sqlite3OsOpen(pVfs, pPager->zFilename, pPager->fd, vfsFlags, &fout);
assert( !memDb );
-#ifndef SQLITE_OMIT_DESERIALIZE
pPager->memVfs = memJM = (fout&SQLITE_OPEN_MEMORY)!=0;
-#endif
readOnly = (fout&SQLITE_OPEN_READONLY)!=0;
/* If the file was successfully opened for read/write access,
@@ -60768,15 +61755,18 @@ act_like_temp_file:
/*
** Return the sqlite3_file for the main database given the name
-** of the corresonding WAL or Journal name as passed into
+** of the corresponding WAL or Journal name as passed into
** xOpen.
*/
SQLITE_API sqlite3_file *sqlite3_database_file_object(const char *zName){
Pager *pPager;
+ const char *p;
while( zName[-1]!=0 || zName[-2]!=0 || zName[-3]!=0 || zName[-4]!=0 ){
zName--;
}
- pPager = *(Pager**)(zName - 4 - sizeof(Pager*));
+ p = zName - 4 - sizeof(Pager*);
+ assert( EIGHT_BYTE_ALIGNMENT(p) );
+ pPager = *(Pager**)p;
return pPager->fd;
}
@@ -61410,8 +62400,20 @@ SQLITE_PRIVATE int sqlite3PagerGet(
DbPage **ppPage, /* Write a pointer to the page here */
int flags /* PAGER_GET_XXX flags */
){
- /* printf("PAGE %u\n", pgno); fflush(stdout); */
+#if 0 /* Trace page fetch by setting to 1 */
+ int rc;
+ printf("PAGE %u\n", pgno);
+ fflush(stdout);
+ rc = pPager->xGet(pPager, pgno, ppPage, flags);
+ if( rc ){
+ printf("PAGE %u failed with 0x%02x\n", pgno, rc);
+ fflush(stdout);
+ }
+ return rc;
+#else
+ /* Normal, high-speed version of sqlite3PagerGet() */
return pPager->xGet(pPager, pgno, ppPage, flags);
+#endif
}
/*
@@ -62287,6 +63289,13 @@ SQLITE_PRIVATE int sqlite3PagerCommitPhaseOne(
rc = sqlite3OsFileControl(fd, SQLITE_FCNTL_BEGIN_ATOMIC_WRITE, 0);
if( rc==SQLITE_OK ){
rc = pager_write_pagelist(pPager, pList);
+ if( rc==SQLITE_OK && pPager->dbSize>pPager->dbFileSize ){
+ char *pTmp = pPager->pTmpSpace;
+ int szPage = (int)pPager->pageSize;
+ memset(pTmp, 0, szPage);
+ rc = sqlite3OsWrite(pPager->fd, pTmp, szPage,
+ ((i64)pPager->dbSize*pPager->pageSize)-szPage);
+ }
if( rc==SQLITE_OK ){
rc = sqlite3OsFileControl(fd, SQLITE_FCNTL_COMMIT_ATOMIC_WRITE, 0);
}
@@ -62521,11 +63530,11 @@ SQLITE_PRIVATE int *sqlite3PagerStats(Pager *pPager){
a[3] = pPager->eState==PAGER_OPEN ? -1 : (int) pPager->dbSize;
a[4] = pPager->eState;
a[5] = pPager->errCode;
- a[6] = pPager->aStat[PAGER_STAT_HIT];
- a[7] = pPager->aStat[PAGER_STAT_MISS];
+ a[6] = (int)pPager->aStat[PAGER_STAT_HIT] & 0x7fffffff;
+ a[7] = (int)pPager->aStat[PAGER_STAT_MISS] & 0x7fffffff;
a[8] = 0; /* Used to be pPager->nOvfl */
a[9] = pPager->nRead;
- a[10] = pPager->aStat[PAGER_STAT_WRITE];
+ a[10] = (int)pPager->aStat[PAGER_STAT_WRITE] & 0x7fffffff;
return a;
}
#endif
@@ -62541,7 +63550,7 @@ SQLITE_PRIVATE int *sqlite3PagerStats(Pager *pPager){
** reset parameter is non-zero, the cache hit or miss count is zeroed before
** returning.
*/
-SQLITE_PRIVATE void sqlite3PagerCacheStat(Pager *pPager, int eStat, int reset, int *pnVal){
+SQLITE_PRIVATE void sqlite3PagerCacheStat(Pager *pPager, int eStat, int reset, u64 *pnVal){
assert( eStat==SQLITE_DBSTATUS_CACHE_HIT
|| eStat==SQLITE_DBSTATUS_CACHE_MISS
@@ -63053,7 +64062,7 @@ SQLITE_PRIVATE int sqlite3PagerSetJournalMode(Pager *pPager, int eMode){
assert( pPager->eState!=PAGER_ERROR );
pPager->journalMode = (u8)eMode;
- /* When transistioning from TRUNCATE or PERSIST to any other journal
+ /* When transitioning from TRUNCATE or PERSIST to any other journal
** mode except WAL, unless the pager is in locking_mode=exclusive mode,
** delete the journal file.
*/
@@ -63098,7 +64107,7 @@ SQLITE_PRIVATE int sqlite3PagerSetJournalMode(Pager *pPager, int eMode){
}
assert( state==pPager->eState );
}
- }else if( eMode==PAGER_JOURNALMODE_OFF ){
+ }else if( eMode==PAGER_JOURNALMODE_OFF || eMode==PAGER_JOURNALMODE_MEMORY ){
sqlite3OsClose(pPager->jfd);
}
}
@@ -63481,6 +64490,12 @@ SQLITE_PRIVATE int sqlite3PagerWalFramesize(Pager *pPager){
}
#endif
+#if defined(SQLITE_USE_SEH) && !defined(SQLITE_OMIT_WAL)
+SQLITE_PRIVATE int sqlite3PagerWalSystemErrno(Pager *pPager){
+ return sqlite3WalSystemErrno(pPager->pWal);
+}
+#endif
+
#endif /* SQLITE_OMIT_DISKIO */
/************** End of pager.c ***********************************************/
@@ -63771,7 +64786,7 @@ SQLITE_PRIVATE int sqlite3WalTrace = 0;
**
** Technically, the various VFSes are free to implement these locks however
** they see fit. However, compatibility is encouraged so that VFSes can
-** interoperate. The standard implemention used on both unix and windows
+** interoperate. The standard implementation used on both unix and windows
** is for the index number to indicate a byte offset into the
** WalCkptInfo.aLock[] array in the wal-index header. In other words, all
** locks are on the shm file. The WALINDEX_LOCK_OFFSET constant (which
@@ -63847,7 +64862,7 @@ struct WalIndexHdr {
** the mxFrame for that reader. The value READMARK_NOT_USED (0xffffffff)
** for any aReadMark[] means that entry is unused. aReadMark[0] is
** a special case; its value is never used and it exists as a place-holder
-** to avoid having to offset aReadMark[] indexs by one. Readers holding
+** to avoid having to offset aReadMark[] indexes by one. Readers holding
** WAL_READ_LOCK(0) always ignore the entire WAL and read all content
** directly from the database.
**
@@ -64015,7 +65030,15 @@ struct Wal {
u32 iReCksum; /* On commit, recalculate checksums from here */
const char *zWalName; /* Name of WAL file */
u32 nCkpt; /* Checkpoint sequence counter in the wal-header */
+#ifdef SQLITE_USE_SEH
+ u32 lockMask; /* Mask of locks held */
+ void *pFree; /* Pointer to sqlite3_free() if exception thrown */
+ u32 *pWiValue; /* Value to write into apWiData[iWiPg] */
+ int iWiPg; /* Write pWiValue into apWiData[iWiPg] */
+ int iSysErrno; /* System error code following exception */
+#endif
#ifdef SQLITE_DEBUG
+ int nSehTry; /* Number of nested SEH_TRY{} blocks */
u8 lockError; /* True if a locking error has occurred */
#endif
#ifdef SQLITE_ENABLE_SNAPSHOT
@@ -64098,6 +65121,113 @@ struct WalIterator {
)
/*
+** Structured Exception Handling (SEH) is a Windows-specific technique
+** for catching exceptions raised while accessing memory-mapped files.
+**
+** The -DSQLITE_USE_SEH compile-time option means to use SEH to catch and
+** deal with system-level errors that arise during WAL -shm file processing.
+** Without this compile-time option, any system-level faults that appear
+** while accessing the memory-mapped -shm file will cause a process-wide
+** signal to be deliver, which will more than likely cause the entire
+** process to exit.
+*/
+#ifdef SQLITE_USE_SEH
+#include <Windows.h>
+
+/* Beginning of a block of code in which an exception might occur */
+# define SEH_TRY __try { \
+ assert( walAssertLockmask(pWal) && pWal->nSehTry==0 ); \
+ VVA_ONLY(pWal->nSehTry++);
+
+/* The end of a block of code in which an exception might occur */
+# define SEH_EXCEPT(X) \
+ VVA_ONLY(pWal->nSehTry--); \
+ assert( pWal->nSehTry==0 ); \
+ } __except( sehExceptionFilter(pWal, GetExceptionCode(), GetExceptionInformation() ) ){ X }
+
+/* Simulate a memory-mapping fault in the -shm file for testing purposes */
+# define SEH_INJECT_FAULT sehInjectFault(pWal)
+
+/*
+** The second argument is the return value of GetExceptionCode() for the
+** current exception. Return EXCEPTION_EXECUTE_HANDLER if the exception code
+** indicates that the exception may have been caused by accessing the *-shm
+** file mapping. Or EXCEPTION_CONTINUE_SEARCH otherwise.
+*/
+static int sehExceptionFilter(Wal *pWal, int eCode, EXCEPTION_POINTERS *p){
+ VVA_ONLY(pWal->nSehTry--);
+ if( eCode==EXCEPTION_IN_PAGE_ERROR ){
+ if( p && p->ExceptionRecord && p->ExceptionRecord->NumberParameters>=3 ){
+ /* From MSDN: For this type of exception, the first element of the
+ ** ExceptionInformation[] array is a read-write flag - 0 if the exception
+ ** was thrown while reading, 1 if while writing. The second element is
+ ** the virtual address being accessed. The "third array element specifies
+ ** the underlying NTSTATUS code that resulted in the exception". */
+ pWal->iSysErrno = (int)p->ExceptionRecord->ExceptionInformation[2];
+ }
+ return EXCEPTION_EXECUTE_HANDLER;
+ }
+ return EXCEPTION_CONTINUE_SEARCH;
+}
+
+/*
+** If one is configured, invoke the xTestCallback callback with 650 as
+** the argument. If it returns true, throw the same exception that is
+** thrown by the system if the *-shm file mapping is accessed after it
+** has been invalidated.
+*/
+static void sehInjectFault(Wal *pWal){
+ int res;
+ assert( pWal->nSehTry>0 );
+
+ res = sqlite3FaultSim(650);
+ if( res!=0 ){
+ ULONG_PTR aArg[3];
+ aArg[0] = 0;
+ aArg[1] = 0;
+ aArg[2] = (ULONG_PTR)res;
+ RaiseException(EXCEPTION_IN_PAGE_ERROR, 0, 3, (const ULONG_PTR*)aArg);
+ }
+}
+
+/*
+** There are two ways to use this macro. To set a pointer to be freed
+** if an exception is thrown:
+**
+** SEH_FREE_ON_ERROR(0, pPtr);
+**
+** and to cancel the same:
+**
+** SEH_FREE_ON_ERROR(pPtr, 0);
+**
+** In the first case, there must not already be a pointer registered to
+** be freed. In the second case, pPtr must be the registered pointer.
+*/
+#define SEH_FREE_ON_ERROR(X,Y) \
+ assert( (X==0 || Y==0) && pWal->pFree==X ); pWal->pFree = Y
+
+/*
+** There are two ways to use this macro. To arrange for pWal->apWiData[iPg]
+** to be set to pValue if an exception is thrown:
+**
+** SEH_SET_ON_ERROR(iPg, pValue);
+**
+** and to cancel the same:
+**
+** SEH_SET_ON_ERROR(0, 0);
+*/
+#define SEH_SET_ON_ERROR(X,Y) pWal->iWiPg = X; pWal->pWiValue = Y
+
+#else
+# define SEH_TRY VVA_ONLY(pWal->nSehTry++);
+# define SEH_EXCEPT(X) VVA_ONLY(pWal->nSehTry--); assert( pWal->nSehTry==0 );
+# define SEH_INJECT_FAULT assert( pWal->nSehTry>0 );
+# define SEH_FREE_ON_ERROR(X,Y)
+# define SEH_SET_ON_ERROR(X,Y)
+#endif /* ifdef SQLITE_USE_SEH */
+
+
+/*
** Obtain a pointer to the iPage'th page of the wal-index. The wal-index
** is broken into pages of WALINDEX_PGSZ bytes. Wal-index pages are
** numbered from zero.
@@ -64169,6 +65299,7 @@ static int walIndexPage(
int iPage, /* The page we seek */
volatile u32 **ppPage /* Write the page pointer here */
){
+ SEH_INJECT_FAULT;
if( pWal->nWiData<=iPage || (*ppPage = pWal->apWiData[iPage])==0 ){
return walIndexPageRealloc(pWal, iPage, ppPage);
}
@@ -64180,6 +65311,7 @@ static int walIndexPage(
*/
static volatile WalCkptInfo *walCkptInfo(Wal *pWal){
assert( pWal->nWiData>0 && pWal->apWiData[0] );
+ SEH_INJECT_FAULT;
return (volatile WalCkptInfo*)&(pWal->apWiData[0][sizeof(WalIndexHdr)/2]);
}
@@ -64188,6 +65320,7 @@ static volatile WalCkptInfo *walCkptInfo(Wal *pWal){
*/
static volatile WalIndexHdr *walIndexHdr(Wal *pWal){
assert( pWal->nWiData>0 && pWal->apWiData[0] );
+ SEH_INJECT_FAULT;
return (volatile WalIndexHdr*)pWal->apWiData[0];
}
@@ -64377,7 +65510,7 @@ static int walDecodeFrame(
return 0;
}
- /* A frame is only valid if the page number is creater than zero.
+ /* A frame is only valid if the page number is greater than zero.
*/
pgno = sqlite3Get4byte(&aFrame[0]);
if( pgno==0 ){
@@ -64385,7 +65518,7 @@ static int walDecodeFrame(
}
/* A frame is only valid if a checksum of the WAL header,
- ** all prior frams, the first 16 bytes of this frame-header,
+ ** all prior frames, the first 16 bytes of this frame-header,
** and the frame-data matches the checksum in the last 8
** bytes of this frame-header.
*/
@@ -64445,12 +65578,18 @@ static int walLockShared(Wal *pWal, int lockIdx){
WALTRACE(("WAL%p: acquire SHARED-%s %s\n", pWal,
walLockName(lockIdx), rc ? "failed" : "ok"));
VVA_ONLY( pWal->lockError = (u8)(rc!=SQLITE_OK && (rc&0xFF)!=SQLITE_BUSY); )
+#ifdef SQLITE_USE_SEH
+ if( rc==SQLITE_OK ) pWal->lockMask |= (1 << lockIdx);
+#endif
return rc;
}
static void walUnlockShared(Wal *pWal, int lockIdx){
if( pWal->exclusiveMode ) return;
(void)sqlite3OsShmLock(pWal->pDbFd, lockIdx, 1,
SQLITE_SHM_UNLOCK | SQLITE_SHM_SHARED);
+#ifdef SQLITE_USE_SEH
+ pWal->lockMask &= ~(1 << lockIdx);
+#endif
WALTRACE(("WAL%p: release SHARED-%s\n", pWal, walLockName(lockIdx)));
}
static int walLockExclusive(Wal *pWal, int lockIdx, int n){
@@ -64461,12 +65600,20 @@ static int walLockExclusive(Wal *pWal, int lockIdx, int n){
WALTRACE(("WAL%p: acquire EXCLUSIVE-%s cnt=%d %s\n", pWal,
walLockName(lockIdx), n, rc ? "failed" : "ok"));
VVA_ONLY( pWal->lockError = (u8)(rc!=SQLITE_OK && (rc&0xFF)!=SQLITE_BUSY); )
+#ifdef SQLITE_USE_SEH
+ if( rc==SQLITE_OK ){
+ pWal->lockMask |= (((1<<n)-1) << (SQLITE_SHM_NLOCK+lockIdx));
+ }
+#endif
return rc;
}
static void walUnlockExclusive(Wal *pWal, int lockIdx, int n){
if( pWal->exclusiveMode ) return;
(void)sqlite3OsShmLock(pWal->pDbFd, lockIdx, n,
SQLITE_SHM_UNLOCK | SQLITE_SHM_EXCLUSIVE);
+#ifdef SQLITE_USE_SEH
+ pWal->lockMask &= ~(((1<<n)-1) << (SQLITE_SHM_NLOCK+lockIdx));
+#endif
WALTRACE(("WAL%p: release EXCLUSIVE-%s cnt=%d\n", pWal,
walLockName(lockIdx), n));
}
@@ -64558,6 +65705,7 @@ static int walFramePage(u32 iFrame){
*/
static u32 walFramePgno(Wal *pWal, u32 iFrame){
int iHash = walFramePage(iFrame);
+ SEH_INJECT_FAULT;
if( iHash==0 ){
return pWal->apWiData[0][WALINDEX_HDR_SIZE/sizeof(u32) + iFrame - 1];
}
@@ -64817,6 +65965,7 @@ static int walIndexRecover(Wal *pWal){
/* Malloc a buffer to read frames into. */
szFrame = szPage + WAL_FRAME_HDRSIZE;
aFrame = (u8 *)sqlite3_malloc64(szFrame + WALINDEX_PGSZ);
+ SEH_FREE_ON_ERROR(0, aFrame);
if( !aFrame ){
rc = SQLITE_NOMEM_BKPT;
goto recovery_error;
@@ -64835,6 +65984,7 @@ static int walIndexRecover(Wal *pWal){
rc = walIndexPage(pWal, iPg, (volatile u32**)&aShare);
assert( aShare!=0 || rc!=SQLITE_OK );
if( aShare==0 ) break;
+ SEH_SET_ON_ERROR(iPg, aShare);
pWal->apWiData[iPg] = aPrivate;
for(iFrame=iFirst; iFrame<=iLast; iFrame++){
@@ -64862,6 +66012,7 @@ static int walIndexRecover(Wal *pWal){
}
}
pWal->apWiData[iPg] = aShare;
+ SEH_SET_ON_ERROR(0,0);
nHdr = (iPg==0 ? WALINDEX_HDR_SIZE : 0);
nHdr32 = nHdr / sizeof(u32);
#ifndef SQLITE_SAFER_WALINDEX_RECOVERY
@@ -64892,9 +66043,11 @@ static int walIndexRecover(Wal *pWal){
}
}
#endif
+ SEH_INJECT_FAULT;
if( iFrame<=iLast ) break;
}
+ SEH_FREE_ON_ERROR(aFrame, 0);
sqlite3_free(aFrame);
}
@@ -64922,6 +66075,7 @@ finished:
}else{
pInfo->aReadMark[i] = READMARK_NOT_USED;
}
+ SEH_INJECT_FAULT;
walUnlockExclusive(pWal, WAL_READ_LOCK(i), 1);
}else if( rc!=SQLITE_BUSY ){
goto recovery_error;
@@ -65079,7 +66233,7 @@ SQLITE_PRIVATE int sqlite3WalOpen(
}
/*
-** Change the size to which the WAL file is trucated on each reset.
+** Change the size to which the WAL file is truncated on each reset.
*/
SQLITE_PRIVATE void sqlite3WalLimit(Wal *pWal, i64 iLimit){
if( pWal ) pWal->mxWalSize = iLimit;
@@ -65305,23 +66459,16 @@ static int walIteratorInit(Wal *pWal, u32 nBackfill, WalIterator **pp){
nByte = sizeof(WalIterator)
+ (nSegment-1)*sizeof(struct WalSegment)
+ iLast*sizeof(ht_slot);
- p = (WalIterator *)sqlite3_malloc64(nByte);
+ p = (WalIterator *)sqlite3_malloc64(nByte
+ + sizeof(ht_slot) * (iLast>HASHTABLE_NPAGE?HASHTABLE_NPAGE:iLast)
+ );
if( !p ){
return SQLITE_NOMEM_BKPT;
}
memset(p, 0, nByte);
p->nSegment = nSegment;
-
- /* Allocate temporary space used by the merge-sort routine. This block
- ** of memory will be freed before this function returns.
- */
- aTmp = (ht_slot *)sqlite3_malloc64(
- sizeof(ht_slot) * (iLast>HASHTABLE_NPAGE?HASHTABLE_NPAGE:iLast)
- );
- if( !aTmp ){
- rc = SQLITE_NOMEM_BKPT;
- }
-
+ aTmp = (ht_slot*)&(((u8*)p)[nByte]);
+ SEH_FREE_ON_ERROR(0, p);
for(i=walFramePage(nBackfill+1); rc==SQLITE_OK && i<nSegment; i++){
WalHashLoc sLoc;
@@ -65349,9 +66496,8 @@ static int walIteratorInit(Wal *pWal, u32 nBackfill, WalIterator **pp){
p->aSegment[i].aPgno = (u32 *)sLoc.aPgno;
}
}
- sqlite3_free(aTmp);
-
if( rc!=SQLITE_OK ){
+ SEH_FREE_ON_ERROR(p, 0);
walIteratorFree(p);
p = 0;
}
@@ -65360,6 +66506,19 @@ static int walIteratorInit(Wal *pWal, u32 nBackfill, WalIterator **pp){
}
#ifdef SQLITE_ENABLE_SETLK_TIMEOUT
+
+
+/*
+** Attempt to enable blocking locks that block for nMs ms. Return 1 if
+** blocking locks are successfully enabled, or 0 otherwise.
+*/
+static int walEnableBlockingMs(Wal *pWal, int nMs){
+ int rc = sqlite3OsFileControl(
+ pWal->pDbFd, SQLITE_FCNTL_LOCK_TIMEOUT, (void*)&nMs
+ );
+ return (rc==SQLITE_OK);
+}
+
/*
** Attempt to enable blocking locks. Blocking locks are enabled only if (a)
** they are supported by the VFS, and (b) the database handle is configured
@@ -65371,11 +66530,7 @@ static int walEnableBlocking(Wal *pWal){
if( pWal->db ){
int tmout = pWal->db->busyTimeout;
if( tmout ){
- int rc;
- rc = sqlite3OsFileControl(
- pWal->pDbFd, SQLITE_FCNTL_LOCK_TIMEOUT, (void*)&tmout
- );
- res = (rc==SQLITE_OK);
+ res = walEnableBlockingMs(pWal, tmout);
}
}
return res;
@@ -65424,20 +66579,10 @@ SQLITE_PRIVATE void sqlite3WalDb(Wal *pWal, sqlite3 *db){
pWal->db = db;
}
-/*
-** Take an exclusive WRITE lock. Blocking if so configured.
-*/
-static int walLockWriter(Wal *pWal){
- int rc;
- walEnableBlocking(pWal);
- rc = walLockExclusive(pWal, WAL_WRITE_LOCK, 1);
- walDisableBlocking(pWal);
- return rc;
-}
#else
# define walEnableBlocking(x) 0
# define walDisableBlocking(x)
-# define walLockWriter(pWal) walLockExclusive((pWal), WAL_WRITE_LOCK, 1)
+# define walEnableBlockingMs(pWal, ms) 0
# define sqlite3WalDb(pWal, db)
#endif /* ifdef SQLITE_ENABLE_SETLK_TIMEOUT */
@@ -65577,13 +66722,13 @@ static int walCheckpoint(
mxSafeFrame = pWal->hdr.mxFrame;
mxPage = pWal->hdr.nPage;
for(i=1; i<WAL_NREADER; i++){
- u32 y = AtomicLoad(pInfo->aReadMark+i);
+ u32 y = AtomicLoad(pInfo->aReadMark+i); SEH_INJECT_FAULT;
if( mxSafeFrame>y ){
assert( y<=pWal->hdr.mxFrame );
rc = walBusyLock(pWal, xBusy, pBusyArg, WAL_READ_LOCK(i), 1);
if( rc==SQLITE_OK ){
u32 iMark = (i==1 ? mxSafeFrame : READMARK_NOT_USED);
- AtomicStore(pInfo->aReadMark+i, iMark);
+ AtomicStore(pInfo->aReadMark+i, iMark); SEH_INJECT_FAULT;
walUnlockExclusive(pWal, WAL_READ_LOCK(i), 1);
}else if( rc==SQLITE_BUSY ){
mxSafeFrame = y;
@@ -65604,8 +66749,7 @@ static int walCheckpoint(
&& (rc = walBusyLock(pWal,xBusy,pBusyArg,WAL_READ_LOCK(0),1))==SQLITE_OK
){
u32 nBackfill = pInfo->nBackfill;
-
- pInfo->nBackfillAttempted = mxSafeFrame;
+ pInfo->nBackfillAttempted = mxSafeFrame; SEH_INJECT_FAULT;
/* Sync the WAL to disk */
rc = sqlite3OsSync(pWal->pWalFd, CKPT_SYNC_FLAGS(sync_flags));
@@ -65636,6 +66780,7 @@ static int walCheckpoint(
while( rc==SQLITE_OK && 0==walIteratorNext(pIter, &iDbpage, &iFrame) ){
i64 iOffset;
assert( walFramePgno(pWal, iFrame)==iDbpage );
+ SEH_INJECT_FAULT;
if( AtomicLoad(&db->u1.isInterrupted) ){
rc = db->mallocFailed ? SQLITE_NOMEM_BKPT : SQLITE_INTERRUPT;
break;
@@ -65665,7 +66810,7 @@ static int walCheckpoint(
}
}
if( rc==SQLITE_OK ){
- AtomicStore(&pInfo->nBackfill, mxSafeFrame);
+ AtomicStore(&pInfo->nBackfill, mxSafeFrame); SEH_INJECT_FAULT;
}
}
@@ -65687,6 +66832,7 @@ static int walCheckpoint(
*/
if( rc==SQLITE_OK && eMode!=SQLITE_CHECKPOINT_PASSIVE ){
assert( pWal->writeLock );
+ SEH_INJECT_FAULT;
if( pInfo->nBackfill<pWal->hdr.mxFrame ){
rc = SQLITE_BUSY;
}else if( eMode>=SQLITE_CHECKPOINT_RESTART ){
@@ -65718,6 +66864,7 @@ static int walCheckpoint(
}
walcheckpoint_out:
+ SEH_FREE_ON_ERROR(pIter, 0);
walIteratorFree(pIter);
return rc;
}
@@ -65740,6 +66887,93 @@ static void walLimitSize(Wal *pWal, i64 nMax){
}
}
+#ifdef SQLITE_USE_SEH
+/*
+** This is the "standard" exception handler used in a few places to handle
+** an exception thrown by reading from the *-shm mapping after it has become
+** invalid in SQLITE_USE_SEH builds. It is used as follows:
+**
+** SEH_TRY { ... }
+** SEH_EXCEPT( rc = walHandleException(pWal); )
+**
+** This function does three things:
+**
+** 1) Determines the locks that should be held, based on the contents of
+** the Wal.readLock, Wal.writeLock and Wal.ckptLock variables. All other
+** held locks are assumed to be transient locks that would have been
+** released had the exception not been thrown and are dropped.
+**
+** 2) Frees the pointer at Wal.pFree, if any, using sqlite3_free().
+**
+** 3) Set pWal->apWiData[pWal->iWiPg] to pWal->pWiValue if not NULL
+**
+** 4) Returns SQLITE_IOERR.
+*/
+static int walHandleException(Wal *pWal){
+ if( pWal->exclusiveMode==0 ){
+ static const int S = 1;
+ static const int E = (1<<SQLITE_SHM_NLOCK);
+ int ii;
+ u32 mUnlock = pWal->lockMask & ~(
+ (pWal->readLock<0 ? 0 : (S << WAL_READ_LOCK(pWal->readLock)))
+ | (pWal->writeLock ? (E << WAL_WRITE_LOCK) : 0)
+ | (pWal->ckptLock ? (E << WAL_CKPT_LOCK) : 0)
+ );
+ for(ii=0; ii<SQLITE_SHM_NLOCK; ii++){
+ if( (S<<ii) & mUnlock ) walUnlockShared(pWal, ii);
+ if( (E<<ii) & mUnlock ) walUnlockExclusive(pWal, ii, 1);
+ }
+ }
+ sqlite3_free(pWal->pFree);
+ pWal->pFree = 0;
+ if( pWal->pWiValue ){
+ pWal->apWiData[pWal->iWiPg] = pWal->pWiValue;
+ pWal->pWiValue = 0;
+ }
+ return SQLITE_IOERR_IN_PAGE;
+}
+
+/*
+** Assert that the Wal.lockMask mask, which indicates the locks held
+** by the connenction, is consistent with the Wal.readLock, Wal.writeLock
+** and Wal.ckptLock variables. To be used as:
+**
+** assert( walAssertLockmask(pWal) );
+*/
+static int walAssertLockmask(Wal *pWal){
+ if( pWal->exclusiveMode==0 ){
+ static const int S = 1;
+ static const int E = (1<<SQLITE_SHM_NLOCK);
+ u32 mExpect = (
+ (pWal->readLock<0 ? 0 : (S << WAL_READ_LOCK(pWal->readLock)))
+ | (pWal->writeLock ? (E << WAL_WRITE_LOCK) : 0)
+ | (pWal->ckptLock ? (E << WAL_CKPT_LOCK) : 0)
+#ifdef SQLITE_ENABLE_SNAPSHOT
+ | (pWal->pSnapshot ? (pWal->lockMask & (1 << WAL_CKPT_LOCK)) : 0)
+#endif
+ );
+ assert( mExpect==pWal->lockMask );
+ }
+ return 1;
+}
+
+/*
+** Return and zero the "system error" field set when an
+** EXCEPTION_IN_PAGE_ERROR exception is caught.
+*/
+SQLITE_PRIVATE int sqlite3WalSystemErrno(Wal *pWal){
+ int iRet = 0;
+ if( pWal ){
+ iRet = pWal->iSysErrno;
+ pWal->iSysErrno = 0;
+ }
+ return iRet;
+}
+
+#else
+# define walAssertLockmask(x) 1
+#endif /* ifdef SQLITE_USE_SEH */
+
/*
** Close a connection to a log file.
*/
@@ -65754,6 +66988,8 @@ SQLITE_PRIVATE int sqlite3WalClose(
if( pWal ){
int isDelete = 0; /* True to unlink wal and wal-index files */
+ assert( walAssertLockmask(pWal) );
+
/* If an EXCLUSIVE lock can be obtained on the database file (using the
** ordinary, rollback-mode locking methods, this guarantees that the
** connection associated with this log file is the only connection to
@@ -65778,7 +67014,7 @@ SQLITE_PRIVATE int sqlite3WalClose(
);
if( bPersist!=1 ){
/* Try to delete the WAL file if the checkpoint completed and
- ** fsyned (rc==SQLITE_OK) and if we are not in persistent-wal
+ ** fsynced (rc==SQLITE_OK) and if we are not in persistent-wal
** mode (!bPersist) */
isDelete = 1;
}else if( pWal->mxWalSize>=0 ){
@@ -65845,7 +67081,7 @@ static SQLITE_NO_TSAN int walIndexTryHdr(Wal *pWal, int *pChanged){
** give false-positive warnings about these accesses because the tools do not
** account for the double-read and the memory barrier. The use of mutexes
** here would be problematic as the memory being accessed is potentially
- ** shared among multiple processes and not all mutex implementions work
+ ** shared among multiple processes and not all mutex implementations work
** reliably in that environment.
*/
aHdr = walIndexHdr(pWal);
@@ -65947,7 +67183,9 @@ static int walIndexReadHdr(Wal *pWal, int *pChanged){
}
}else{
int bWriteLock = pWal->writeLock;
- if( bWriteLock || SQLITE_OK==(rc = walLockWriter(pWal)) ){
+ if( bWriteLock
+ || SQLITE_OK==(rc = walLockExclusive(pWal, WAL_WRITE_LOCK, 1))
+ ){
pWal->writeLock = 1;
if( SQLITE_OK==(rc = walIndexPage(pWal, 0, &page0)) ){
badHdr = walIndexTryHdr(pWal, pChanged);
@@ -65955,7 +67193,8 @@ static int walIndexReadHdr(Wal *pWal, int *pChanged){
/* If the wal-index header is still malformed even while holding
** a WRITE lock, it can only mean that the header is corrupted and
** needs to be reconstructed. So run recovery to do exactly that.
- */
+ ** Disable blocking locks first. */
+ walDisableBlocking(pWal);
rc = walIndexRecover(pWal);
*pChanged = 1;
}
@@ -66166,6 +67405,37 @@ static int walBeginShmUnreliable(Wal *pWal, int *pChanged){
}
/*
+** The final argument passed to walTryBeginRead() is of type (int*). The
+** caller should invoke walTryBeginRead as follows:
+**
+** int cnt = 0;
+** do {
+** rc = walTryBeginRead(..., &cnt);
+** }while( rc==WAL_RETRY );
+**
+** The final value of "cnt" is of no use to the caller. It is used by
+** the implementation of walTryBeginRead() as follows:
+**
+** + Each time walTryBeginRead() is called, it is incremented. Once
+** it reaches WAL_RETRY_PROTOCOL_LIMIT - indicating that walTryBeginRead()
+** has many times been invoked and failed with WAL_RETRY - walTryBeginRead()
+** returns SQLITE_PROTOCOL.
+**
+** + If SQLITE_ENABLE_SETLK_TIMEOUT is defined and walTryBeginRead() failed
+** because a blocking lock timed out (SQLITE_BUSY_TIMEOUT from the OS
+** layer), the WAL_RETRY_BLOCKED_MASK bit is set in "cnt". In this case
+** the next invocation of walTryBeginRead() may omit an expected call to
+** sqlite3OsSleep(). There has already been a delay when the previous call
+** waited on a lock.
+*/
+#define WAL_RETRY_PROTOCOL_LIMIT 100
+#ifdef SQLITE_ENABLE_SETLK_TIMEOUT
+# define WAL_RETRY_BLOCKED_MASK 0x10000000
+#else
+# define WAL_RETRY_BLOCKED_MASK 0
+#endif
+
+/*
** Attempt to start a read transaction. This might fail due to a race or
** other transient condition. When that happens, it returns WAL_RETRY to
** indicate to the caller that it is safe to retry immediately.
@@ -66215,13 +67485,16 @@ static int walBeginShmUnreliable(Wal *pWal, int *pChanged){
** so it takes care to hold an exclusive lock on the corresponding
** WAL_READ_LOCK() while changing values.
*/
-static int walTryBeginRead(Wal *pWal, int *pChanged, int useWal, int cnt){
+static int walTryBeginRead(Wal *pWal, int *pChanged, int useWal, int *pCnt){
volatile WalCkptInfo *pInfo; /* Checkpoint information in wal-index */
u32 mxReadMark; /* Largest aReadMark[] value */
int mxI; /* Index of largest aReadMark[] value */
int i; /* Loop counter */
int rc = SQLITE_OK; /* Return code */
u32 mxFrame; /* Wal frame to lock to */
+#ifdef SQLITE_ENABLE_SETLK_TIMEOUT
+ int nBlockTmout = 0;
+#endif
assert( pWal->readLock<0 ); /* Not currently locked */
@@ -66245,14 +67518,34 @@ static int walTryBeginRead(Wal *pWal, int *pChanged, int useWal, int cnt){
** so that on the 100th (and last) RETRY we delay for 323 milliseconds.
** The total delay time before giving up is less than 10 seconds.
*/
- if( cnt>5 ){
+ (*pCnt)++;
+ if( *pCnt>5 ){
int nDelay = 1; /* Pause time in microseconds */
- if( cnt>100 ){
+ int cnt = (*pCnt & ~WAL_RETRY_BLOCKED_MASK);
+ if( cnt>WAL_RETRY_PROTOCOL_LIMIT ){
VVA_ONLY( pWal->lockError = 1; )
return SQLITE_PROTOCOL;
}
- if( cnt>=10 ) nDelay = (cnt-9)*(cnt-9)*39;
+ if( *pCnt>=10 ) nDelay = (cnt-9)*(cnt-9)*39;
+#ifdef SQLITE_ENABLE_SETLK_TIMEOUT
+ /* In SQLITE_ENABLE_SETLK_TIMEOUT builds, configure the file-descriptor
+ ** to block for locks for approximately nDelay us. This affects three
+ ** locks: (a) the shared lock taken on the DMS slot in os_unix.c (if
+ ** using os_unix.c), (b) the WRITER lock taken in walIndexReadHdr() if the
+ ** first attempted read fails, and (c) the shared lock taken on the
+ ** read-mark.
+ **
+ ** If the previous call failed due to an SQLITE_BUSY_TIMEOUT error,
+ ** then sleep for the minimum of 1us. The previous call already provided
+ ** an extra delay while it was blocking on the lock.
+ */
+ nBlockTmout = (nDelay+998) / 1000;
+ if( !useWal && walEnableBlockingMs(pWal, nBlockTmout) ){
+ if( *pCnt & WAL_RETRY_BLOCKED_MASK ) nDelay = 1;
+ }
+#endif
sqlite3OsSleep(pWal->pVfs, nDelay);
+ *pCnt &= ~WAL_RETRY_BLOCKED_MASK;
}
if( !useWal ){
@@ -66260,6 +67553,13 @@ static int walTryBeginRead(Wal *pWal, int *pChanged, int useWal, int cnt){
if( pWal->bShmUnreliable==0 ){
rc = walIndexReadHdr(pWal, pChanged);
}
+#ifdef SQLITE_ENABLE_SETLK_TIMEOUT
+ walDisableBlocking(pWal);
+ if( rc==SQLITE_BUSY_TIMEOUT ){
+ rc = SQLITE_BUSY;
+ *pCnt |= WAL_RETRY_BLOCKED_MASK;
+ }
+#endif
if( rc==SQLITE_BUSY ){
/* If there is not a recovery running in another thread or process
** then convert BUSY errors to WAL_RETRY. If recovery is known to
@@ -66296,6 +67596,7 @@ static int walTryBeginRead(Wal *pWal, int *pChanged, int useWal, int cnt){
assert( pWal->nWiData>0 );
assert( pWal->apWiData[0]!=0 );
pInfo = walCkptInfo(pWal);
+ SEH_INJECT_FAULT;
if( !useWal && AtomicLoad(&pInfo->nBackfill)==pWal->hdr.mxFrame
#ifdef SQLITE_ENABLE_SNAPSHOT
&& (pWal->pSnapshot==0 || pWal->hdr.mxFrame==0)
@@ -66345,7 +67646,7 @@ static int walTryBeginRead(Wal *pWal, int *pChanged, int useWal, int cnt){
}
#endif
for(i=1; i<WAL_NREADER; i++){
- u32 thisMark = AtomicLoad(pInfo->aReadMark+i);
+ u32 thisMark = AtomicLoad(pInfo->aReadMark+i); SEH_INJECT_FAULT;
if( mxReadMark<=thisMark && thisMark<=mxFrame ){
assert( thisMark!=READMARK_NOT_USED );
mxReadMark = thisMark;
@@ -66373,9 +67674,19 @@ static int walTryBeginRead(Wal *pWal, int *pChanged, int useWal, int cnt){
return rc==SQLITE_BUSY ? WAL_RETRY : SQLITE_READONLY_CANTINIT;
}
+ (void)walEnableBlockingMs(pWal, nBlockTmout);
rc = walLockShared(pWal, WAL_READ_LOCK(mxI));
+ walDisableBlocking(pWal);
if( rc ){
- return rc==SQLITE_BUSY ? WAL_RETRY : rc;
+#ifdef SQLITE_ENABLE_SETLK_TIMEOUT
+ if( rc==SQLITE_BUSY_TIMEOUT ){
+ *pCnt |= WAL_RETRY_BLOCKED_MASK;
+ }
+#else
+ assert( rc!=SQLITE_BUSY_TIMEOUT );
+#endif
+ assert( (rc&0xFF)!=SQLITE_BUSY||rc==SQLITE_BUSY||rc==SQLITE_BUSY_TIMEOUT );
+ return (rc&0xFF)==SQLITE_BUSY ? WAL_RETRY : rc;
}
/* Now that the read-lock has been obtained, check that neither the
** value in the aReadMark[] array or the contents of the wal-index
@@ -66411,7 +67722,7 @@ static int walTryBeginRead(Wal *pWal, int *pChanged, int useWal, int cnt){
** we can guarantee that the checkpointer that set nBackfill could not
** see any pages past pWal->hdr.mxFrame, this problem does not come up.
*/
- pWal->minFrame = AtomicLoad(&pInfo->nBackfill)+1;
+ pWal->minFrame = AtomicLoad(&pInfo->nBackfill)+1; SEH_INJECT_FAULT;
walShmBarrier(pWal);
if( AtomicLoad(pInfo->aReadMark+mxI)!=mxReadMark
|| memcmp((void *)walIndexHdr(pWal), &pWal->hdr, sizeof(WalIndexHdr))
@@ -66427,6 +67738,54 @@ static int walTryBeginRead(Wal *pWal, int *pChanged, int useWal, int cnt){
#ifdef SQLITE_ENABLE_SNAPSHOT
/*
+** This function does the work of sqlite3WalSnapshotRecover().
+*/
+static int walSnapshotRecover(
+ Wal *pWal, /* WAL handle */
+ void *pBuf1, /* Temp buffer pWal->szPage bytes in size */
+ void *pBuf2 /* Temp buffer pWal->szPage bytes in size */
+){
+ int szPage = (int)pWal->szPage;
+ int rc;
+ i64 szDb; /* Size of db file in bytes */
+
+ rc = sqlite3OsFileSize(pWal->pDbFd, &szDb);
+ if( rc==SQLITE_OK ){
+ volatile WalCkptInfo *pInfo = walCkptInfo(pWal);
+ u32 i = pInfo->nBackfillAttempted;
+ for(i=pInfo->nBackfillAttempted; i>AtomicLoad(&pInfo->nBackfill); i--){
+ WalHashLoc sLoc; /* Hash table location */
+ u32 pgno; /* Page number in db file */
+ i64 iDbOff; /* Offset of db file entry */
+ i64 iWalOff; /* Offset of wal file entry */
+
+ rc = walHashGet(pWal, walFramePage(i), &sLoc);
+ if( rc!=SQLITE_OK ) break;
+ assert( i - sLoc.iZero - 1 >=0 );
+ pgno = sLoc.aPgno[i-sLoc.iZero-1];
+ iDbOff = (i64)(pgno-1) * szPage;
+
+ if( iDbOff+szPage<=szDb ){
+ iWalOff = walFrameOffset(i, szPage) + WAL_FRAME_HDRSIZE;
+ rc = sqlite3OsRead(pWal->pWalFd, pBuf1, szPage, iWalOff);
+
+ if( rc==SQLITE_OK ){
+ rc = sqlite3OsRead(pWal->pDbFd, pBuf2, szPage, iDbOff);
+ }
+
+ if( rc!=SQLITE_OK || 0==memcmp(pBuf1, pBuf2, szPage) ){
+ break;
+ }
+ }
+
+ pInfo->nBackfillAttempted = i-1;
+ }
+ }
+
+ return rc;
+}
+
+/*
** Attempt to reduce the value of the WalCkptInfo.nBackfillAttempted
** variable so that older snapshots can be accessed. To do this, loop
** through all wal frames from nBackfillAttempted to (nBackfill+1),
@@ -66451,50 +67810,21 @@ SQLITE_PRIVATE int sqlite3WalSnapshotRecover(Wal *pWal){
assert( pWal->readLock>=0 );
rc = walLockExclusive(pWal, WAL_CKPT_LOCK, 1);
if( rc==SQLITE_OK ){
- volatile WalCkptInfo *pInfo = walCkptInfo(pWal);
- int szPage = (int)pWal->szPage;
- i64 szDb; /* Size of db file in bytes */
-
- rc = sqlite3OsFileSize(pWal->pDbFd, &szDb);
- if( rc==SQLITE_OK ){
- void *pBuf1 = sqlite3_malloc(szPage);
- void *pBuf2 = sqlite3_malloc(szPage);
- if( pBuf1==0 || pBuf2==0 ){
- rc = SQLITE_NOMEM;
- }else{
- u32 i = pInfo->nBackfillAttempted;
- for(i=pInfo->nBackfillAttempted; i>AtomicLoad(&pInfo->nBackfill); i--){
- WalHashLoc sLoc; /* Hash table location */
- u32 pgno; /* Page number in db file */
- i64 iDbOff; /* Offset of db file entry */
- i64 iWalOff; /* Offset of wal file entry */
-
- rc = walHashGet(pWal, walFramePage(i), &sLoc);
- if( rc!=SQLITE_OK ) break;
- assert( i - sLoc.iZero - 1 >=0 );
- pgno = sLoc.aPgno[i-sLoc.iZero-1];
- iDbOff = (i64)(pgno-1) * szPage;
-
- if( iDbOff+szPage<=szDb ){
- iWalOff = walFrameOffset(i, szPage) + WAL_FRAME_HDRSIZE;
- rc = sqlite3OsRead(pWal->pWalFd, pBuf1, szPage, iWalOff);
-
- if( rc==SQLITE_OK ){
- rc = sqlite3OsRead(pWal->pDbFd, pBuf2, szPage, iDbOff);
- }
-
- if( rc!=SQLITE_OK || 0==memcmp(pBuf1, pBuf2, szPage) ){
- break;
- }
- }
-
- pInfo->nBackfillAttempted = i-1;
- }
+ void *pBuf1 = sqlite3_malloc(pWal->szPage);
+ void *pBuf2 = sqlite3_malloc(pWal->szPage);
+ if( pBuf1==0 || pBuf2==0 ){
+ rc = SQLITE_NOMEM;
+ }else{
+ pWal->ckptLock = 1;
+ SEH_TRY {
+ rc = walSnapshotRecover(pWal, pBuf1, pBuf2);
}
-
- sqlite3_free(pBuf1);
- sqlite3_free(pBuf2);
+ SEH_EXCEPT( rc = SQLITE_IOERR_IN_PAGE; )
+ pWal->ckptLock = 0;
}
+
+ sqlite3_free(pBuf1);
+ sqlite3_free(pBuf2);
walUnlockExclusive(pWal, WAL_CKPT_LOCK, 1);
}
@@ -66503,28 +67833,20 @@ SQLITE_PRIVATE int sqlite3WalSnapshotRecover(Wal *pWal){
#endif /* SQLITE_ENABLE_SNAPSHOT */
/*
-** Begin a read transaction on the database.
-**
-** This routine used to be called sqlite3OpenSnapshot() and with good reason:
-** it takes a snapshot of the state of the WAL and wal-index for the current
-** instant in time. The current thread will continue to use this snapshot.
-** Other threads might append new content to the WAL and wal-index but
-** that extra content is ignored by the current thread.
-**
-** If the database contents have changes since the previous read
-** transaction, then *pChanged is set to 1 before returning. The
-** Pager layer will use this to know that its cache is stale and
-** needs to be flushed.
+** This function does the work of sqlite3WalBeginReadTransaction() (see
+** below). That function simply calls this one inside an SEH_TRY{...} block.
*/
-SQLITE_PRIVATE int sqlite3WalBeginReadTransaction(Wal *pWal, int *pChanged){
+static int walBeginReadTransaction(Wal *pWal, int *pChanged){
int rc; /* Return code */
int cnt = 0; /* Number of TryBeginRead attempts */
#ifdef SQLITE_ENABLE_SNAPSHOT
+ int ckptLock = 0;
int bChanged = 0;
WalIndexHdr *pSnapshot = pWal->pSnapshot;
#endif
assert( pWal->ckptLock==0 );
+ assert( pWal->nSehTry>0 );
#ifdef SQLITE_ENABLE_SNAPSHOT
if( pSnapshot ){
@@ -66547,12 +67869,12 @@ SQLITE_PRIVATE int sqlite3WalBeginReadTransaction(Wal *pWal, int *pChanged){
if( rc!=SQLITE_OK ){
return rc;
}
- pWal->ckptLock = 1;
+ ckptLock = 1;
}
#endif
do{
- rc = walTryBeginRead(pWal, pChanged, 0, ++cnt);
+ rc = walTryBeginRead(pWal, pChanged, 0, &cnt);
}while( rc==WAL_RETRY );
testcase( (rc&0xff)==SQLITE_BUSY );
testcase( (rc&0xff)==SQLITE_IOERR );
@@ -66611,16 +67933,38 @@ SQLITE_PRIVATE int sqlite3WalBeginReadTransaction(Wal *pWal, int *pChanged){
}
/* Release the shared CKPT lock obtained above. */
- if( pWal->ckptLock ){
+ if( ckptLock ){
assert( pSnapshot );
walUnlockShared(pWal, WAL_CKPT_LOCK);
- pWal->ckptLock = 0;
}
#endif
return rc;
}
/*
+** Begin a read transaction on the database.
+**
+** This routine used to be called sqlite3OpenSnapshot() and with good reason:
+** it takes a snapshot of the state of the WAL and wal-index for the current
+** instant in time. The current thread will continue to use this snapshot.
+** Other threads might append new content to the WAL and wal-index but
+** that extra content is ignored by the current thread.
+**
+** If the database contents have changes since the previous read
+** transaction, then *pChanged is set to 1 before returning. The
+** Pager layer will use this to know that its cache is stale and
+** needs to be flushed.
+*/
+SQLITE_PRIVATE int sqlite3WalBeginReadTransaction(Wal *pWal, int *pChanged){
+ int rc;
+ SEH_TRY {
+ rc = walBeginReadTransaction(pWal, pChanged);
+ }
+ SEH_EXCEPT( rc = walHandleException(pWal); )
+ return rc;
+}
+
+/*
** Finish with a read transaction. All this does is release the
** read-lock.
*/
@@ -66640,7 +67984,7 @@ SQLITE_PRIVATE void sqlite3WalEndReadTransaction(Wal *pWal){
** Return SQLITE_OK if successful, or an error code if an error occurs. If an
** error does occur, the final value of *piRead is undefined.
*/
-SQLITE_PRIVATE int sqlite3WalFindFrame(
+static int walFindFrame(
Wal *pWal, /* WAL handle */
Pgno pgno, /* Database page number to read data for */
u32 *piRead /* OUT: Frame number (or zero) */
@@ -66703,6 +68047,7 @@ SQLITE_PRIVATE int sqlite3WalFindFrame(
}
nCollide = HASHTABLE_NSLOT;
iKey = walHash(pgno);
+ SEH_INJECT_FAULT;
while( (iH = AtomicLoad(&sLoc.aHash[iKey]))!=0 ){
u32 iFrame = iH + sLoc.iZero;
if( iFrame<=iLast && iFrame>=pWal->minFrame && sLoc.aPgno[iH-1]==pgno ){
@@ -66710,6 +68055,7 @@ SQLITE_PRIVATE int sqlite3WalFindFrame(
iRead = iFrame;
}
if( (nCollide--)==0 ){
+ *piRead = 0;
return SQLITE_CORRUPT_BKPT;
}
iKey = walNextHash(iKey);
@@ -66740,6 +68086,30 @@ SQLITE_PRIVATE int sqlite3WalFindFrame(
}
/*
+** Search the wal file for page pgno. If found, set *piRead to the frame that
+** contains the page. Otherwise, if pgno is not in the wal file, set *piRead
+** to zero.
+**
+** Return SQLITE_OK if successful, or an error code if an error occurs. If an
+** error does occur, the final value of *piRead is undefined.
+**
+** The difference between this function and walFindFrame() is that this
+** function wraps walFindFrame() in an SEH_TRY{...} block.
+*/
+SQLITE_PRIVATE int sqlite3WalFindFrame(
+ Wal *pWal, /* WAL handle */
+ Pgno pgno, /* Database page number to read data for */
+ u32 *piRead /* OUT: Frame number (or zero) */
+){
+ int rc;
+ SEH_TRY {
+ rc = walFindFrame(pWal, pgno, piRead);
+ }
+ SEH_EXCEPT( rc = SQLITE_IOERR_IN_PAGE; )
+ return rc;
+}
+
+/*
** Read the contents of frame iRead from the wal file into buffer pOut
** (which is nOut bytes in size). Return SQLITE_OK if successful, or an
** error code otherwise.
@@ -66820,12 +68190,17 @@ SQLITE_PRIVATE int sqlite3WalBeginWriteTransaction(Wal *pWal){
** time the read transaction on this connection was started, then
** the write is disallowed.
*/
- if( memcmp(&pWal->hdr, (void *)walIndexHdr(pWal), sizeof(WalIndexHdr))!=0 ){
+ SEH_TRY {
+ if( memcmp(&pWal->hdr, (void *)walIndexHdr(pWal), sizeof(WalIndexHdr))!=0 ){
+ rc = SQLITE_BUSY_SNAPSHOT;
+ }
+ }
+ SEH_EXCEPT( rc = SQLITE_IOERR_IN_PAGE; )
+
+ if( rc!=SQLITE_OK ){
walUnlockExclusive(pWal, WAL_WRITE_LOCK, 1);
pWal->writeLock = 0;
- rc = SQLITE_BUSY_SNAPSHOT;
}
-
return rc;
}
@@ -66861,30 +68236,33 @@ SQLITE_PRIVATE int sqlite3WalUndo(Wal *pWal, int (*xUndo)(void *, Pgno), void *p
Pgno iMax = pWal->hdr.mxFrame;
Pgno iFrame;
- /* Restore the clients cache of the wal-index header to the state it
- ** was in before the client began writing to the database.
- */
- memcpy(&pWal->hdr, (void *)walIndexHdr(pWal), sizeof(WalIndexHdr));
-
- for(iFrame=pWal->hdr.mxFrame+1;
- ALWAYS(rc==SQLITE_OK) && iFrame<=iMax;
- iFrame++
- ){
- /* This call cannot fail. Unless the page for which the page number
- ** is passed as the second argument is (a) in the cache and
- ** (b) has an outstanding reference, then xUndo is either a no-op
- ** (if (a) is false) or simply expels the page from the cache (if (b)
- ** is false).
- **
- ** If the upper layer is doing a rollback, it is guaranteed that there
- ** are no outstanding references to any page other than page 1. And
- ** page 1 is never written to the log until the transaction is
- ** committed. As a result, the call to xUndo may not fail.
+ SEH_TRY {
+ /* Restore the clients cache of the wal-index header to the state it
+ ** was in before the client began writing to the database.
*/
- assert( walFramePgno(pWal, iFrame)!=1 );
- rc = xUndo(pUndoCtx, walFramePgno(pWal, iFrame));
+ memcpy(&pWal->hdr, (void *)walIndexHdr(pWal), sizeof(WalIndexHdr));
+
+ for(iFrame=pWal->hdr.mxFrame+1;
+ ALWAYS(rc==SQLITE_OK) && iFrame<=iMax;
+ iFrame++
+ ){
+ /* This call cannot fail. Unless the page for which the page number
+ ** is passed as the second argument is (a) in the cache and
+ ** (b) has an outstanding reference, then xUndo is either a no-op
+ ** (if (a) is false) or simply expels the page from the cache (if (b)
+ ** is false).
+ **
+ ** If the upper layer is doing a rollback, it is guaranteed that there
+ ** are no outstanding references to any page other than page 1. And
+ ** page 1 is never written to the log until the transaction is
+ ** committed. As a result, the call to xUndo may not fail.
+ */
+ assert( walFramePgno(pWal, iFrame)!=1 );
+ rc = xUndo(pUndoCtx, walFramePgno(pWal, iFrame));
+ }
+ if( iMax!=pWal->hdr.mxFrame ) walCleanupHash(pWal);
}
- if( iMax!=pWal->hdr.mxFrame ) walCleanupHash(pWal);
+ SEH_EXCEPT( rc = SQLITE_IOERR_IN_PAGE; )
}
return rc;
}
@@ -66928,7 +68306,10 @@ SQLITE_PRIVATE int sqlite3WalSavepointUndo(Wal *pWal, u32 *aWalData){
pWal->hdr.mxFrame = aWalData[0];
pWal->hdr.aFrameCksum[0] = aWalData[1];
pWal->hdr.aFrameCksum[1] = aWalData[2];
- walCleanupHash(pWal);
+ SEH_TRY {
+ walCleanupHash(pWal);
+ }
+ SEH_EXCEPT( rc = SQLITE_IOERR_IN_PAGE; )
}
return rc;
@@ -66978,7 +68359,7 @@ static int walRestartLog(Wal *pWal){
cnt = 0;
do{
int notUsed;
- rc = walTryBeginRead(pWal, &notUsed, 1, ++cnt);
+ rc = walTryBeginRead(pWal, &notUsed, 1, &cnt);
}while( rc==WAL_RETRY );
assert( (rc&0xff)!=SQLITE_BUSY ); /* BUSY not possible when useWal==1 */
testcase( (rc&0xff)==SQLITE_IOERR );
@@ -67109,7 +68490,7 @@ static int walRewriteChecksums(Wal *pWal, u32 iLast){
** Write a set of frames to the log. The caller must hold the write-lock
** on the log file (obtained using sqlite3WalBeginWriteTransaction()).
*/
-SQLITE_PRIVATE int sqlite3WalFrames(
+static int walFrames(
Wal *pWal, /* Wal handle to write to */
int szPage, /* Database page-size in bytes */
PgHdr *pList, /* List of dirty pages to write */
@@ -67220,7 +68601,7 @@ SQLITE_PRIVATE int sqlite3WalFrames(
** checksums must be recomputed when the transaction is committed. */
if( iFirst && (p->pDirty || isCommit==0) ){
u32 iWrite = 0;
- VVA_ONLY(rc =) sqlite3WalFindFrame(pWal, p->pgno, &iWrite);
+ VVA_ONLY(rc =) walFindFrame(pWal, p->pgno, &iWrite);
assert( rc==SQLITE_OK || iWrite==0 );
if( iWrite>=iFirst ){
i64 iOff = walFrameOffset(iWrite, szPage) + WAL_FRAME_HDRSIZE;
@@ -67340,6 +68721,29 @@ SQLITE_PRIVATE int sqlite3WalFrames(
}
/*
+** Write a set of frames to the log. The caller must hold the write-lock
+** on the log file (obtained using sqlite3WalBeginWriteTransaction()).
+**
+** The difference between this function and walFrames() is that this
+** function wraps walFrames() in an SEH_TRY{...} block.
+*/
+SQLITE_PRIVATE int sqlite3WalFrames(
+ Wal *pWal, /* Wal handle to write to */
+ int szPage, /* Database page-size in bytes */
+ PgHdr *pList, /* List of dirty pages to write */
+ Pgno nTruncate, /* Database size after this commit */
+ int isCommit, /* True if this is a commit */
+ int sync_flags /* Flags to pass to OsSync() (or 0) */
+){
+ int rc;
+ SEH_TRY {
+ rc = walFrames(pWal, szPage, pList, nTruncate, isCommit, sync_flags);
+ }
+ SEH_EXCEPT( rc = walHandleException(pWal); )
+ return rc;
+}
+
+/*
** This routine is called to implement sqlite3_wal_checkpoint() and
** related interfaces.
**
@@ -67376,10 +68780,9 @@ SQLITE_PRIVATE int sqlite3WalCheckpoint(
if( pWal->readOnly ) return SQLITE_READONLY;
WALTRACE(("WAL%p: checkpoint begins\n", pWal));
- /* Enable blocking locks, if possible. If blocking locks are successfully
- ** enabled, set xBusy2=0 so that the busy-handler is never invoked. */
+ /* Enable blocking locks, if possible. */
sqlite3WalDb(pWal, db);
- (void)walEnableBlocking(pWal);
+ if( xBusy2 ) (void)walEnableBlocking(pWal);
/* IMPLEMENTATION-OF: R-62028-47212 All calls obtain an exclusive
** "checkpoint" lock on the database file.
@@ -67418,30 +68821,38 @@ SQLITE_PRIVATE int sqlite3WalCheckpoint(
/* Read the wal-index header. */
- if( rc==SQLITE_OK ){
- walDisableBlocking(pWal);
- rc = walIndexReadHdr(pWal, &isChanged);
- (void)walEnableBlocking(pWal);
- if( isChanged && pWal->pDbFd->pMethods->iVersion>=3 ){
- sqlite3OsUnfetch(pWal->pDbFd, 0, 0);
+ SEH_TRY {
+ if( rc==SQLITE_OK ){
+ /* For a passive checkpoint, do not re-enable blocking locks after
+ ** reading the wal-index header. A passive checkpoint should not block
+ ** or invoke the busy handler. The only lock such a checkpoint may
+ ** attempt to obtain is a lock on a read-slot, and it should give up
+ ** immediately and do a partial checkpoint if it cannot obtain it. */
+ walDisableBlocking(pWal);
+ rc = walIndexReadHdr(pWal, &isChanged);
+ if( eMode2!=SQLITE_CHECKPOINT_PASSIVE ) (void)walEnableBlocking(pWal);
+ if( isChanged && pWal->pDbFd->pMethods->iVersion>=3 ){
+ sqlite3OsUnfetch(pWal->pDbFd, 0, 0);
+ }
}
- }
-
- /* Copy data from the log to the database file. */
- if( rc==SQLITE_OK ){
- if( pWal->hdr.mxFrame && walPagesize(pWal)!=nBuf ){
- rc = SQLITE_CORRUPT_BKPT;
- }else{
- rc = walCheckpoint(pWal, db, eMode2, xBusy2, pBusyArg, sync_flags, zBuf);
- }
+ /* Copy data from the log to the database file. */
+ if( rc==SQLITE_OK ){
+ if( pWal->hdr.mxFrame && walPagesize(pWal)!=nBuf ){
+ rc = SQLITE_CORRUPT_BKPT;
+ }else{
+ rc = walCheckpoint(pWal, db, eMode2, xBusy2, pBusyArg, sync_flags,zBuf);
+ }
- /* If no error occurred, set the output variables. */
- if( rc==SQLITE_OK || rc==SQLITE_BUSY ){
- if( pnLog ) *pnLog = (int)pWal->hdr.mxFrame;
- if( pnCkpt ) *pnCkpt = (int)(walCkptInfo(pWal)->nBackfill);
+ /* If no error occurred, set the output variables. */
+ if( rc==SQLITE_OK || rc==SQLITE_BUSY ){
+ if( pnLog ) *pnLog = (int)pWal->hdr.mxFrame;
+ SEH_INJECT_FAULT;
+ if( pnCkpt ) *pnCkpt = (int)(walCkptInfo(pWal)->nBackfill);
+ }
}
}
+ SEH_EXCEPT( rc = walHandleException(pWal); )
if( isChanged ){
/* If a new wal-index header was loaded before the checkpoint was
@@ -67518,7 +68929,9 @@ SQLITE_PRIVATE int sqlite3WalExclusiveMode(Wal *pWal, int op){
** locks are taken in this case). Nor should the pager attempt to
** upgrade to exclusive-mode following such an error.
*/
+#ifndef SQLITE_USE_SEH
assert( pWal->readLock>=0 || pWal->lockError );
+#endif
assert( pWal->readLock>=0 || (op<=0 && pWal->exclusiveMode==0) );
if( op==0 ){
@@ -67619,16 +69032,19 @@ SQLITE_API int sqlite3_snapshot_cmp(sqlite3_snapshot *p1, sqlite3_snapshot *p2){
*/
SQLITE_PRIVATE int sqlite3WalSnapshotCheck(Wal *pWal, sqlite3_snapshot *pSnapshot){
int rc;
- rc = walLockShared(pWal, WAL_CKPT_LOCK);
- if( rc==SQLITE_OK ){
- WalIndexHdr *pNew = (WalIndexHdr*)pSnapshot;
- if( memcmp(pNew->aSalt, pWal->hdr.aSalt, sizeof(pWal->hdr.aSalt))
- || pNew->mxFrame<walCkptInfo(pWal)->nBackfillAttempted
- ){
- rc = SQLITE_ERROR_SNAPSHOT;
- walUnlockShared(pWal, WAL_CKPT_LOCK);
+ SEH_TRY {
+ rc = walLockShared(pWal, WAL_CKPT_LOCK);
+ if( rc==SQLITE_OK ){
+ WalIndexHdr *pNew = (WalIndexHdr*)pSnapshot;
+ if( memcmp(pNew->aSalt, pWal->hdr.aSalt, sizeof(pWal->hdr.aSalt))
+ || pNew->mxFrame<walCkptInfo(pWal)->nBackfillAttempted
+ ){
+ rc = SQLITE_ERROR_SNAPSHOT;
+ walUnlockShared(pWal, WAL_CKPT_LOCK);
+ }
}
}
+ SEH_EXCEPT( rc = walHandleException(pWal); )
return rc;
}
@@ -67751,7 +69167,7 @@ SQLITE_PRIVATE sqlite3_file *sqlite3WalFile(Wal *pWal){
** 22 1 Min embedded payload fraction (must be 32)
** 23 1 Min leaf payload fraction (must be 32)
** 24 4 File change counter
-** 28 4 Reserved for future use
+** 28 4 The size of the database in pages
** 32 4 First freelist page
** 36 4 Number of freelist pages in the file
** 40 60 15 4-byte meta values passed to higher layers
@@ -67867,7 +69283,7 @@ SQLITE_PRIVATE sqlite3_file *sqlite3WalFile(Wal *pWal){
** 0x81 0x00 becomes 0x00000080
** 0x82 0x00 becomes 0x00000100
** 0x80 0x7f becomes 0x0000007f
-** 0x8a 0x91 0xd1 0xac 0x78 becomes 0x12345678
+** 0x81 0x91 0xd1 0xac 0x78 becomes 0x12345678
** 0x81 0x81 0x81 0x81 0x01 becomes 0x10204081
**
** Variable length integers are used for rowids and to hold the number of
@@ -67950,7 +69366,7 @@ typedef struct CellInfo CellInfo;
** page that has been loaded into memory. The information in this object
** is derived from the raw on-disk page content.
**
-** As each database page is loaded into memory, the pager allocats an
+** As each database page is loaded into memory, the pager allocates an
** instance of this object and zeros the first 8 bytes. (This is the
** "extra" information associated with each page of the pager.)
**
@@ -68382,7 +69798,7 @@ struct IntegrityCk {
BtShared *pBt; /* The tree being checked out */
Pager *pPager; /* The associated pager. Also accessible by pBt->pPager */
u8 *aPgRef; /* 1 bit per page in the db (see above) */
- Pgno nPage; /* Number of pages in the database */
+ Pgno nCkPage; /* Pages in the database. 0 for partial check */
int mxErr; /* Stop accumulating errors when this reaches zero */
int nErr; /* Number of messages written to zErrMsg so far */
int rc; /* SQLITE_OK, SQLITE_NOMEM, or SQLITE_INTERRUPT */
@@ -68406,7 +69822,7 @@ struct IntegrityCk {
/*
** get2byteAligned(), unlike get2byte(), requires that its argument point to a
-** two-byte aligned address. get2bytea() is only used for accessing the
+** two-byte aligned address. get2byteAligned() is only used for accessing the
** cell addresses in a btree header.
*/
#if SQLITE_BYTEORDER==4321
@@ -68583,7 +69999,7 @@ SQLITE_PRIVATE int sqlite3BtreeHoldsMutex(Btree *p){
**
** There is a corresponding leave-all procedures.
**
-** Enter the mutexes in accending order by BtShared pointer address
+** Enter the mutexes in ascending order by BtShared pointer address
** to avoid the possibility of deadlock when two threads with
** two or more btrees in common both try to lock all their btrees
** at the same instant.
@@ -70250,7 +71666,7 @@ static void ptrmapPutOvflPtr(MemPage *pPage, MemPage *pSrc, u8 *pCell,int *pRC){
pPage->xParseCell(pPage, pCell, &info);
if( info.nLocal<info.nPayload ){
Pgno ovfl;
- if( SQLITE_WITHIN(pSrc->aDataEnd, pCell, pCell+info.nLocal) ){
+ if( SQLITE_OVERFLOW(pSrc->aDataEnd, pCell, pCell+info.nLocal) ){
testcase( pSrc!=pPage );
*pRC = SQLITE_CORRUPT_BKPT;
return;
@@ -70351,7 +71767,7 @@ static int defragmentPage(MemPage *pPage, int nMaxFrag){
iCellStart = get2byte(&data[hdr+5]);
if( nCell>0 ){
temp = sqlite3PagerTempSpace(pPage->pBt->pPager);
- memcpy(&temp[iCellStart], &data[iCellStart], usableSize - iCellStart);
+ memcpy(temp, data, usableSize);
src = temp;
for(i=0; i<nCell; i++){
u8 *pAddr; /* The i-th cell pointer */
@@ -70575,7 +71991,7 @@ static SQLITE_INLINE int allocateSpace(MemPage *pPage, int nByte, int *pIdx){
**
** Even though the freeblock list was checked by btreeComputeFreeSpace(),
** that routine will not detect overlap between cells or freeblocks. Nor
-** does it detect cells or freeblocks that encrouch into the reserved bytes
+** does it detect cells or freeblocks that encroach into the reserved bytes
** at the end of the page. So do additional corruption checks inside this
** routine and return SQLITE_CORRUPT if any problems are found.
*/
@@ -71034,68 +72450,41 @@ SQLITE_PRIVATE Pgno sqlite3BtreeLastPage(Btree *p){
/*
** Get a page from the pager and initialize it.
-**
-** If pCur!=0 then the page is being fetched as part of a moveToChild()
-** call. Do additional sanity checking on the page in this case.
-** And if the fetch fails, this routine must decrement pCur->iPage.
-**
-** The page is fetched as read-write unless pCur is not NULL and is
-** a read-only cursor.
-**
-** If an error occurs, then *ppPage is undefined. It
-** may remain unchanged, or it may be set to an invalid value.
*/
static int getAndInitPage(
BtShared *pBt, /* The database file */
Pgno pgno, /* Number of the page to get */
MemPage **ppPage, /* Write the page pointer here */
- BtCursor *pCur, /* Cursor to receive the page, or NULL */
int bReadOnly /* True for a read-only page */
){
int rc;
DbPage *pDbPage;
+ MemPage *pPage;
assert( sqlite3_mutex_held(pBt->mutex) );
- assert( pCur==0 || ppPage==&pCur->pPage );
- assert( pCur==0 || bReadOnly==pCur->curPagerFlags );
- assert( pCur==0 || pCur->iPage>0 );
if( pgno>btreePagecount(pBt) ){
- rc = SQLITE_CORRUPT_BKPT;
- goto getAndInitPage_error1;
+ *ppPage = 0;
+ return SQLITE_CORRUPT_BKPT;
}
rc = sqlite3PagerGet(pBt->pPager, pgno, (DbPage**)&pDbPage, bReadOnly);
if( rc ){
- goto getAndInitPage_error1;
+ *ppPage = 0;
+ return rc;
}
- *ppPage = (MemPage*)sqlite3PagerGetExtra(pDbPage);
- if( (*ppPage)->isInit==0 ){
+ pPage = (MemPage*)sqlite3PagerGetExtra(pDbPage);
+ if( pPage->isInit==0 ){
btreePageFromDbPage(pDbPage, pgno, pBt);
- rc = btreeInitPage(*ppPage);
+ rc = btreeInitPage(pPage);
if( rc!=SQLITE_OK ){
- goto getAndInitPage_error2;
+ releasePage(pPage);
+ *ppPage = 0;
+ return rc;
}
}
- assert( (*ppPage)->pgno==pgno || CORRUPT_DB );
- assert( (*ppPage)->aData==sqlite3PagerGetData(pDbPage) );
-
- /* If obtaining a child page for a cursor, we must verify that the page is
- ** compatible with the root page. */
- if( pCur && ((*ppPage)->nCell<1 || (*ppPage)->intKey!=pCur->curIntKey) ){
- rc = SQLITE_CORRUPT_PGNO(pgno);
- goto getAndInitPage_error2;
- }
+ assert( pPage->pgno==pgno || CORRUPT_DB );
+ assert( pPage->aData==sqlite3PagerGetData(pDbPage) );
+ *ppPage = pPage;
return SQLITE_OK;
-
-getAndInitPage_error2:
- releasePage(*ppPage);
-getAndInitPage_error1:
- if( pCur ){
- pCur->iPage--;
- pCur->pPage = pCur->apPage[pCur->iPage];
- }
- testcase( pgno==0 );
- assert( pgno!=0 || rc!=SQLITE_OK );
- return rc;
}
/*
@@ -71178,7 +72567,7 @@ static void pageReinit(DbPage *pData){
** call to btreeInitPage() will likely return SQLITE_CORRUPT.
** But no harm is done by this. And it is very important that
** btreeInitPage() be called on every btree page so we make
- ** the call for every page that comes in for re-initing. */
+ ** the call for every page that comes in for re-initializing. */
btreeInitPage(pPage);
}
}
@@ -71357,6 +72746,9 @@ SQLITE_PRIVATE int sqlite3BtreeOpen(
assert( sizeof(u16)==2 );
assert( sizeof(Pgno)==4 );
+ /* Suppress false-positive compiler warning from PVS-Studio */
+ memset(&zDbHeader[16], 0, 8);
+
pBt = sqlite3MallocZero( sizeof(*pBt) );
if( pBt==0 ){
rc = SQLITE_NOMEM_BKPT;
@@ -71573,7 +72965,7 @@ static SQLITE_NOINLINE int allocateTempSpace(BtShared *pBt){
** can mean that fillInCell() only initializes the first 2 or 3
** bytes of pTmpSpace, but that the first 4 bytes are copied from
** it into a database page. This is not actually a problem, but it
- ** does cause a valgrind error when the 1 or 2 bytes of unitialized
+ ** does cause a valgrind error when the 1 or 2 bytes of uninitialized
** data is passed to system call write(). So to avoid this error,
** zero the first 4 bytes of temp space here.
**
@@ -71808,7 +73200,7 @@ SQLITE_PRIVATE int sqlite3BtreeGetReserveNoMutex(Btree *p){
/*
** Return the number of bytes of space at the end of every page that
-** are intentually left unused. This is the "reserved" space that is
+** are intentionally left unused. This is the "reserved" space that is
** sometimes used by extensions.
**
** The value returned is the larger of the current reserve size and
@@ -72055,7 +73447,6 @@ static int lockBtree(BtShared *pBt){
){
goto page1_init_failed;
}
- pBt->btsFlags |= BTS_PAGESIZE_FIXED;
assert( (pageSize & 7)==0 );
/* EVIDENCE-OF: R-59310-51205 The "reserved space" size in the 1-byte
** integer at offset 20 is the number of bytes of space at the end of
@@ -72075,6 +73466,7 @@ static int lockBtree(BtShared *pBt){
releasePageOne(pPage1);
pBt->usableSize = usableSize;
pBt->pageSize = pageSize;
+ pBt->btsFlags |= BTS_PAGESIZE_FIXED;
freeTempSpace(pBt);
rc = sqlite3PagerSetPagesize(pBt->pPager, &pBt->pageSize,
pageSize-usableSize);
@@ -72094,6 +73486,7 @@ static int lockBtree(BtShared *pBt){
if( usableSize<480 ){
goto page1_init_failed;
}
+ pBt->btsFlags |= BTS_PAGESIZE_FIXED;
pBt->pageSize = pageSize;
pBt->usableSize = usableSize;
#ifndef SQLITE_OMIT_AUTOVACUUM
@@ -72272,7 +73665,11 @@ SQLITE_PRIVATE int sqlite3BtreeNewDb(Btree *p){
** when A already has a read lock, we encourage A to give up and let B
** proceed.
*/
-SQLITE_PRIVATE int sqlite3BtreeBeginTrans(Btree *p, int wrflag, int *pSchemaVersion){
+static SQLITE_NOINLINE int btreeBeginTrans(
+ Btree *p, /* The btree in which to start the transaction */
+ int wrflag, /* True to start a write transaction */
+ int *pSchemaVersion /* Put schema version number here, if not NULL */
+){
BtShared *pBt = p->pBt;
Pager *pPager = pBt->pPager;
int rc = SQLITE_OK;
@@ -72444,6 +73841,28 @@ trans_begun:
sqlite3BtreeLeave(p);
return rc;
}
+SQLITE_PRIVATE int sqlite3BtreeBeginTrans(Btree *p, int wrflag, int *pSchemaVersion){
+ BtShared *pBt;
+ if( p->sharable
+ || p->inTrans==TRANS_NONE
+ || (p->inTrans==TRANS_READ && wrflag!=0)
+ ){
+ return btreeBeginTrans(p,wrflag,pSchemaVersion);
+ }
+ pBt = p->pBt;
+ if( pSchemaVersion ){
+ *pSchemaVersion = get4byte(&pBt->pPage1->aData[40]);
+ }
+ if( wrflag ){
+ /* This call makes sure that the pager has the correct number of
+ ** open savepoints. If the second parameter is greater than 0 and
+ ** the sub-journal is not already open, then it will be opened here.
+ */
+ return sqlite3PagerOpenSavepoint(pBt->pPager, p->db->nSavepoint);
+ }else{
+ return SQLITE_OK;
+ }
+}
#ifndef SQLITE_OMIT_AUTOVACUUM
@@ -73539,7 +74958,6 @@ SQLITE_PRIVATE void sqlite3BtreeCursorUnpin(BtCursor *pCur){
pCur->curFlags &= ~BTCF_Pinned;
}
-#ifdef SQLITE_ENABLE_OFFSET_SQL_FUNC
/*
** Return the offset into the database file for the start of the
** payload to which the cursor is pointing.
@@ -73551,7 +74969,6 @@ SQLITE_PRIVATE i64 sqlite3BtreeOffset(BtCursor *pCur){
return (i64)pCur->pBt->pageSize*((i64)pCur->pPage->pgno - 1) +
(i64)(pCur->info.pPayload - pCur->pPage->aData);
}
-#endif /* SQLITE_ENABLE_OFFSET_SQL_FUNC */
/*
** Return the number of bytes of payload for the entry that pCur is
@@ -73577,7 +74994,7 @@ SQLITE_PRIVATE u32 sqlite3BtreePayloadSize(BtCursor *pCur){
** routine always returns 2147483647 (which is the largest record
** that SQLite can handle) or more. But returning a smaller value might
** prevent large memory allocations when trying to interpret a
-** corrupt datrabase.
+** corrupt database.
**
** The current implementation merely returns the size of the underlying
** database file.
@@ -73877,7 +75294,6 @@ static int accessPayload(
assert( aWrite>=pBufStart ); /* due to (6) */
memcpy(aSave, aWrite, 4);
rc = sqlite3OsRead(fd, aWrite, a+4, (i64)pBt->pageSize*(nextPage-1));
- if( rc && nextPage>pBt->nPage ) rc = SQLITE_CORRUPT_BKPT;
nextPage = get4byte(aWrite);
memcpy(aWrite, aSave, 4);
}else
@@ -74039,6 +75455,7 @@ SQLITE_PRIVATE const void *sqlite3BtreePayloadFetch(BtCursor *pCur, u32 *pAmt){
** vice-versa).
*/
static int moveToChild(BtCursor *pCur, u32 newPgno){
+ int rc;
assert( cursorOwnsBtShared(pCur) );
assert( pCur->eState==CURSOR_VALID );
assert( pCur->iPage<BTCURSOR_MAX_DEPTH );
@@ -74052,8 +75469,18 @@ static int moveToChild(BtCursor *pCur, u32 newPgno){
pCur->apPage[pCur->iPage] = pCur->pPage;
pCur->ix = 0;
pCur->iPage++;
- return getAndInitPage(pCur->pBt, newPgno, &pCur->pPage, pCur,
- pCur->curPagerFlags);
+ rc = getAndInitPage(pCur->pBt, newPgno, &pCur->pPage, pCur->curPagerFlags);
+ assert( pCur->pPage!=0 || rc!=SQLITE_OK );
+ if( rc==SQLITE_OK
+ && (pCur->pPage->nCell<1 || pCur->pPage->intKey!=pCur->curIntKey)
+ ){
+ releasePage(pCur->pPage);
+ rc = SQLITE_CORRUPT_PGNO(newPgno);
+ }
+ if( rc ){
+ pCur->pPage = pCur->apPage[--pCur->iPage];
+ }
+ return rc;
}
#ifdef SQLITE_DEBUG
@@ -74160,7 +75587,7 @@ static int moveToRoot(BtCursor *pCur){
sqlite3BtreeClearCursor(pCur);
}
rc = getAndInitPage(pCur->pBt, pCur->pgnoRoot, &pCur->pPage,
- 0, pCur->curPagerFlags);
+ pCur->curPagerFlags);
if( rc!=SQLITE_OK ){
pCur->eState = CURSOR_INVALID;
return rc;
@@ -74272,7 +75699,7 @@ SQLITE_PRIVATE int sqlite3BtreeFirst(BtCursor *pCur, int *pRes){
*pRes = 0;
rc = moveToLeftmost(pCur);
}else if( rc==SQLITE_EMPTY ){
- assert( pCur->pgnoRoot==0 || pCur->pPage->nCell==0 );
+ assert( pCur->pgnoRoot==0 || (pCur->pPage!=0 && pCur->pPage->nCell==0) );
*pRes = 1;
rc = SQLITE_OK;
}
@@ -74377,7 +75804,7 @@ SQLITE_PRIVATE int sqlite3BtreeTableMoveto(
/* If the requested key is one more than the previous key, then
** try to get there using sqlite3BtreeNext() rather than a full
** binary search. This is an optimization only. The correct answer
- ** is still obtained without this case, only a little more slowely */
+ ** is still obtained without this case, only a little more slowly. */
if( pCur->info.nKey+1==intKey ){
*pRes = 0;
rc = sqlite3BtreeNext(pCur, 0);
@@ -74773,10 +76200,36 @@ bypass_moveto_root:
}else{
chldPg = get4byte(findCell(pPage, lwr));
}
- pCur->ix = (u16)lwr;
- rc = moveToChild(pCur, chldPg);
- if( rc ) break;
- }
+
+ /* This block is similar to an in-lined version of:
+ **
+ ** pCur->ix = (u16)lwr;
+ ** rc = moveToChild(pCur, chldPg);
+ ** if( rc ) break;
+ */
+ pCur->info.nSize = 0;
+ pCur->curFlags &= ~(BTCF_ValidNKey|BTCF_ValidOvfl);
+ if( pCur->iPage>=(BTCURSOR_MAX_DEPTH-1) ){
+ return SQLITE_CORRUPT_BKPT;
+ }
+ pCur->aiIdx[pCur->iPage] = (u16)lwr;
+ pCur->apPage[pCur->iPage] = pCur->pPage;
+ pCur->ix = 0;
+ pCur->iPage++;
+ rc = getAndInitPage(pCur->pBt, chldPg, &pCur->pPage, pCur->curPagerFlags);
+ if( rc==SQLITE_OK
+ && (pCur->pPage->nCell<1 || pCur->pPage->intKey!=pCur->curIntKey)
+ ){
+ releasePage(pCur->pPage);
+ rc = SQLITE_CORRUPT_PGNO(chldPg);
+ }
+ if( rc ){
+ pCur->pPage = pCur->apPage[--pCur->iPage];
+ break;
+ }
+ /*
+ ***** End of in-lined moveToChild() call */
+ }
moveto_index_finish:
pCur->info.nSize = 0;
assert( (pCur->curFlags & BTCF_ValidOvfl)==0 );
@@ -74960,7 +76413,10 @@ static SQLITE_NOINLINE int btreePrevious(BtCursor *pCur){
}
pPage = pCur->pPage;
- assert( pPage->isInit );
+ if( sqlite3FaultSim(412) ) pPage->isInit = 0;
+ if( !pPage->isInit ){
+ return SQLITE_CORRUPT_BKPT;
+ }
if( !pPage->leaf ){
int idx = pCur->ix;
rc = moveToChild(pCur, get4byte(findCell(pPage, idx)));
@@ -75560,7 +77016,7 @@ static SQLITE_NOINLINE int clearCellOverflow(
/* Call xParseCell to compute the size of a cell. If the cell contains
** overflow, then invoke cellClearOverflow to clear out that overflow.
-** STore the result code (SQLITE_OK or some error code) in rc.
+** Store the result code (SQLITE_OK or some error code) in rc.
**
** Implemented as macro to force inlining for performance.
*/
@@ -76171,12 +77627,13 @@ static int rebuildPage(
int k; /* Current slot in pCArray->apEnd[] */
u8 *pSrcEnd; /* Current pCArray->apEnd[k] value */
+ assert( nCell>0 );
assert( i<iEnd );
j = get2byte(&aData[hdr+5]);
- if( NEVER(j>(u32)usableSize) ){ j = 0; }
+ if( j>(u32)usableSize ){ j = 0; }
memcpy(&pTmp[j], &aData[j], usableSize - j);
- for(k=0; pCArray->ixNx[k]<=i && ALWAYS(k<NB*2); k++){}
+ for(k=0; ALWAYS(k<NB*2) && pCArray->ixNx[k]<=i; k++){}
pSrcEnd = pCArray->apEnd[k];
pData = pEnd;
@@ -76239,7 +77696,7 @@ static int rebuildPage(
** Finally, argument pBegin points to the byte immediately following the
** end of the space required by this page for the cell-pointer area (for
** all cells - not just those inserted by the current call). If the content
-** area must be extended to before this point in order to accomodate all
+** area must be extended to before this point in order to accommodate all
** cells in apCell[], then the cells do not fit and non-zero is returned.
*/
static int pageInsertArray(
@@ -76259,7 +77716,7 @@ static int pageInsertArray(
u8 *pEnd; /* Maximum extent of cell data */
assert( CORRUPT_DB || pPg->hdrOffset==0 ); /* Never called on page 1 */
if( iEnd<=iFirst ) return 0;
- for(k=0; pCArray->ixNx[k]<=i && ALWAYS(k<NB*2); k++){}
+ for(k=0; ALWAYS(k<NB*2) && pCArray->ixNx[k]<=i ; k++){}
pEnd = pCArray->apEnd[k];
while( 1 /*Exit by break*/ ){
int sz, rc;
@@ -76477,6 +77934,7 @@ static int editPage(
return SQLITE_OK;
editpage_fail:
/* Unable to edit this page. Rebuild it from scratch instead. */
+ if( nNew<1 ) return SQLITE_CORRUPT_BKPT;
populateCellCache(pCArray, iNew, nNew);
return rebuildPage(pCArray, iNew, nNew, pPg);
}
@@ -76554,7 +78012,7 @@ static int balance_quick(MemPage *pParent, MemPage *pPage, u8 *pSpace){
** with entries for the new page, and any pointer from the
** cell on the page to an overflow page. If either of these
** operations fails, the return code is set, but the contents
- ** of the parent page are still manipulated by thh code below.
+ ** of the parent page are still manipulated by the code below.
** That is Ok, at this point the parent page is guaranteed to
** be marked as dirty. Returning an error code will cause a
** rollback, undoing any changes made to the parent page.
@@ -76830,7 +78288,7 @@ static int balance_nonroot(
pgno = get4byte(pRight);
while( 1 ){
if( rc==SQLITE_OK ){
- rc = getAndInitPage(pBt, pgno, &apOld[i], 0, 0);
+ rc = getAndInitPage(pBt, pgno, &apOld[i], 0);
}
if( rc ){
memset(apOld, 0, (i+1)*sizeof(MemPage*));
@@ -77144,7 +78602,7 @@ static int balance_nonroot(
}
}
- /* Sanity check: For a non-corrupt database file one of the follwing
+ /* Sanity check: For a non-corrupt database file one of the following
** must be true:
** (1) We found one or more cells (cntNew[0])>0), or
** (2) pPage is a virtual root page. A virtual root page is when
@@ -77369,9 +78827,9 @@ static int balance_nonroot(
iOvflSpace += sz;
assert( sz<=pBt->maxLocal+23 );
assert( iOvflSpace <= (int)pBt->pageSize );
- for(k=0; b.ixNx[k]<=j && ALWAYS(k<NB*2); k++){}
+ for(k=0; ALWAYS(k<NB*2) && b.ixNx[k]<=j; k++){}
pSrcEnd = b.apEnd[k];
- if( SQLITE_WITHIN(pSrcEnd, pCell, pCell+sz) ){
+ if( SQLITE_OVERFLOW(pSrcEnd, pCell, pCell+sz) ){
rc = SQLITE_CORRUPT_BKPT;
goto balance_cleanup;
}
@@ -77405,6 +78863,8 @@ static int balance_nonroot(
for(i=1-nNew; i<nNew; i++){
int iPg = i<0 ? -i : i;
assert( iPg>=0 && iPg<nNew );
+ assert( iPg>=1 || i>=0 );
+ assert( iPg<ArraySize(cntOld) );
if( abDone[iPg] ) continue; /* Skip pages already processed */
if( i>=0 /* On the upwards pass, or... */
|| cntOld[iPg-1]>=cntNew[iPg-1] /* Condition (1) is true */
@@ -77761,7 +79221,7 @@ static int btreeOverwriteContent(
){
int nData = pX->nData - iOffset;
if( nData<=0 ){
- /* Overwritting with zeros */
+ /* Overwriting with zeros */
int i;
for(i=0; i<iAmt && pDest[i]==0; i++){}
if( i<iAmt ){
@@ -77797,7 +79257,7 @@ static int btreeOverwriteContent(
** cell.
*/
static SQLITE_NOINLINE int btreeOverwriteOverflowCell(
- BtCursor *pCur, /* Cursor pointing to cell to ovewrite */
+ BtCursor *pCur, /* Cursor pointing to cell to overwrite */
const BtreePayload *pX /* Content to write into the cell */
){
int iOffset; /* Next byte of pX->pData to write */
@@ -78544,7 +80004,7 @@ static int btreeCreateTable(Btree *p, Pgno *piTable, int createTabFlags){
MemPage *pRoot;
Pgno pgnoRoot;
int rc;
- int ptfFlags; /* Page-type flage for the root page of new table */
+ int ptfFlags; /* Page-type flags for the root page of new table */
assert( sqlite3BtreeHoldsMutex(p) );
assert( pBt->inTransaction==TRANS_WRITE );
@@ -78713,7 +80173,7 @@ static int clearDatabasePage(
if( pgno>btreePagecount(pBt) ){
return SQLITE_CORRUPT_BKPT;
}
- rc = getAndInitPage(pBt, pgno, &pPage, 0, 0);
+ rc = getAndInitPage(pBt, pgno, &pPage, 0);
if( rc ) return rc;
if( (pBt->openFlags & BTREE_SINGLE)==0
&& sqlite3PagerPageRefcount(pPage->pDbPage) != (1 + (pgno==1))
@@ -79134,7 +80594,8 @@ static void checkAppendMsg(
** corresponds to page iPg is already set.
*/
static int getPageReferenced(IntegrityCk *pCheck, Pgno iPg){
- assert( iPg<=pCheck->nPage && sizeof(pCheck->aPgRef[0])==1 );
+ assert( pCheck->aPgRef!=0 );
+ assert( iPg<=pCheck->nCkPage && sizeof(pCheck->aPgRef[0])==1 );
return (pCheck->aPgRef[iPg/8] & (1 << (iPg & 0x07)));
}
@@ -79142,7 +80603,8 @@ static int getPageReferenced(IntegrityCk *pCheck, Pgno iPg){
** Set the bit in the IntegrityCk.aPgRef[] array that corresponds to page iPg.
*/
static void setPageReferenced(IntegrityCk *pCheck, Pgno iPg){
- assert( iPg<=pCheck->nPage && sizeof(pCheck->aPgRef[0])==1 );
+ assert( pCheck->aPgRef!=0 );
+ assert( iPg<=pCheck->nCkPage && sizeof(pCheck->aPgRef[0])==1 );
pCheck->aPgRef[iPg/8] |= (1 << (iPg & 0x07));
}
@@ -79156,7 +80618,7 @@ static void setPageReferenced(IntegrityCk *pCheck, Pgno iPg){
** Also check that the page number is in bounds.
*/
static int checkRef(IntegrityCk *pCheck, Pgno iPage){
- if( iPage>pCheck->nPage || iPage==0 ){
+ if( iPage>pCheck->nCkPage || iPage==0 ){
checkAppendMsg(pCheck, "invalid page number %u", iPage);
return 1;
}
@@ -79379,10 +80841,11 @@ static int checkTreePage(
if( iPage==0 ) return 0;
if( checkRef(pCheck, iPage) ) return 0;
pCheck->zPfx = "Tree %u page %u: ";
- pCheck->v0 = pCheck->v1 = iPage;
+ pCheck->v1 = iPage;
if( (rc = btreeGetPage(pBt, iPage, &pPage, 0))!=0 ){
checkAppendMsg(pCheck,
"unable to get the page. error code=%d", rc);
+ if( rc==SQLITE_IOERR_NOMEM ) pCheck->rc = SQLITE_NOMEM;
goto end_of_check;
}
@@ -79653,15 +81116,15 @@ SQLITE_PRIVATE int sqlite3BtreeIntegrityCheck(
sCheck.db = db;
sCheck.pBt = pBt;
sCheck.pPager = pBt->pPager;
- sCheck.nPage = btreePagecount(sCheck.pBt);
+ sCheck.nCkPage = btreePagecount(sCheck.pBt);
sCheck.mxErr = mxErr;
sqlite3StrAccumInit(&sCheck.errMsg, 0, zErr, sizeof(zErr), SQLITE_MAX_LENGTH);
sCheck.errMsg.printfFlags = SQLITE_PRINTF_INTERNAL;
- if( sCheck.nPage==0 ){
+ if( sCheck.nCkPage==0 ){
goto integrity_ck_cleanup;
}
- sCheck.aPgRef = sqlite3MallocZero((sCheck.nPage / 8)+ 1);
+ sCheck.aPgRef = sqlite3MallocZero((sCheck.nCkPage / 8)+ 1);
if( !sCheck.aPgRef ){
checkOom(&sCheck);
goto integrity_ck_cleanup;
@@ -79673,7 +81136,7 @@ SQLITE_PRIVATE int sqlite3BtreeIntegrityCheck(
}
i = PENDING_BYTE_PAGE(pBt);
- if( i<=sCheck.nPage ) setPageReferenced(&sCheck, i);
+ if( i<=sCheck.nCkPage ) setPageReferenced(&sCheck, i);
/* Check the integrity of the freelist
*/
@@ -79716,6 +81179,7 @@ SQLITE_PRIVATE int sqlite3BtreeIntegrityCheck(
checkPtrmap(&sCheck, aRoot[i], PTRMAP_ROOTPAGE, 0);
}
#endif
+ sCheck.v0 = aRoot[i];
checkTreePage(&sCheck, aRoot[i], &notUsed, LARGEST_INT64);
}
pBt->db->flags = savedDbFlags;
@@ -79723,7 +81187,7 @@ SQLITE_PRIVATE int sqlite3BtreeIntegrityCheck(
/* Make sure every page in the file is referenced
*/
if( !bPartial ){
- for(i=1; i<=sCheck.nPage && sCheck.mxErr; i++){
+ for(i=1; i<=sCheck.nCkPage && sCheck.mxErr; i++){
#ifdef SQLITE_OMIT_AUTOVACUUM
if( getPageReferenced(&sCheck, i)==0 ){
checkAppendMsg(&sCheck, "Page %u: never used", i);
@@ -81143,6 +82607,40 @@ SQLITE_PRIVATE int sqlite3VdbeMemClearAndResize(Mem *pMem, int szNew){
}
/*
+** If pMem is already a string, detect if it is a zero-terminated
+** string, or make it into one if possible, and mark it as such.
+**
+** This is an optimization. Correct operation continues even if
+** this routine is a no-op.
+*/
+SQLITE_PRIVATE void sqlite3VdbeMemZeroTerminateIfAble(Mem *pMem){
+ if( (pMem->flags & (MEM_Str|MEM_Term|MEM_Ephem|MEM_Static))!=MEM_Str ){
+ /* pMem must be a string, and it cannot be an ephemeral or static string */
+ return;
+ }
+ if( pMem->enc!=SQLITE_UTF8 ) return;
+ if( NEVER(pMem->z==0) ) return;
+ if( pMem->flags & MEM_Dyn ){
+ if( pMem->xDel==sqlite3_free
+ && sqlite3_msize(pMem->z) >= (u64)(pMem->n+1)
+ ){
+ pMem->z[pMem->n] = 0;
+ pMem->flags |= MEM_Term;
+ return;
+ }
+ if( pMem->xDel==sqlite3RCStrUnref ){
+ /* Blindly assume that all RCStr objects are zero-terminated */
+ pMem->flags |= MEM_Term;
+ return;
+ }
+ }else if( pMem->szMalloc >= pMem->n+1 ){
+ pMem->z[pMem->n] = 0;
+ pMem->flags |= MEM_Term;
+ return;
+ }
+}
+
+/*
** It is already known that pMem contains an unterminated string.
** Add the zero terminator.
**
@@ -81404,36 +82902,6 @@ SQLITE_PRIVATE void sqlite3VdbeMemReleaseMalloc(Mem *p){
}
/*
-** Convert a 64-bit IEEE double into a 64-bit signed integer.
-** If the double is out of range of a 64-bit signed integer then
-** return the closest available 64-bit signed integer.
-*/
-static SQLITE_NOINLINE i64 doubleToInt64(double r){
-#ifdef SQLITE_OMIT_FLOATING_POINT
- /* When floating-point is omitted, double and int64 are the same thing */
- return r;
-#else
- /*
- ** Many compilers we encounter do not define constants for the
- ** minimum and maximum 64-bit integers, or they define them
- ** inconsistently. And many do not understand the "LL" notation.
- ** So we define our own static constants here using nothing
- ** larger than a 32-bit integer constant.
- */
- static const i64 maxInt = LARGEST_INT64;
- static const i64 minInt = SMALLEST_INT64;
-
- if( r<=(double)minInt ){
- return minInt;
- }else if( r>=(double)maxInt ){
- return maxInt;
- }else{
- return (i64)r;
- }
-#endif
-}
-
-/*
** Return some kind of integer value which is the best we can do
** at representing the value that *pMem describes as an integer.
** If pMem is an integer, then the value is exact. If pMem is
@@ -81459,7 +82927,7 @@ SQLITE_PRIVATE i64 sqlite3VdbeIntValue(const Mem *pMem){
testcase( flags & MEM_IntReal );
return pMem->u.i;
}else if( flags & MEM_Real ){
- return doubleToInt64(pMem->u.r);
+ return sqlite3RealToI64(pMem->u.r);
}else if( (flags & (MEM_Str|MEM_Blob))!=0 && pMem->z!=0 ){
return memIntValue(pMem);
}else{
@@ -81521,7 +82989,7 @@ SQLITE_PRIVATE void sqlite3VdbeIntegerAffinity(Mem *pMem){
if( pMem->flags & MEM_IntReal ){
MemSetTypeFlag(pMem, MEM_Int);
}else{
- i64 ix = doubleToInt64(pMem->u.r);
+ i64 ix = sqlite3RealToI64(pMem->u.r);
/* Only mark the value as an integer if
**
@@ -81589,8 +83057,8 @@ SQLITE_PRIVATE int sqlite3RealSameAsInt(double r1, sqlite3_int64 i){
** from UBSAN.
*/
SQLITE_PRIVATE i64 sqlite3RealToI64(double r){
- if( r<=(double)SMALLEST_INT64 ) return SMALLEST_INT64;
- if( r>=(double)LARGEST_INT64) return LARGEST_INT64;
+ if( r<-9223372036854774784.0 ) return SMALLEST_INT64;
+ if( r>+9223372036854774784.0 ) return LARGEST_INT64;
return (i64)r;
}
@@ -81661,6 +83129,7 @@ SQLITE_PRIVATE int sqlite3VdbeMemCast(Mem *pMem, u8 aff, u8 encoding){
break;
}
default: {
+ int rc;
assert( aff==SQLITE_AFF_TEXT );
assert( MEM_Str==(MEM_Blob>>3) );
pMem->flags |= (pMem->flags&MEM_Blob)>>3;
@@ -81668,7 +83137,9 @@ SQLITE_PRIVATE int sqlite3VdbeMemCast(Mem *pMem, u8 aff, u8 encoding){
assert( pMem->flags & MEM_Str || pMem->db->mallocFailed );
pMem->flags &= ~(MEM_Int|MEM_Real|MEM_IntReal|MEM_Blob|MEM_Zero);
if( encoding!=SQLITE_UTF8 ) pMem->n &= ~1;
- return sqlite3VdbeChangeEncoding(pMem, encoding);
+ rc = sqlite3VdbeChangeEncoding(pMem, encoding);
+ if( rc ) return rc;
+ sqlite3VdbeMemZeroTerminateIfAble(pMem);
}
}
return SQLITE_OK;
@@ -82192,6 +83663,24 @@ SQLITE_PRIVATE const void *sqlite3ValueText(sqlite3_value* pVal, u8 enc){
return valueToText(pVal, enc);
}
+/* Return true if sqlit3_value object pVal is a string or blob value
+** that uses the destructor specified in the second argument.
+**
+** TODO: Maybe someday promote this interface into a published API so
+** that third-party extensions can get access to it?
+*/
+SQLITE_PRIVATE int sqlite3ValueIsOfClass(const sqlite3_value *pVal, void(*xFree)(void*)){
+ if( ALWAYS(pVal!=0)
+ && ALWAYS((pVal->flags & (MEM_Str|MEM_Blob))!=0)
+ && (pVal->flags & MEM_Dyn)!=0
+ && pVal->xDel==xFree
+ ){
+ return 1;
+ }else{
+ return 0;
+ }
+}
+
/*
** Create a new sqlite3_value object.
*/
@@ -82259,6 +83748,7 @@ static sqlite3_value *valueNew(sqlite3 *db, struct ValueNewStat4Ctx *p){
}
pRec->nField = p->iVal+1;
+ sqlite3VdbeMemSetNull(&pRec->aMem[p->iVal]);
return &pRec->aMem[p->iVal];
}
#else
@@ -82317,7 +83807,7 @@ static int valueFromFunction(
#endif
assert( pFunc );
if( (pFunc->funcFlags & (SQLITE_FUNC_CONSTANT|SQLITE_FUNC_SLOCHNG))==0
- || (pFunc->funcFlags & SQLITE_FUNC_NEEDCOLL)
+ || (pFunc->funcFlags & (SQLITE_FUNC_NEEDCOLL|SQLITE_FUNC_RUNONLY))!=0
){
return SQLITE_OK;
}
@@ -82518,6 +84008,7 @@ static int valueFromExpr(
if( pVal ){
pVal->flags = MEM_Int;
pVal->u.i = pExpr->u.zToken[4]==0;
+ sqlite3ValueApplyAffinity(pVal, affinity, enc);
}
}
@@ -83040,14 +84531,44 @@ static int growOpArray(Vdbe *v, int nOp){
** sqlite3CantopenError(lineno)
*/
static void test_addop_breakpoint(int pc, Op *pOp){
- static int n = 0;
+ static u64 n = 0;
(void)pc;
(void)pOp;
n++;
+ if( n==LARGEST_UINT64 ) abort(); /* so that n is used, preventing a warning */
}
#endif
/*
+** Slow paths for sqlite3VdbeAddOp3() and sqlite3VdbeAddOp4Int() for the
+** unusual case when we need to increase the size of the Vdbe.aOp[] array
+** before adding the new opcode.
+*/
+static SQLITE_NOINLINE int growOp3(Vdbe *p, int op, int p1, int p2, int p3){
+ assert( p->nOpAlloc<=p->nOp );
+ if( growOpArray(p, 1) ) return 1;
+ assert( p->nOpAlloc>p->nOp );
+ return sqlite3VdbeAddOp3(p, op, p1, p2, p3);
+}
+static SQLITE_NOINLINE int addOp4IntSlow(
+ Vdbe *p, /* Add the opcode to this VM */
+ int op, /* The new opcode */
+ int p1, /* The P1 operand */
+ int p2, /* The P2 operand */
+ int p3, /* The P3 operand */
+ int p4 /* The P4 operand as an integer */
+){
+ int addr = sqlite3VdbeAddOp3(p, op, p1, p2, p3);
+ if( p->db->mallocFailed==0 ){
+ VdbeOp *pOp = &p->aOp[addr];
+ pOp->p4type = P4_INT32;
+ pOp->p4.i = p4;
+ }
+ return addr;
+}
+
+
+/*
** Add a new instruction to the list of instructions current in the
** VDBE. Return the address of the new instruction.
**
@@ -83057,17 +84578,16 @@ static void test_addop_breakpoint(int pc, Op *pOp){
**
** op The opcode for this instruction
**
-** p1, p2, p3 Operands
-**
-** Use the sqlite3VdbeResolveLabel() function to fix an address and
-** the sqlite3VdbeChangeP4() function to change the value of the P4
-** operand.
+** p1, p2, p3, p4 Operands
*/
-static SQLITE_NOINLINE int growOp3(Vdbe *p, int op, int p1, int p2, int p3){
- assert( p->nOpAlloc<=p->nOp );
- if( growOpArray(p, 1) ) return 1;
- assert( p->nOpAlloc>p->nOp );
- return sqlite3VdbeAddOp3(p, op, p1, p2, p3);
+SQLITE_PRIVATE int sqlite3VdbeAddOp0(Vdbe *p, int op){
+ return sqlite3VdbeAddOp3(p, op, 0, 0, 0);
+}
+SQLITE_PRIVATE int sqlite3VdbeAddOp1(Vdbe *p, int op, int p1){
+ return sqlite3VdbeAddOp3(p, op, p1, 0, 0);
+}
+SQLITE_PRIVATE int sqlite3VdbeAddOp2(Vdbe *p, int op, int p1, int p2){
+ return sqlite3VdbeAddOp3(p, op, p1, p2, 0);
}
SQLITE_PRIVATE int sqlite3VdbeAddOp3(Vdbe *p, int op, int p1, int p2, int p3){
int i;
@@ -83090,6 +84610,9 @@ SQLITE_PRIVATE int sqlite3VdbeAddOp3(Vdbe *p, int op, int p1, int p2, int p3){
pOp->p3 = p3;
pOp->p4.p = 0;
pOp->p4type = P4_NOTUSED;
+
+ /* Replicate this logic in sqlite3VdbeAddOp4Int()
+ ** vvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvv */
#ifdef SQLITE_ENABLE_EXPLAIN_COMMENTS
pOp->zComment = 0;
#endif
@@ -83106,16 +84629,59 @@ SQLITE_PRIVATE int sqlite3VdbeAddOp3(Vdbe *p, int op, int p1, int p2, int p3){
#ifdef SQLITE_VDBE_COVERAGE
pOp->iSrcLine = 0;
#endif
+ /* ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+ ** Replicate in sqlite3VdbeAddOp4Int() */
+
return i;
}
-SQLITE_PRIVATE int sqlite3VdbeAddOp0(Vdbe *p, int op){
- return sqlite3VdbeAddOp3(p, op, 0, 0, 0);
-}
-SQLITE_PRIVATE int sqlite3VdbeAddOp1(Vdbe *p, int op, int p1){
- return sqlite3VdbeAddOp3(p, op, p1, 0, 0);
-}
-SQLITE_PRIVATE int sqlite3VdbeAddOp2(Vdbe *p, int op, int p1, int p2){
- return sqlite3VdbeAddOp3(p, op, p1, p2, 0);
+SQLITE_PRIVATE int sqlite3VdbeAddOp4Int(
+ Vdbe *p, /* Add the opcode to this VM */
+ int op, /* The new opcode */
+ int p1, /* The P1 operand */
+ int p2, /* The P2 operand */
+ int p3, /* The P3 operand */
+ int p4 /* The P4 operand as an integer */
+){
+ int i;
+ VdbeOp *pOp;
+
+ i = p->nOp;
+ if( p->nOpAlloc<=i ){
+ return addOp4IntSlow(p, op, p1, p2, p3, p4);
+ }
+ p->nOp++;
+ pOp = &p->aOp[i];
+ assert( pOp!=0 );
+ pOp->opcode = (u8)op;
+ pOp->p5 = 0;
+ pOp->p1 = p1;
+ pOp->p2 = p2;
+ pOp->p3 = p3;
+ pOp->p4.i = p4;
+ pOp->p4type = P4_INT32;
+
+ /* Replicate this logic in sqlite3VdbeAddOp3()
+ ** vvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvv */
+#ifdef SQLITE_ENABLE_EXPLAIN_COMMENTS
+ pOp->zComment = 0;
+#endif
+#if defined(SQLITE_ENABLE_STMT_SCANSTATUS) || defined(VDBE_PROFILE)
+ pOp->nExec = 0;
+ pOp->nCycle = 0;
+#endif
+#ifdef SQLITE_DEBUG
+ if( p->db->flags & SQLITE_VdbeAddopTrace ){
+ sqlite3VdbePrintOp(0, i, &p->aOp[i]);
+ test_addop_breakpoint(i, &p->aOp[i]);
+ }
+#endif
+#ifdef SQLITE_VDBE_COVERAGE
+ pOp->iSrcLine = 0;
+#endif
+ /* ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+ ** Replicate in sqlite3VdbeAddOp3() */
+
+ return i;
}
/* Generate code for an unconditional jump to instruction iDest
@@ -83293,7 +84859,7 @@ SQLITE_PRIVATE int sqlite3VdbeExplain(Parse *pParse, u8 bPush, const char *zFmt,
if( bPush){
pParse->addrExplain = iThis;
}
- sqlite3VdbeScanStatus(v, iThis, 0, 0, 0, 0);
+ sqlite3VdbeScanStatus(v, iThis, -1, -1, 0, 0);
}
return addr;
}
@@ -83323,26 +84889,6 @@ SQLITE_PRIVATE void sqlite3VdbeAddParseSchemaOp(Vdbe *p, int iDb, char *zWhere,
sqlite3MayAbort(p->pParse);
}
-/*
-** Add an opcode that includes the p4 value as an integer.
-*/
-SQLITE_PRIVATE int sqlite3VdbeAddOp4Int(
- Vdbe *p, /* Add the opcode to this VM */
- int op, /* The new opcode */
- int p1, /* The P1 operand */
- int p2, /* The P2 operand */
- int p3, /* The P3 operand */
- int p4 /* The P4 operand as an integer */
-){
- int addr = sqlite3VdbeAddOp3(p, op, p1, p2, p3);
- if( p->db->mallocFailed==0 ){
- VdbeOp *pOp = &p->aOp[addr];
- pOp->p4type = P4_INT32;
- pOp->p4.i = p4;
- }
- return addr;
-}
-
/* Insert the end of a co-routine
*/
SQLITE_PRIVATE void sqlite3VdbeEndCoroutine(Vdbe *v, int regYield){
@@ -83655,7 +85201,7 @@ static void resolveP2Values(Vdbe *p, int *pMaxFuncArgs){
p->bIsReader = 0;
pOp = &p->aOp[p->nOp-1];
assert( p->aOp[0].opcode==OP_Init );
- while( 1 /* Loop termates when it reaches the OP_Init opcode */ ){
+ while( 1 /* Loop terminates when it reaches the OP_Init opcode */ ){
/* Only JUMP opcodes and the short list of special opcodes in the switch
** below need to be considered. The mkopcodeh.tcl generator script groups
** all these opcodes together near the front of the opcode list. Skip
@@ -83777,6 +85323,10 @@ SQLITE_PRIVATE void sqlite3VdbeNoJumpsOutsideSubrtn(
int iDest = pOp->p2; /* Jump destination */
if( iDest==0 ) continue;
if( pOp->opcode==OP_Gosub ) continue;
+ if( pOp->p3==20230325 && pOp->opcode==OP_NotNull ){
+ /* This is a deliberately taken illegal branch. tag-20230325-2 */
+ continue;
+ }
if( iDest<0 ){
int j = ADDR(iDest);
assert( j>=0 );
@@ -84025,8 +85575,8 @@ SQLITE_PRIVATE void sqlite3VdbeScanStatusCounters(
pScan = 0;
}
if( pScan ){
- pScan->addrLoop = addrLoop;
- pScan->addrVisit = addrVisit;
+ if( addrLoop>0 ) pScan->addrLoop = addrLoop;
+ if( addrVisit>0 ) pScan->addrVisit = addrVisit;
}
}
}
@@ -84109,7 +85659,7 @@ SQLITE_PRIVATE void sqlite3VdbeJumpHereOrPopInst(Vdbe *p, int addr){
/*
** If the input FuncDef structure is ephemeral, then free it. If
-** the FuncDef is not ephermal, then do nothing.
+** the FuncDef is not ephemeral, then do nothing.
*/
static void freeEphemeralFunction(sqlite3 *db, FuncDef *pDef){
assert( db!=0 );
@@ -84170,6 +85720,10 @@ static void freeP4(sqlite3 *db, int p4type, void *p4){
if( db->pnBytesFreed==0 ) sqlite3VtabUnlock((VTable *)p4);
break;
}
+ case P4_TABLEREF: {
+ if( db->pnBytesFreed==0 ) sqlite3DeleteTable(db, (Table*)p4);
+ break;
+ }
}
}
@@ -84273,7 +85827,6 @@ SQLITE_PRIVATE void sqlite3VdbeReleaseRegisters(
}
#endif /* SQLITE_DEBUG */
-
/*
** Change the value of the P4 operand for a specific instruction.
** This routine is useful when a large program is loaded from a
@@ -84298,7 +85851,7 @@ static void SQLITE_NOINLINE vdbeChangeP4Full(
int n
){
if( pOp->p4type ){
- freeP4(p->db, pOp->p4type, pOp->p4.p);
+ assert( pOp->p4type > P4_FREE_IF_LE );
pOp->p4type = 0;
pOp->p4.p = 0;
}
@@ -85194,7 +86747,7 @@ SQLITE_PRIVATE int sqlite3VdbeList(
sqlite3VdbeMemSetInt64(pMem+1, pOp->p2);
sqlite3VdbeMemSetInt64(pMem+2, pOp->p3);
sqlite3VdbeMemSetStr(pMem+3, zP4, -1, SQLITE_UTF8, sqlite3_free);
- p->nResColumn = 4;
+ assert( p->nResColumn==4 );
}else{
sqlite3VdbeMemSetInt64(pMem+0, i);
sqlite3VdbeMemSetStr(pMem+1, (char*)sqlite3OpcodeName(pOp->opcode),
@@ -85213,7 +86766,7 @@ SQLITE_PRIVATE int sqlite3VdbeList(
sqlite3VdbeMemSetNull(pMem+7);
#endif
sqlite3VdbeMemSetStr(pMem+5, zP4, -1, SQLITE_UTF8, sqlite3_free);
- p->nResColumn = 8;
+ assert( p->nResColumn==8 );
}
p->pResultRow = pMem;
if( db->mallocFailed ){
@@ -85427,26 +86980,9 @@ SQLITE_PRIVATE void sqlite3VdbeMakeReady(
resolveP2Values(p, &nArg);
p->usesStmtJournal = (u8)(pParse->isMultiWrite && pParse->mayAbort);
if( pParse->explain ){
- static const char * const azColName[] = {
- "addr", "opcode", "p1", "p2", "p3", "p4", "p5", "comment",
- "id", "parent", "notused", "detail"
- };
- int iFirst, mx, i;
if( nMem<10 ) nMem = 10;
p->explain = pParse->explain;
- if( pParse->explain==2 ){
- sqlite3VdbeSetNumCols(p, 4);
- iFirst = 8;
- mx = 12;
- }else{
- sqlite3VdbeSetNumCols(p, 8);
- iFirst = 0;
- mx = 8;
- }
- for(i=iFirst; i<mx; i++){
- sqlite3VdbeSetColName(p, i-iFirst, COLNAME_NAME,
- azColName[i], SQLITE_STATIC);
- }
+ p->nResColumn = 12 - 4*p->explain;
}
p->expired = 0;
@@ -85498,7 +87034,23 @@ SQLITE_PRIVATE void sqlite3VdbeMakeReady(
SQLITE_PRIVATE void sqlite3VdbeFreeCursor(Vdbe *p, VdbeCursor *pCx){
if( pCx ) sqlite3VdbeFreeCursorNN(p,pCx);
}
+static SQLITE_NOINLINE void freeCursorWithCache(Vdbe *p, VdbeCursor *pCx){
+ VdbeTxtBlbCache *pCache = pCx->pCache;
+ assert( pCx->colCache );
+ pCx->colCache = 0;
+ pCx->pCache = 0;
+ if( pCache->pCValue ){
+ sqlite3RCStrUnref(pCache->pCValue);
+ pCache->pCValue = 0;
+ }
+ sqlite3DbFree(p->db, pCache);
+ sqlite3VdbeFreeCursorNN(p, pCx);
+}
SQLITE_PRIVATE void sqlite3VdbeFreeCursorNN(Vdbe *p, VdbeCursor *pCx){
+ if( pCx->colCache ){
+ freeCursorWithCache(p, pCx);
+ return;
+ }
switch( pCx->eCurType ){
case CURTYPE_SORTER: {
sqlite3VdbeSorterClose(p->db, pCx);
@@ -85599,12 +87151,12 @@ SQLITE_PRIVATE void sqlite3VdbeSetNumCols(Vdbe *p, int nResColumn){
int n;
sqlite3 *db = p->db;
- if( p->nResColumn ){
- releaseMemArray(p->aColName, p->nResColumn*COLNAME_N);
+ if( p->nResAlloc ){
+ releaseMemArray(p->aColName, p->nResAlloc*COLNAME_N);
sqlite3DbFree(db, p->aColName);
}
n = nResColumn*COLNAME_N;
- p->nResColumn = (u16)nResColumn;
+ p->nResColumn = p->nResAlloc = (u16)nResColumn;
p->aColName = (Mem*)sqlite3DbMallocRawNN(db, sizeof(Mem)*n );
if( p->aColName==0 ) return;
initMemArray(p->aColName, n, db, MEM_Null);
@@ -85629,14 +87181,14 @@ SQLITE_PRIVATE int sqlite3VdbeSetColName(
){
int rc;
Mem *pColName;
- assert( idx<p->nResColumn );
+ assert( idx<p->nResAlloc );
assert( var<COLNAME_N );
if( p->db->mallocFailed ){
assert( !zName || xDel!=SQLITE_DYNAMIC );
return SQLITE_NOMEM_BKPT;
}
assert( p->aColName!=0 );
- pColName = &(p->aColName[idx+var*p->nResColumn]);
+ pColName = &(p->aColName[idx+var*p->nResAlloc]);
rc = sqlite3VdbeMemSetStr(pColName, zName, -1, SQLITE_UTF8, xDel);
assert( rc!=0 || !zName || (pColName->flags&MEM_Term)!=0 );
return rc;
@@ -86149,6 +87701,7 @@ SQLITE_PRIVATE int sqlite3VdbeHalt(Vdbe *p){
sqlite3VdbeLeave(p);
return SQLITE_BUSY;
}else if( rc!=SQLITE_OK ){
+ sqlite3SystemError(db, rc);
p->rc = rc;
sqlite3RollbackAll(db, SQLITE_OK);
p->nChange = 0;
@@ -86460,7 +88013,7 @@ static void sqlite3VdbeClearObject(sqlite3 *db, Vdbe *p){
assert( db!=0 );
assert( p->db==0 || p->db==db );
if( p->aColName ){
- releaseMemArray(p->aColName, p->nResColumn*COLNAME_N);
+ releaseMemArray(p->aColName, p->nResAlloc*COLNAME_N);
sqlite3DbNNFreeNN(db, p->aColName);
}
for(pSub=p->pProgram; pSub; pSub=pNext){
@@ -87060,6 +88613,15 @@ static int vdbeRecordCompareDebug(
if( d1+(u64)serial_type1+2>(u64)nKey1
&& d1+(u64)sqlite3VdbeSerialTypeLen(serial_type1)>(u64)nKey1
){
+ if( serial_type1>=1
+ && serial_type1<=7
+ && d1+(u64)sqlite3VdbeSerialTypeLen(serial_type1)<=(u64)nKey1+8
+ && CORRUPT_DB
+ ){
+ return 1; /* corrupt record not detected by
+ ** sqlite3VdbeRecordCompareWithSkip(). Return true
+ ** to avoid firing the assert() */
+ }
break;
}
@@ -87228,20 +88790,33 @@ SQLITE_PRIVATE SQLITE_NOINLINE int sqlite3BlobCompare(const Mem *pB1, const Mem
return n1 - n2;
}
+/* The following two functions are used only within testcase() to prove
+** test coverage. These functions do no exist for production builds.
+** We must use separate SQLITE_NOINLINE functions here, since otherwise
+** optimizer code movement causes gcov to become very confused.
+*/
+#if defined(SQLITE_COVERAGE_TEST) || defined(SQLITE_DEBUG)
+static int SQLITE_NOINLINE doubleLt(double a, double b){ return a<b; }
+static int SQLITE_NOINLINE doubleEq(double a, double b){ return a==b; }
+#endif
+
/*
** Do a comparison between a 64-bit signed integer and a 64-bit floating-point
** number. Return negative, zero, or positive if the first (i64) is less than,
** equal to, or greater than the second (double).
*/
SQLITE_PRIVATE int sqlite3IntFloatCompare(i64 i, double r){
- if( sizeof(LONGDOUBLE_TYPE)>8 ){
+ if( sqlite3IsNaN(r) ){
+ /* SQLite considers NaN to be a NULL. And all integer values are greater
+ ** than NULL */
+ return 1;
+ }
+ if( sqlite3Config.bUseLongDouble ){
LONGDOUBLE_TYPE x = (LONGDOUBLE_TYPE)i;
testcase( x<r );
testcase( x>r );
testcase( x==r );
- if( x<r ) return -1;
- if( x>r ) return +1; /*NO_TEST*/ /* work around bugs in gcov */
- return 0; /*NO_TEST*/ /* work around bugs in gcov */
+ return (x<r) ? -1 : (x>r);
}else{
i64 y;
double s;
@@ -87251,9 +88826,10 @@ SQLITE_PRIVATE int sqlite3IntFloatCompare(i64 i, double r){
if( i<y ) return -1;
if( i>y ) return +1;
s = (double)i;
- if( s<r ) return -1;
- if( s>r ) return +1;
- return 0;
+ testcase( doubleLt(s,r) );
+ testcase( doubleLt(r,s) );
+ testcase( doubleEq(r,s) );
+ return (s<r) ? -1 : (s>r);
}
}
@@ -87503,7 +89079,7 @@ SQLITE_PRIVATE int sqlite3VdbeRecordCompareWithSkip(
/* Serial types 12 or greater are strings and blobs (greater than
** numbers). Types 10 and 11 are currently "reserved for future
** use", so it doesn't really matter what the results of comparing
- ** them to numberic values are. */
+ ** them to numeric values are. */
rc = serial_type==10 ? -1 : +1;
}else if( serial_type==0 ){
rc = -1;
@@ -88398,7 +89974,15 @@ SQLITE_API int sqlite3_clear_bindings(sqlite3_stmt *pStmt){
int rc = SQLITE_OK;
Vdbe *p = (Vdbe*)pStmt;
#if SQLITE_THREADSAFE
- sqlite3_mutex *mutex = ((Vdbe*)pStmt)->db->mutex;
+ sqlite3_mutex *mutex;
+#endif
+#ifdef SQLITE_ENABLE_API_ARMOR
+ if( pStmt==0 ){
+ return SQLITE_MISUSE_BKPT;
+ }
+#endif
+#if SQLITE_THREADSAFE
+ mutex = p->db->mutex;
#endif
sqlite3_mutex_enter(mutex);
for(i=0; i<p->nVar; i++){
@@ -88621,7 +90205,7 @@ SQLITE_API void sqlite3_value_free(sqlite3_value *pOld){
** is too big or if an OOM occurs.
**
** The invokeValueDestructor(P,X) routine invokes destructor function X()
-** on value P is not going to be used and need to be destroyed.
+** on value P if P is not going to be used and need to be destroyed.
*/
static void setResultStrOrError(
sqlite3_context *pCtx, /* Function context */
@@ -88651,7 +90235,7 @@ static void setResultStrOrError(
static int invokeValueDestructor(
const void *p, /* Value to destroy */
void (*xDel)(void*), /* The destructor */
- sqlite3_context *pCtx /* Set a SQLITE_TOOBIG error if no NULL */
+ sqlite3_context *pCtx /* Set a SQLITE_TOOBIG error if not NULL */
){
assert( xDel!=SQLITE_DYNAMIC );
if( xDel==0 ){
@@ -88661,7 +90245,14 @@ static int invokeValueDestructor(
}else{
xDel((void*)p);
}
+#ifdef SQLITE_ENABLE_API_ARMOR
+ if( pCtx!=0 ){
+ sqlite3_result_error_toobig(pCtx);
+ }
+#else
+ assert( pCtx!=0 );
sqlite3_result_error_toobig(pCtx);
+#endif
return SQLITE_TOOBIG;
}
SQLITE_API void sqlite3_result_blob(
@@ -88670,6 +90261,12 @@ SQLITE_API void sqlite3_result_blob(
int n,
void (*xDel)(void *)
){
+#ifdef SQLITE_ENABLE_API_ARMOR
+ if( pCtx==0 || n<0 ){
+ invokeValueDestructor(z, xDel, pCtx);
+ return;
+ }
+#endif
assert( n>=0 );
assert( sqlite3_mutex_held(pCtx->pOut->db->mutex) );
setResultStrOrError(pCtx, z, n, 0, xDel);
@@ -88680,8 +90277,14 @@ SQLITE_API void sqlite3_result_blob64(
sqlite3_uint64 n,
void (*xDel)(void *)
){
- assert( sqlite3_mutex_held(pCtx->pOut->db->mutex) );
assert( xDel!=SQLITE_DYNAMIC );
+#ifdef SQLITE_ENABLE_API_ARMOR
+ if( pCtx==0 ){
+ invokeValueDestructor(z, xDel, 0);
+ return;
+ }
+#endif
+ assert( sqlite3_mutex_held(pCtx->pOut->db->mutex) );
if( n>0x7fffffff ){
(void)invokeValueDestructor(z, xDel, pCtx);
}else{
@@ -88689,30 +90292,48 @@ SQLITE_API void sqlite3_result_blob64(
}
}
SQLITE_API void sqlite3_result_double(sqlite3_context *pCtx, double rVal){
+#ifdef SQLITE_ENABLE_API_ARMOR
+ if( pCtx==0 ) return;
+#endif
assert( sqlite3_mutex_held(pCtx->pOut->db->mutex) );
sqlite3VdbeMemSetDouble(pCtx->pOut, rVal);
}
SQLITE_API void sqlite3_result_error(sqlite3_context *pCtx, const char *z, int n){
+#ifdef SQLITE_ENABLE_API_ARMOR
+ if( pCtx==0 ) return;
+#endif
assert( sqlite3_mutex_held(pCtx->pOut->db->mutex) );
pCtx->isError = SQLITE_ERROR;
sqlite3VdbeMemSetStr(pCtx->pOut, z, n, SQLITE_UTF8, SQLITE_TRANSIENT);
}
#ifndef SQLITE_OMIT_UTF16
SQLITE_API void sqlite3_result_error16(sqlite3_context *pCtx, const void *z, int n){
+#ifdef SQLITE_ENABLE_API_ARMOR
+ if( pCtx==0 ) return;
+#endif
assert( sqlite3_mutex_held(pCtx->pOut->db->mutex) );
pCtx->isError = SQLITE_ERROR;
sqlite3VdbeMemSetStr(pCtx->pOut, z, n, SQLITE_UTF16NATIVE, SQLITE_TRANSIENT);
}
#endif
SQLITE_API void sqlite3_result_int(sqlite3_context *pCtx, int iVal){
+#ifdef SQLITE_ENABLE_API_ARMOR
+ if( pCtx==0 ) return;
+#endif
assert( sqlite3_mutex_held(pCtx->pOut->db->mutex) );
sqlite3VdbeMemSetInt64(pCtx->pOut, (i64)iVal);
}
SQLITE_API void sqlite3_result_int64(sqlite3_context *pCtx, i64 iVal){
+#ifdef SQLITE_ENABLE_API_ARMOR
+ if( pCtx==0 ) return;
+#endif
assert( sqlite3_mutex_held(pCtx->pOut->db->mutex) );
sqlite3VdbeMemSetInt64(pCtx->pOut, iVal);
}
SQLITE_API void sqlite3_result_null(sqlite3_context *pCtx){
+#ifdef SQLITE_ENABLE_API_ARMOR
+ if( pCtx==0 ) return;
+#endif
assert( sqlite3_mutex_held(pCtx->pOut->db->mutex) );
sqlite3VdbeMemSetNull(pCtx->pOut);
}
@@ -88722,14 +90343,37 @@ SQLITE_API void sqlite3_result_pointer(
const char *zPType,
void (*xDestructor)(void*)
){
- Mem *pOut = pCtx->pOut;
+ Mem *pOut;
+#ifdef SQLITE_ENABLE_API_ARMOR
+ if( pCtx==0 ){
+ invokeValueDestructor(pPtr, xDestructor, 0);
+ return;
+ }
+#endif
+ pOut = pCtx->pOut;
assert( sqlite3_mutex_held(pOut->db->mutex) );
sqlite3VdbeMemRelease(pOut);
pOut->flags = MEM_Null;
sqlite3VdbeMemSetPointer(pOut, pPtr, zPType, xDestructor);
}
SQLITE_API void sqlite3_result_subtype(sqlite3_context *pCtx, unsigned int eSubtype){
- Mem *pOut = pCtx->pOut;
+ Mem *pOut;
+#ifdef SQLITE_ENABLE_API_ARMOR
+ if( pCtx==0 ) return;
+#endif
+#if defined(SQLITE_STRICT_SUBTYPE) && SQLITE_STRICT_SUBTYPE+0!=0
+ if( pCtx->pFunc!=0
+ && (pCtx->pFunc->funcFlags & SQLITE_RESULT_SUBTYPE)==0
+ ){
+ char zErr[200];
+ sqlite3_snprintf(sizeof(zErr), zErr,
+ "misuse of sqlite3_result_subtype() by %s()",
+ pCtx->pFunc->zName);
+ sqlite3_result_error(pCtx, zErr, -1);
+ return;
+ }
+#endif /* SQLITE_STRICT_SUBTYPE */
+ pOut = pCtx->pOut;
assert( sqlite3_mutex_held(pOut->db->mutex) );
pOut->eSubtype = eSubtype & 0xff;
pOut->flags |= MEM_Subtype;
@@ -88740,6 +90384,12 @@ SQLITE_API void sqlite3_result_text(
int n,
void (*xDel)(void *)
){
+#ifdef SQLITE_ENABLE_API_ARMOR
+ if( pCtx==0 ){
+ invokeValueDestructor(z, xDel, 0);
+ return;
+ }
+#endif
assert( sqlite3_mutex_held(pCtx->pOut->db->mutex) );
setResultStrOrError(pCtx, z, n, SQLITE_UTF8, xDel);
}
@@ -88750,6 +90400,12 @@ SQLITE_API void sqlite3_result_text64(
void (*xDel)(void *),
unsigned char enc
){
+#ifdef SQLITE_ENABLE_API_ARMOR
+ if( pCtx==0 ){
+ invokeValueDestructor(z, xDel, 0);
+ return;
+ }
+#endif
assert( sqlite3_mutex_held(pCtx->pOut->db->mutex) );
assert( xDel!=SQLITE_DYNAMIC );
if( enc!=SQLITE_UTF8 ){
@@ -88760,6 +90416,7 @@ SQLITE_API void sqlite3_result_text64(
(void)invokeValueDestructor(z, xDel, pCtx);
}else{
setResultStrOrError(pCtx, z, (int)n, enc, xDel);
+ sqlite3VdbeMemZeroTerminateIfAble(pCtx->pOut);
}
}
#ifndef SQLITE_OMIT_UTF16
@@ -88792,7 +90449,16 @@ SQLITE_API void sqlite3_result_text16le(
}
#endif /* SQLITE_OMIT_UTF16 */
SQLITE_API void sqlite3_result_value(sqlite3_context *pCtx, sqlite3_value *pValue){
- Mem *pOut = pCtx->pOut;
+ Mem *pOut;
+
+#ifdef SQLITE_ENABLE_API_ARMOR
+ if( pCtx==0 ) return;
+ if( pValue==0 ){
+ sqlite3_result_null(pCtx);
+ return;
+ }
+#endif
+ pOut = pCtx->pOut;
assert( sqlite3_mutex_held(pCtx->pOut->db->mutex) );
sqlite3VdbeMemCopy(pOut, pValue);
sqlite3VdbeChangeEncoding(pOut, pCtx->enc);
@@ -88804,7 +90470,12 @@ SQLITE_API void sqlite3_result_zeroblob(sqlite3_context *pCtx, int n){
sqlite3_result_zeroblob64(pCtx, n>0 ? n : 0);
}
SQLITE_API int sqlite3_result_zeroblob64(sqlite3_context *pCtx, u64 n){
- Mem *pOut = pCtx->pOut;
+ Mem *pOut;
+
+#ifdef SQLITE_ENABLE_API_ARMOR
+ if( pCtx==0 ) return SQLITE_MISUSE_BKPT;
+#endif
+ pOut = pCtx->pOut;
assert( sqlite3_mutex_held(pOut->db->mutex) );
if( n>(u64)pOut->db->aLimit[SQLITE_LIMIT_LENGTH] ){
sqlite3_result_error_toobig(pCtx);
@@ -88818,6 +90489,9 @@ SQLITE_API int sqlite3_result_zeroblob64(sqlite3_context *pCtx, u64 n){
#endif
}
SQLITE_API void sqlite3_result_error_code(sqlite3_context *pCtx, int errCode){
+#ifdef SQLITE_ENABLE_API_ARMOR
+ if( pCtx==0 ) return;
+#endif
pCtx->isError = errCode ? errCode : -1;
#ifdef SQLITE_DEBUG
if( pCtx->pVdbe ) pCtx->pVdbe->rcApp = errCode;
@@ -88830,6 +90504,9 @@ SQLITE_API void sqlite3_result_error_code(sqlite3_context *pCtx, int errCode){
/* Force an SQLITE_TOOBIG error. */
SQLITE_API void sqlite3_result_error_toobig(sqlite3_context *pCtx){
+#ifdef SQLITE_ENABLE_API_ARMOR
+ if( pCtx==0 ) return;
+#endif
assert( sqlite3_mutex_held(pCtx->pOut->db->mutex) );
pCtx->isError = SQLITE_TOOBIG;
sqlite3VdbeMemSetStr(pCtx->pOut, "string or blob too big", -1,
@@ -88838,6 +90515,9 @@ SQLITE_API void sqlite3_result_error_toobig(sqlite3_context *pCtx){
/* An SQLITE_NOMEM error. */
SQLITE_API void sqlite3_result_error_nomem(sqlite3_context *pCtx){
+#ifdef SQLITE_ENABLE_API_ARMOR
+ if( pCtx==0 ) return;
+#endif
assert( sqlite3_mutex_held(pCtx->pOut->db->mutex) );
sqlite3VdbeMemSetNull(pCtx->pOut);
pCtx->isError = SQLITE_NOMEM_BKPT;
@@ -89090,6 +90770,9 @@ SQLITE_API int sqlite3_step(sqlite3_stmt *pStmt){
** pointer to it.
*/
SQLITE_API void *sqlite3_user_data(sqlite3_context *p){
+#ifdef SQLITE_ENABLE_API_ARMOR
+ if( p==0 ) return 0;
+#endif
assert( p && p->pFunc );
return p->pFunc->pUserData;
}
@@ -89105,7 +90788,11 @@ SQLITE_API void *sqlite3_user_data(sqlite3_context *p){
** application defined function.
*/
SQLITE_API sqlite3 *sqlite3_context_db_handle(sqlite3_context *p){
+#ifdef SQLITE_ENABLE_API_ARMOR
+ if( p==0 ) return 0;
+#else
assert( p && p->pOut );
+#endif
return p->pOut->db;
}
@@ -89124,7 +90811,11 @@ SQLITE_API sqlite3 *sqlite3_context_db_handle(sqlite3_context *p){
** value, as a signal to the xUpdate routine that the column is unchanged.
*/
SQLITE_API int sqlite3_vtab_nochange(sqlite3_context *p){
+#ifdef SQLITE_ENABLE_API_ARMOR
+ if( p==0 ) return 0;
+#else
assert( p );
+#endif
return sqlite3_value_nochange(p->pOut);
}
@@ -89132,7 +90823,7 @@ SQLITE_API int sqlite3_vtab_nochange(sqlite3_context *p){
** The destructor function for a ValueList object. This needs to be
** a separate function, unknowable to the application, to ensure that
** calls to sqlite3_vtab_in_first()/sqlite3_vtab_in_next() that are not
-** preceeded by activation of IN processing via sqlite3_vtab_int() do not
+** preceded by activation of IN processing via sqlite3_vtab_int() do not
** try to access a fake ValueList object inserted by a hostile extension.
*/
SQLITE_PRIVATE void sqlite3VdbeValueListFree(void *pToDelete){
@@ -89152,7 +90843,7 @@ static int valueFromValueList(
ValueList *pRhs;
*ppOut = 0;
- if( pVal==0 ) return SQLITE_MISUSE;
+ if( pVal==0 ) return SQLITE_MISUSE_BKPT;
if( (pVal->flags & MEM_Dyn)==0 || pVal->xDel!=sqlite3VdbeValueListFree ){
return SQLITE_ERROR;
}else{
@@ -89283,6 +90974,9 @@ SQLITE_API void *sqlite3_aggregate_context(sqlite3_context *p, int nByte){
SQLITE_API void *sqlite3_get_auxdata(sqlite3_context *pCtx, int iArg){
AuxData *pAuxData;
+#ifdef SQLITE_ENABLE_API_ARMOR
+ if( pCtx==0 ) return 0;
+#endif
assert( sqlite3_mutex_held(pCtx->pOut->db->mutex) );
#if SQLITE_ENABLE_STAT4
if( pCtx->pVdbe==0 ) return 0;
@@ -89315,8 +91009,12 @@ SQLITE_API void sqlite3_set_auxdata(
void (*xDelete)(void*)
){
AuxData *pAuxData;
- Vdbe *pVdbe = pCtx->pVdbe;
+ Vdbe *pVdbe;
+#ifdef SQLITE_ENABLE_API_ARMOR
+ if( pCtx==0 ) return;
+#endif
+ pVdbe= pCtx->pVdbe;
assert( sqlite3_mutex_held(pCtx->pOut->db->mutex) );
#ifdef SQLITE_ENABLE_STAT4
if( pVdbe==0 ) goto failed;
@@ -89372,7 +91070,8 @@ SQLITE_API int sqlite3_aggregate_count(sqlite3_context *p){
*/
SQLITE_API int sqlite3_column_count(sqlite3_stmt *pStmt){
Vdbe *pVm = (Vdbe *)pStmt;
- return pVm ? pVm->nResColumn : 0;
+ if( pVm==0 ) return 0;
+ return pVm->nResColumn;
}
/*
@@ -89461,7 +91160,7 @@ static Mem *columnMem(sqlite3_stmt *pStmt, int i){
** sqlite3_column_real()
** sqlite3_column_bytes()
** sqlite3_column_bytes16()
-** sqiite3_column_blob()
+** sqlite3_column_blob()
*/
static void columnMallocFailure(sqlite3_stmt *pStmt)
{
@@ -89546,6 +91245,32 @@ SQLITE_API int sqlite3_column_type(sqlite3_stmt *pStmt, int i){
}
/*
+** Column names appropriate for EXPLAIN or EXPLAIN QUERY PLAN.
+*/
+static const char * const azExplainColNames8[] = {
+ "addr", "opcode", "p1", "p2", "p3", "p4", "p5", "comment", /* EXPLAIN */
+ "id", "parent", "notused", "detail" /* EQP */
+};
+static const u16 azExplainColNames16data[] = {
+ /* 0 */ 'a', 'd', 'd', 'r', 0,
+ /* 5 */ 'o', 'p', 'c', 'o', 'd', 'e', 0,
+ /* 12 */ 'p', '1', 0,
+ /* 15 */ 'p', '2', 0,
+ /* 18 */ 'p', '3', 0,
+ /* 21 */ 'p', '4', 0,
+ /* 24 */ 'p', '5', 0,
+ /* 27 */ 'c', 'o', 'm', 'm', 'e', 'n', 't', 0,
+ /* 35 */ 'i', 'd', 0,
+ /* 38 */ 'p', 'a', 'r', 'e', 'n', 't', 0,
+ /* 45 */ 'n', 'o', 't', 'u', 's', 'e', 'd', 0,
+ /* 53 */ 'd', 'e', 't', 'a', 'i', 'l', 0
+};
+static const u8 iExplainColNames16[] = {
+ 0, 5, 12, 15, 18, 21, 24, 27,
+ 35, 38, 45, 53
+};
+
+/*
** Convert the N-th element of pStmt->pColName[] into a string using
** xFunc() then return that string. If N is out of range, return 0.
**
@@ -89577,15 +91302,29 @@ static const void *columnName(
return 0;
}
#endif
+ if( N<0 ) return 0;
ret = 0;
p = (Vdbe *)pStmt;
db = p->db;
assert( db!=0 );
- n = sqlite3_column_count(pStmt);
- if( N<n && N>=0 ){
+ sqlite3_mutex_enter(db->mutex);
+
+ if( p->explain ){
+ if( useType>0 ) goto columnName_end;
+ n = p->explain==1 ? 8 : 4;
+ if( N>=n ) goto columnName_end;
+ if( useUtf16 ){
+ int i = iExplainColNames16[N + 8*p->explain - 8];
+ ret = (void*)&azExplainColNames16data[i];
+ }else{
+ ret = (void*)azExplainColNames8[N + 8*p->explain - 8];
+ }
+ goto columnName_end;
+ }
+ n = p->nResColumn;
+ if( N<n ){
u8 prior_mallocFailed = db->mallocFailed;
N += useType*n;
- sqlite3_mutex_enter(db->mutex);
#ifndef SQLITE_OMIT_UTF16
if( useUtf16 ){
ret = sqlite3_value_text16((sqlite3_value*)&p->aColName[N]);
@@ -89602,8 +91341,9 @@ static const void *columnName(
sqlite3OomClear(db);
ret = 0;
}
- sqlite3_mutex_leave(db->mutex);
}
+columnName_end:
+ sqlite3_mutex_leave(db->mutex);
return ret;
}
@@ -89696,7 +91436,7 @@ SQLITE_API const void *sqlite3_column_origin_name16(sqlite3_stmt *pStmt, int N){
/*
** Unbind the value bound to variable i in virtual machine p. This is the
** the same as binding a NULL value to the column. If the "i" parameter is
-** out of range, then SQLITE_RANGE is returned. Othewise SQLITE_OK.
+** out of range, then SQLITE_RANGE is returned. Otherwise SQLITE_OK.
**
** A successful evaluation of this routine acquires the mutex on p.
** the mutex is released if any kind of error occurs.
@@ -89711,7 +91451,7 @@ static int vdbeUnbind(Vdbe *p, unsigned int i){
}
sqlite3_mutex_enter(p->db->mutex);
if( p->eVdbeState!=VDBE_READY_STATE ){
- sqlite3Error(p->db, SQLITE_MISUSE);
+ sqlite3Error(p->db, SQLITE_MISUSE_BKPT);
sqlite3_mutex_leave(p->db->mutex);
sqlite3_log(SQLITE_MISUSE,
"bind on a busy prepared statement: [%s]", p->zSql);
@@ -89940,6 +91680,9 @@ SQLITE_API int sqlite3_bind_zeroblob(sqlite3_stmt *pStmt, int i, int n){
SQLITE_API int sqlite3_bind_zeroblob64(sqlite3_stmt *pStmt, int i, sqlite3_uint64 n){
int rc;
Vdbe *p = (Vdbe *)pStmt;
+#ifdef SQLITE_ENABLE_API_ARMOR
+ if( p==0 ) return SQLITE_MISUSE_BKPT;
+#endif
sqlite3_mutex_enter(p->db->mutex);
if( n>(u64)p->db->aLimit[SQLITE_LIMIT_LENGTH] ){
rc = SQLITE_TOOBIG;
@@ -90061,6 +91804,42 @@ SQLITE_API int sqlite3_stmt_isexplain(sqlite3_stmt *pStmt){
}
/*
+** Set the explain mode for a statement.
+*/
+SQLITE_API int sqlite3_stmt_explain(sqlite3_stmt *pStmt, int eMode){
+ Vdbe *v = (Vdbe*)pStmt;
+ int rc;
+#ifdef SQLITE_ENABLE_API_ARMOR
+ if( pStmt==0 ) return SQLITE_MISUSE_BKPT;
+#endif
+ sqlite3_mutex_enter(v->db->mutex);
+ if( ((int)v->explain)==eMode ){
+ rc = SQLITE_OK;
+ }else if( eMode<0 || eMode>2 ){
+ rc = SQLITE_ERROR;
+ }else if( (v->prepFlags & SQLITE_PREPARE_SAVESQL)==0 ){
+ rc = SQLITE_ERROR;
+ }else if( v->eVdbeState!=VDBE_READY_STATE ){
+ rc = SQLITE_BUSY;
+ }else if( v->nMem>=10 && (eMode!=2 || v->haveEqpOps) ){
+ /* No reprepare necessary */
+ v->explain = eMode;
+ rc = SQLITE_OK;
+ }else{
+ v->explain = eMode;
+ rc = sqlite3Reprepare(v);
+ v->haveEqpOps = eMode==2;
+ }
+ if( v->explain ){
+ v->nResColumn = 12 - 4*v->explain;
+ }else{
+ v->nResColumn = v->nResAlloc;
+ }
+ sqlite3_mutex_leave(v->db->mutex);
+ return rc;
+}
+
+/*
** Return true if the prepared statement is in need of being reset.
*/
SQLITE_API int sqlite3_stmt_busy(sqlite3_stmt *pStmt){
@@ -90199,10 +91978,16 @@ static UnpackedRecord *vdbeUnpackRecord(
** a field of the row currently being updated or deleted.
*/
SQLITE_API int sqlite3_preupdate_old(sqlite3 *db, int iIdx, sqlite3_value **ppValue){
- PreUpdate *p = db->pPreUpdate;
+ PreUpdate *p;
Mem *pMem;
int rc = SQLITE_OK;
+#ifdef SQLITE_ENABLE_API_ARMOR
+ if( db==0 || ppValue==0 ){
+ return SQLITE_MISUSE_BKPT;
+ }
+#endif
+ p = db->pPreUpdate;
/* Test that this call is being made from within an SQLITE_DELETE or
** SQLITE_UPDATE pre-update callback, and that iIdx is within range. */
if( !p || p->op==SQLITE_INSERT ){
@@ -90263,7 +92048,12 @@ SQLITE_API int sqlite3_preupdate_old(sqlite3 *db, int iIdx, sqlite3_value **ppVa
** the number of columns in the row being updated, deleted or inserted.
*/
SQLITE_API int sqlite3_preupdate_count(sqlite3 *db){
- PreUpdate *p = db->pPreUpdate;
+ PreUpdate *p;
+#ifdef SQLITE_ENABLE_API_ARMOR
+ p = db!=0 ? db->pPreUpdate : 0;
+#else
+ p = db->pPreUpdate;
+#endif
return (p ? p->keyinfo.nKeyField : 0);
}
#endif /* SQLITE_ENABLE_PREUPDATE_HOOK */
@@ -90281,7 +92071,12 @@ SQLITE_API int sqlite3_preupdate_count(sqlite3 *db){
** or SET DEFAULT action is considered a trigger.
*/
SQLITE_API int sqlite3_preupdate_depth(sqlite3 *db){
- PreUpdate *p = db->pPreUpdate;
+ PreUpdate *p;
+#ifdef SQLITE_ENABLE_API_ARMOR
+ p = db!=0 ? db->pPreUpdate : 0;
+#else
+ p = db->pPreUpdate;
+#endif
return (p ? p->v->nFrame : 0);
}
#endif /* SQLITE_ENABLE_PREUPDATE_HOOK */
@@ -90292,7 +92087,12 @@ SQLITE_API int sqlite3_preupdate_depth(sqlite3 *db){
** only.
*/
SQLITE_API int sqlite3_preupdate_blobwrite(sqlite3 *db){
- PreUpdate *p = db->pPreUpdate;
+ PreUpdate *p;
+#ifdef SQLITE_ENABLE_API_ARMOR
+ p = db!=0 ? db->pPreUpdate : 0;
+#else
+ p = db->pPreUpdate;
+#endif
return (p ? p->iBlobWrite : -1);
}
#endif
@@ -90303,10 +92103,16 @@ SQLITE_API int sqlite3_preupdate_blobwrite(sqlite3 *db){
** a field of the row currently being updated or inserted.
*/
SQLITE_API int sqlite3_preupdate_new(sqlite3 *db, int iIdx, sqlite3_value **ppValue){
- PreUpdate *p = db->pPreUpdate;
+ PreUpdate *p;
int rc = SQLITE_OK;
Mem *pMem;
+#ifdef SQLITE_ENABLE_API_ARMOR
+ if( db==0 || ppValue==0 ){
+ return SQLITE_MISUSE_BKPT;
+ }
+#endif
+ p = db->pPreUpdate;
if( !p || p->op==SQLITE_DELETE ){
rc = SQLITE_MISUSE_BKPT;
goto preupdate_new_out;
@@ -90385,11 +92191,20 @@ SQLITE_API int sqlite3_stmt_scanstatus_v2(
void *pOut /* OUT: Write the answer here */
){
Vdbe *p = (Vdbe*)pStmt;
- VdbeOp *aOp = p->aOp;
- int nOp = p->nOp;
+ VdbeOp *aOp;
+ int nOp;
ScanStatus *pScan = 0;
int idx;
+#ifdef SQLITE_ENABLE_API_ARMOR
+ if( p==0 || pOut==0
+ || iScanStatusOp<SQLITE_SCANSTAT_NLOOP
+ || iScanStatusOp>SQLITE_SCANSTAT_NCYCLE ){
+ return 1;
+ }
+#endif
+ aOp = p->aOp;
+ nOp = p->nOp;
if( p->pFrame ){
VdbeFrame *pFrame;
for(pFrame=p->pFrame; pFrame->pParent; pFrame=pFrame->pParent);
@@ -90536,7 +92351,7 @@ SQLITE_API int sqlite3_stmt_scanstatus(
SQLITE_API void sqlite3_stmt_scanstatus_reset(sqlite3_stmt *pStmt){
Vdbe *p = (Vdbe*)pStmt;
int ii;
- for(ii=0; ii<p->nOp; ii++){
+ for(ii=0; p!=0 && ii<p->nOp; ii++){
Op *pOp = &p->aOp[ii];
pOp->nExec = 0;
pOp->nCycle = 0;
@@ -90875,11 +92690,12 @@ SQLITE_API int sqlite3_found_count = 0;
** sqlite3CantopenError(lineno)
*/
static void test_trace_breakpoint(int pc, Op *pOp, Vdbe *v){
- static int n = 0;
+ static u64 n = 0;
(void)pc;
(void)pOp;
(void)v;
n++;
+ if( n==LARGEST_UINT64 ) abort(); /* So that n is used, preventing a warning */
}
#endif
@@ -91299,6 +93115,9 @@ SQLITE_PRIVATE void sqlite3VdbeMemPrettyPrint(Mem *pMem, StrAccum *pStr){
sqlite3_str_appendchar(pStr, 1, (c>=0x20&&c<=0x7f) ? c : '.');
}
sqlite3_str_appendf(pStr, "]%s", encnames[pMem->enc]);
+ if( f & MEM_Term ){
+ sqlite3_str_appendf(pStr, "(0-term)");
+ }
}
}
#endif
@@ -91435,6 +93254,93 @@ static u64 filterHash(const Mem *aMem, const Op *pOp){
return h;
}
+
+/*
+** For OP_Column, factor out the case where content is loaded from
+** overflow pages, so that the code to implement this case is separate
+** the common case where all content fits on the page. Factoring out
+** the code reduces register pressure and helps the common case
+** to run faster.
+*/
+static SQLITE_NOINLINE int vdbeColumnFromOverflow(
+ VdbeCursor *pC, /* The BTree cursor from which we are reading */
+ int iCol, /* The column to read */
+ int t, /* The serial-type code for the column value */
+ i64 iOffset, /* Offset to the start of the content value */
+ u32 cacheStatus, /* Current Vdbe.cacheCtr value */
+ u32 colCacheCtr, /* Current value of the column cache counter */
+ Mem *pDest /* Store the value into this register. */
+){
+ int rc;
+ sqlite3 *db = pDest->db;
+ int encoding = pDest->enc;
+ int len = sqlite3VdbeSerialTypeLen(t);
+ assert( pC->eCurType==CURTYPE_BTREE );
+ if( len>db->aLimit[SQLITE_LIMIT_LENGTH] ) return SQLITE_TOOBIG;
+ if( len > 4000 && pC->pKeyInfo==0 ){
+ /* Cache large column values that are on overflow pages using
+ ** an RCStr (reference counted string) so that if they are reloaded,
+ ** that do not have to be copied a second time. The overhead of
+ ** creating and managing the cache is such that this is only
+ ** profitable for larger TEXT and BLOB values.
+ **
+ ** Only do this on table-btrees so that writes to index-btrees do not
+ ** need to clear the cache. This buys performance in the common case
+ ** in exchange for generality.
+ */
+ VdbeTxtBlbCache *pCache;
+ char *pBuf;
+ if( pC->colCache==0 ){
+ pC->pCache = sqlite3DbMallocZero(db, sizeof(VdbeTxtBlbCache) );
+ if( pC->pCache==0 ) return SQLITE_NOMEM;
+ pC->colCache = 1;
+ }
+ pCache = pC->pCache;
+ if( pCache->pCValue==0
+ || pCache->iCol!=iCol
+ || pCache->cacheStatus!=cacheStatus
+ || pCache->colCacheCtr!=colCacheCtr
+ || pCache->iOffset!=sqlite3BtreeOffset(pC->uc.pCursor)
+ ){
+ if( pCache->pCValue ) sqlite3RCStrUnref(pCache->pCValue);
+ pBuf = pCache->pCValue = sqlite3RCStrNew( len+3 );
+ if( pBuf==0 ) return SQLITE_NOMEM;
+ rc = sqlite3BtreePayload(pC->uc.pCursor, iOffset, len, pBuf);
+ if( rc ) return rc;
+ pBuf[len] = 0;
+ pBuf[len+1] = 0;
+ pBuf[len+2] = 0;
+ pCache->iCol = iCol;
+ pCache->cacheStatus = cacheStatus;
+ pCache->colCacheCtr = colCacheCtr;
+ pCache->iOffset = sqlite3BtreeOffset(pC->uc.pCursor);
+ }else{
+ pBuf = pCache->pCValue;
+ }
+ assert( t>=12 );
+ sqlite3RCStrRef(pBuf);
+ if( t&1 ){
+ rc = sqlite3VdbeMemSetStr(pDest, pBuf, len, encoding,
+ sqlite3RCStrUnref);
+ pDest->flags |= MEM_Term;
+ }else{
+ rc = sqlite3VdbeMemSetStr(pDest, pBuf, len, 0,
+ sqlite3RCStrUnref);
+ }
+ }else{
+ rc = sqlite3VdbeMemFromBtree(pC->uc.pCursor, iOffset, len, pDest);
+ if( rc ) return rc;
+ sqlite3VdbeSerialGet((const u8*)pDest->z, t, pDest);
+ if( (t&1)!=0 && encoding==SQLITE_UTF8 ){
+ pDest->z[len] = 0;
+ pDest->flags |= MEM_Term;
+ }
+ }
+ pDest->flags &= ~MEM_Ephem;
+ return rc;
+}
+
+
/*
** Return the symbolic name for the data type of a pMem
*/
@@ -91477,6 +93383,7 @@ SQLITE_PRIVATE int sqlite3VdbeExec(
Mem *pIn2 = 0; /* 2nd input operand */
Mem *pIn3 = 0; /* 3rd input operand */
Mem *pOut = 0; /* Output operand */
+ u32 colCacheCtr = 0; /* Column cache counter */
#if defined(SQLITE_ENABLE_STMT_SCANSTATUS) || defined(VDBE_PROFILE)
u64 *pnCycle = 0;
int bStmtScanStatus = IS_STMT_SCANSTATUS(db)!=0;
@@ -91672,8 +93579,8 @@ SQLITE_PRIVATE int sqlite3VdbeExec(
case OP_Goto: { /* jump */
#ifdef SQLITE_DEBUG
- /* In debuggging mode, when the p5 flags is set on an OP_Goto, that
- ** means we should really jump back to the preceeding OP_ReleaseReg
+ /* In debugging mode, when the p5 flags is set on an OP_Goto, that
+ ** means we should really jump back to the preceding OP_ReleaseReg
** instruction. */
if( pOp->p5 ){
assert( pOp->p2 < (int)(pOp - aOp) );
@@ -91881,7 +93788,7 @@ case OP_HaltIfNull: { /* in3 */
** P5 is a value between 0 and 4, inclusive, that modifies the P4 string.
**
** 0: (no change)
-** 1: NOT NULL contraint failed: P4
+** 1: NOT NULL constraint failed: P4
** 2: UNIQUE constraint failed: P4
** 3: CHECK constraint failed: P4
** 4: FOREIGN KEY constraint failed: P4
@@ -92685,7 +94592,7 @@ case OP_AddImm: { /* in1 */
pIn1 = &aMem[pOp->p1];
memAboutToChange(p, pIn1);
sqlite3VdbeMemIntegerify(pIn1);
- pIn1->u.i += pOp->p2;
+ *(u64*)&pIn1->u.i += (u64)pOp->p2;
break;
}
@@ -93012,10 +94919,10 @@ case OP_Ge: { /* same as TK_GE, jump, in1, in3 */
** opcodes are allowed to occur between this instruction and the previous
** OP_Lt or OP_Gt.
**
-** If result of an OP_Eq comparison on the same two operands as the
-** prior OP_Lt or OP_Gt would have been true, then jump to P2.
-** If the result of an OP_Eq comparison on the two previous
-** operands would have been false or NULL, then fall through.
+** If the result of an OP_Eq comparison on the same two operands as
+** the prior OP_Lt or OP_Gt would have been true, then jump to P2. If
+** the result of an OP_Eq comparison on the two previous operands
+** would have been false or NULL, then fall through.
*/
case OP_ElseEq: { /* same as TK_ESCAPE, jump */
@@ -93445,7 +95352,7 @@ case OP_IsType: { /* jump */
/* Opcode: ZeroOrNull P1 P2 P3 * *
** Synopsis: r[P2] = 0 OR NULL
**
-** If all both registers P1 and P3 are NOT NULL, then store a zero in
+** If both registers P1 and P3 are NOT NULL, then store a zero in
** register P2. If either registers P1 or P3 are NULL then put
** a NULL in register P2.
*/
@@ -93799,11 +95706,16 @@ op_column_restart:
pDest->flags = aFlag[t&1];
}
}else{
+ u8 p5;
pDest->enc = encoding;
+ assert( pDest->db==db );
/* This branch happens only when content is on overflow pages */
- if( ((pOp->p5 & (OPFLAG_LENGTHARG|OPFLAG_TYPEOFARG))!=0
- && ((t>=12 && (t&1)==0) || (pOp->p5 & OPFLAG_TYPEOFARG)!=0))
- || (len = sqlite3VdbeSerialTypeLen(t))==0
+ if( ((p5 = (pOp->p5 & OPFLAG_BYTELENARG))!=0
+ && (p5==OPFLAG_TYPEOFARG
+ || (t>=12 && ((t&1)==0 || p5==OPFLAG_BYTELENARG))
+ )
+ )
+ || sqlite3VdbeSerialTypeLen(t)==0
){
/* Content is irrelevant for
** 1. the typeof() function,
@@ -93820,11 +95732,13 @@ op_column_restart:
*/
sqlite3VdbeSerialGet((u8*)sqlite3CtypeMap, t, pDest);
}else{
- if( len>db->aLimit[SQLITE_LIMIT_LENGTH] ) goto too_big;
- rc = sqlite3VdbeMemFromBtree(pC->uc.pCursor, aOffset[p2], len, pDest);
- if( rc!=SQLITE_OK ) goto abort_due_to_error;
- sqlite3VdbeSerialGet((const u8*)pDest->z, t, pDest);
- pDest->flags &= ~MEM_Ephem;
+ rc = vdbeColumnFromOverflow(pC, p2, t, aOffset[p2],
+ p->cacheCtr, colCacheCtr, pDest);
+ if( rc ){
+ if( rc==SQLITE_NOMEM ) goto no_mem;
+ if( rc==SQLITE_TOOBIG ) goto too_big;
+ goto abort_due_to_error;
+ }
}
}
@@ -94286,7 +96200,6 @@ case OP_MakeRecord: {
/* NULL value. No change in zPayload */
}else{
u64 v;
- u32 i;
if( serial_type==7 ){
assert( sizeof(v)==sizeof(pRec->u.r) );
memcpy(&v, &pRec->u.r, sizeof(v));
@@ -94294,12 +96207,17 @@ case OP_MakeRecord: {
}else{
v = pRec->u.i;
}
- len = i = sqlite3SmallTypeSizes[serial_type];
- assert( i>0 );
- while( 1 /*exit-by-break*/ ){
- zPayload[--i] = (u8)(v&0xFF);
- if( i==0 ) break;
- v >>= 8;
+ len = sqlite3SmallTypeSizes[serial_type];
+ assert( len>=1 && len<=8 && len!=5 && len!=7 );
+ switch( len ){
+ default: zPayload[7] = (u8)(v&0xff); v >>= 8;
+ zPayload[6] = (u8)(v&0xff); v >>= 8;
+ case 6: zPayload[5] = (u8)(v&0xff); v >>= 8;
+ zPayload[4] = (u8)(v&0xff); v >>= 8;
+ case 4: zPayload[3] = (u8)(v&0xff); v >>= 8;
+ case 3: zPayload[2] = (u8)(v&0xff); v >>= 8;
+ case 2: zPayload[1] = (u8)(v&0xff); v >>= 8;
+ case 1: zPayload[0] = (u8)(v&0xff);
}
zPayload += len;
}
@@ -95108,7 +97026,7 @@ case OP_OpenEphemeral: { /* ncycle */
}
pCx = p->apCsr[pOp->p1];
if( pCx && !pCx->noReuse && ALWAYS(pOp->p2<=pCx->nField) ){
- /* If the ephermeral table is already open and has no duplicates from
+ /* If the ephemeral table is already open and has no duplicates from
** OP_OpenDup, then erase all existing content so that the table is
** empty again, rather than creating a new table. */
assert( pCx->isEphemeral );
@@ -95599,7 +97517,7 @@ seek_not_found:
** row. If This.P5 is false (0) then a jump is made to SeekGE.P2. If
** This.P5 is true (non-zero) then a jump is made to This.P2. The P5==0
** case occurs when there are no inequality constraints to the right of
-** the IN constraing. The jump to SeekGE.P2 ends the loop. The P5!=0 case
+** the IN constraint. The jump to SeekGE.P2 ends the loop. The P5!=0 case
** occurs when there are inequality constraints to the right of the IN
** operator. In that case, the This.P2 will point either directly to or
** to setup code prior to the OP_IdxGT or OP_IdxGE opcode that checks for
@@ -95607,7 +97525,7 @@ seek_not_found:
**
** Possible outcomes from this opcode:<ol>
**
-** <li> If the cursor is initally not pointed to any valid row, then
+** <li> If the cursor is initially not pointed to any valid row, then
** fall through into the subsequent OP_SeekGE opcode.
**
** <li> If the cursor is left pointing to a row that is before the target
@@ -95839,13 +97757,13 @@ case OP_IfNotOpen: { /* jump */
** operands to OP_NotFound and OP_IdxGT.
**
** This opcode is an optimization attempt only. If this opcode always
-** falls through, the correct answer is still obtained, but extra works
+** falls through, the correct answer is still obtained, but extra work
** is performed.
**
** A value of N in the seekHit flag of cursor P1 means that there exists
** a key P3:N that will match some record in the index. We want to know
** if it is possible for a record P3:P4 to match some record in the
-** index. If it is not possible, we can skips some work. So if seekHit
+** index. If it is not possible, we can skip some work. So if seekHit
** is less than P4, attempt to find out if a match is possible by running
** OP_NotFound.
**
@@ -96357,6 +98275,7 @@ case OP_Insert: {
);
pC->deferredMoveto = 0;
pC->cacheStatus = CACHE_STALE;
+ colCacheCtr++;
/* Invoke the update-hook if required. */
if( rc ) goto abort_due_to_error;
@@ -96410,13 +98329,18 @@ case OP_RowCell: {
** left in an undefined state.
**
** If the OPFLAG_AUXDELETE bit is set on P5, that indicates that this
-** delete one of several associated with deleting a table row and all its
-** associated index entries. Exactly one of those deletes is the "primary"
-** delete. The others are all on OPFLAG_FORDELETE cursors or else are
-** marked with the AUXDELETE flag.
+** delete is one of several associated with deleting a table row and
+** all its associated index entries. Exactly one of those deletes is
+** the "primary" delete. The others are all on OPFLAG_FORDELETE
+** cursors or else are marked with the AUXDELETE flag.
+**
+** If the OPFLAG_NCHANGE (0x01) flag of P2 (NB: P2 not P5) is set, then
+** the row change count is incremented (otherwise not).
**
-** If the OPFLAG_NCHANGE flag of P2 (NB: P2 not P5) is set, then the row
-** change count is incremented (otherwise not).
+** If the OPFLAG_ISNOOP (0x40) flag of P2 (not P5!) is set, then the
+** pre-update-hook for deletes is run, but the btree is otherwise unchanged.
+** This happens when the OP_Delete is to be shortly followed by an OP_Insert
+** with the same key, causing the btree entry to be overwritten.
**
** P1 must not be pseudo-table. It has to be a real table with
** multiple rows.
@@ -96517,6 +98441,7 @@ case OP_Delete: {
rc = sqlite3BtreeDelete(pC->uc.pCursor, pOp->p5);
pC->cacheStatus = CACHE_STALE;
+ colCacheCtr++;
pC->seekResult = 0;
if( rc ) goto abort_due_to_error;
@@ -96584,13 +98509,13 @@ case OP_SorterCompare: {
** Write into register P2 the current sorter data for sorter cursor P1.
** Then clear the column header cache on cursor P3.
**
-** This opcode is normally use to move a record out of the sorter and into
+** This opcode is normally used to move a record out of the sorter and into
** a register that is the source for a pseudo-table cursor created using
** OpenPseudo. That pseudo-table cursor is the one that is identified by
** parameter P3. Clearing the P3 column cache as part of this opcode saves
** us from having to issue a separate NullRow instruction to clear that cache.
*/
-case OP_SorterData: {
+case OP_SorterData: { /* ncycle */
VdbeCursor *pC;
pOut = &aMem[pOp->p2];
@@ -96865,8 +98790,8 @@ case OP_IfSmaller: { /* jump */
** regression tests can determine whether or not the optimizer is
** correctly optimizing out sorts.
*/
-case OP_SorterSort: /* jump */
-case OP_Sort: { /* jump */
+case OP_SorterSort: /* jump ncycle */
+case OP_Sort: { /* jump ncycle */
#ifdef SQLITE_TEST
sqlite3_sort_count++;
sqlite3_search_count--;
@@ -97393,7 +99318,7 @@ case OP_IdxGE: { /* jump, ncycle */
** file is given by P1.
**
** The table being destroyed is in the main database file if P3==0. If
-** P3==1 then the table to be clear is in the auxiliary database file
+** P3==1 then the table to be destroyed is in the auxiliary database file
** that is used to store tables create using CREATE TEMPORARY TABLE.
**
** If AUTOVACUUM is enabled then it is possible that another root page
@@ -97453,8 +99378,8 @@ case OP_Destroy: { /* out2 */
** in the database file is given by P1. But, unlike Destroy, do not
** remove the table or index from the database file.
**
-** The table being clear is in the main database file if P2==0. If
-** P2==1 then the table to be clear is in the auxiliary database file
+** The table being cleared is in the main database file if P2==0. If
+** P2==1 then the table to be cleared is in the auxiliary database file
** that is used to store tables create using CREATE TEMPORARY TABLE.
**
** If the P3 value is non-zero, then the row change count is incremented
@@ -97540,13 +99465,41 @@ case OP_CreateBtree: { /* out2 */
/* Opcode: SqlExec * * * P4 *
**
** Run the SQL statement or statements specified in the P4 string.
+** Disable Auth and Trace callbacks while those statements are running if
+** P1 is true.
*/
case OP_SqlExec: {
+ char *zErr;
+#ifndef SQLITE_OMIT_AUTHORIZATION
+ sqlite3_xauth xAuth;
+#endif
+ u8 mTrace;
+
sqlite3VdbeIncrWriteCounter(p, 0);
db->nSqlExec++;
- rc = sqlite3_exec(db, pOp->p4.z, 0, 0, 0);
+ zErr = 0;
+#ifndef SQLITE_OMIT_AUTHORIZATION
+ xAuth = db->xAuth;
+#endif
+ mTrace = db->mTrace;
+ if( pOp->p1 ){
+#ifndef SQLITE_OMIT_AUTHORIZATION
+ db->xAuth = 0;
+#endif
+ db->mTrace = 0;
+ }
+ rc = sqlite3_exec(db, pOp->p4.z, 0, 0, &zErr);
db->nSqlExec--;
- if( rc ) goto abort_due_to_error;
+#ifndef SQLITE_OMIT_AUTHORIZATION
+ db->xAuth = xAuth;
+#endif
+ db->mTrace = mTrace;
+ if( zErr || rc ){
+ sqlite3VdbeError(p, "%s", zErr);
+ sqlite3_free(zErr);
+ if( rc==SQLITE_NOMEM ) goto no_mem;
+ goto abort_due_to_error;
+ }
break;
}
@@ -98280,7 +100233,7 @@ case OP_AggStep1: {
/* If this function is inside of a trigger, the register array in aMem[]
** might change from one evaluation to the next. The next block of code
** checks to see if the register array has changed, and if so it
- ** reinitializes the relavant parts of the sqlite3_context object */
+ ** reinitializes the relevant parts of the sqlite3_context object */
if( pCtx->pMem != pMem ){
pCtx->pMem = pMem;
for(i=pCtx->argc-1; i>=0; i--) pCtx->argv[i] = &aMem[pOp->p2+i];
@@ -98768,6 +100721,52 @@ case OP_VOpen: { /* ncycle */
#endif /* SQLITE_OMIT_VIRTUALTABLE */
#ifndef SQLITE_OMIT_VIRTUALTABLE
+/* Opcode: VCheck P1 P2 P3 P4 *
+**
+** P4 is a pointer to a Table object that is a virtual table in schema P1
+** that supports the xIntegrity() method. This opcode runs the xIntegrity()
+** method for that virtual table, using P3 as the integer argument. If
+** an error is reported back, the table name is prepended to the error
+** message and that message is stored in P2. If no errors are seen,
+** register P2 is set to NULL.
+*/
+case OP_VCheck: { /* out2 */
+ Table *pTab;
+ sqlite3_vtab *pVtab;
+ const sqlite3_module *pModule;
+ char *zErr = 0;
+
+ pOut = &aMem[pOp->p2];
+ sqlite3VdbeMemSetNull(pOut); /* Innocent until proven guilty */
+ assert( pOp->p4type==P4_TABLEREF );
+ pTab = pOp->p4.pTab;
+ assert( pTab!=0 );
+ assert( pTab->nTabRef>0 );
+ assert( IsVirtual(pTab) );
+ if( pTab->u.vtab.p==0 ) break;
+ pVtab = pTab->u.vtab.p->pVtab;
+ assert( pVtab!=0 );
+ pModule = pVtab->pModule;
+ assert( pModule!=0 );
+ assert( pModule->iVersion>=4 );
+ assert( pModule->xIntegrity!=0 );
+ sqlite3VtabLock(pTab->u.vtab.p);
+ assert( pOp->p1>=0 && pOp->p1<db->nDb );
+ rc = pModule->xIntegrity(pVtab, db->aDb[pOp->p1].zDbSName, pTab->zName,
+ pOp->p3, &zErr);
+ sqlite3VtabUnlock(pTab->u.vtab.p);
+ if( rc ){
+ sqlite3_free(zErr);
+ goto abort_due_to_error;
+ }
+ if( zErr ){
+ sqlite3VdbeMemSetStr(pOut, zErr, -1, SQLITE_UTF8, sqlite3_free);
+ }
+ break;
+}
+#endif /* SQLITE_OMIT_VIRTUALTABLE */
+
+#ifndef SQLITE_OMIT_VIRTUALTABLE
/* Opcode: VInitIn P1 P2 P3 * *
** Synopsis: r[P2]=ValueList(P1,P3)
**
@@ -98880,6 +100879,7 @@ case OP_VColumn: { /* ncycle */
const sqlite3_module *pModule;
Mem *pDest;
sqlite3_context sContext;
+ FuncDef nullFunc;
VdbeCursor *pCur = p->apCsr[pOp->p1];
assert( pCur!=0 );
@@ -98897,6 +100897,9 @@ case OP_VColumn: { /* ncycle */
memset(&sContext, 0, sizeof(sContext));
sContext.pOut = pDest;
sContext.enc = encoding;
+ nullFunc.pUserData = 0;
+ nullFunc.funcFlags = SQLITE_RESULT_SUBTYPE;
+ sContext.pFunc = &nullFunc;
assert( pOp->p5==OPFLAG_NOCHNG || pOp->p5==0 );
if( pOp->p5 & OPFLAG_NOCHNG ){
sqlite3VdbeMemSetNull(pDest);
@@ -99158,7 +101161,7 @@ case OP_MaxPgcnt: { /* out2 */
** This opcode works exactly like OP_Function. The only difference is in
** its name. This opcode is used in places where the function must be
** purely non-deterministic. Some built-in date/time functions can be
-** either determinitic of non-deterministic, depending on their arguments.
+** either deterministic of non-deterministic, depending on their arguments.
** When those function are used in a non-deterministic way, they will check
** to see if they were called using OP_PureFunc instead of OP_Function, and
** if they were, they throw an error.
@@ -99176,7 +101179,7 @@ case OP_Function: { /* group */
/* If this function is inside of a trigger, the register array in aMem[]
** might change from one evaluation to the next. The next block of code
** checks to see if the register array has changed, and if so it
- ** reinitializes the relavant parts of the sqlite3_context object */
+ ** reinitializes the relevant parts of the sqlite3_context object */
pOut = &aMem[pOp->p3];
if( pCtx->pOut != pOut ){
pCtx->pVdbe = p;
@@ -99229,6 +101232,42 @@ case OP_ClrSubtype: { /* in1 */
break;
}
+/* Opcode: GetSubtype P1 P2 * * *
+** Synopsis: r[P2] = r[P1].subtype
+**
+** Extract the subtype value from register P1 and write that subtype
+** into register P2. If P1 has no subtype, then P1 gets a NULL.
+*/
+case OP_GetSubtype: { /* in1 out2 */
+ pIn1 = &aMem[pOp->p1];
+ pOut = &aMem[pOp->p2];
+ if( pIn1->flags & MEM_Subtype ){
+ sqlite3VdbeMemSetInt64(pOut, pIn1->eSubtype);
+ }else{
+ sqlite3VdbeMemSetNull(pOut);
+ }
+ break;
+}
+
+/* Opcode: SetSubtype P1 P2 * * *
+** Synopsis: r[P2].subtype = r[P1]
+**
+** Set the subtype value of register P2 to the integer from register P1.
+** If P1 is NULL, clear the subtype from p2.
+*/
+case OP_SetSubtype: { /* in1 out2 */
+ pIn1 = &aMem[pOp->p1];
+ pOut = &aMem[pOp->p2];
+ if( pIn1->flags & MEM_Null ){
+ pOut->flags &= ~MEM_Subtype;
+ }else{
+ assert( pIn1->flags & MEM_Int );
+ pOut->flags |= MEM_Subtype;
+ pOut->eSubtype = (u8)(pIn1->u.i & 0xff);
+ }
+ break;
+}
+
/* Opcode: FilterAdd P1 * P3 P4 *
** Synopsis: filter(P1) += key(P3@P4)
**
@@ -99252,7 +101291,7 @@ case OP_FilterAdd: {
printf("hash: %llu modulo %d -> %u\n", h, pIn1->n, (int)(h%pIn1->n));
}
#endif
- h %= pIn1->n;
+ h %= (pIn1->n*8);
pIn1->z[h/8] |= 1<<(h&7);
break;
}
@@ -99288,7 +101327,7 @@ case OP_Filter: { /* jump */
printf("hash: %llu modulo %d -> %u\n", h, pIn1->n, (int)(h%pIn1->n));
}
#endif
- h %= pIn1->n;
+ h %= (pIn1->n*8);
if( (pIn1->z[h/8] & (1<<(h&7)))==0 ){
VdbeBranchTaken(1, 2);
p->aCounter[SQLITE_STMTSTATUS_FILTER_HIT]++;
@@ -99540,7 +101579,7 @@ default: { /* This is really OP_Noop, OP_Explain */
}
if( opProperty==0xff ){
/* Never happens. This code exists to avoid a harmless linkage
- ** warning aboud sqlite3VdbeRegisterDump() being defined but not
+ ** warning about sqlite3VdbeRegisterDump() being defined but not
** used. */
sqlite3VdbeRegisterDump(p);
}
@@ -99713,8 +101752,7 @@ static int blobSeekToRow(Incrblob *p, sqlite3_int64 iRow, char **pzErr){
/* Set the value of register r[1] in the SQL statement to integer iRow.
** This is done directly as a performance optimization
*/
- v->aMem[1].flags = MEM_Int;
- v->aMem[1].u.i = iRow;
+ sqlite3VdbeMemSetInt64(&v->aMem[1], iRow);
/* If the statement has been run before (and is paused at the OP_ResultRow)
** then back it up to the point where it does the OP_NotExists. This could
@@ -99797,7 +101835,7 @@ SQLITE_API int sqlite3_blob_open(
#endif
*ppBlob = 0;
#ifdef SQLITE_ENABLE_API_ARMOR
- if( !sqlite3SafetyCheckOk(db) || zTable==0 ){
+ if( !sqlite3SafetyCheckOk(db) || zTable==0 || zColumn==0 ){
return SQLITE_MISUSE_BKPT;
}
#endif
@@ -100258,7 +102296,7 @@ SQLITE_API int sqlite3_blob_reopen(sqlite3_blob *pBlob, sqlite3_int64 iRow){
** The threshold for the amount of main memory to use before flushing
** records to a PMA is roughly the same as the limit configured for the
** page-cache of the main database. Specifically, the threshold is set to
-** the value returned by "PRAGMA main.page_size" multipled by
+** the value returned by "PRAGMA main.page_size" multiplied by
** that returned by "PRAGMA main.cache_size", in bytes.
**
** If the sorter is running in single-threaded mode, then all PMAs generated
@@ -100281,7 +102319,7 @@ SQLITE_API int sqlite3_blob_reopen(sqlite3_blob *pBlob, sqlite3_int64 iRow){
**
** If there are fewer than SORTER_MAX_MERGE_COUNT PMAs in total and the
** sorter is running in single-threaded mode, then these PMAs are merged
-** incrementally as keys are retreived from the sorter by the VDBE. The
+** incrementally as keys are retrieved from the sorter by the VDBE. The
** MergeEngine object, described in further detail below, performs this
** merge.
**
@@ -100359,7 +102397,7 @@ struct SorterFile {
struct SorterList {
SorterRecord *pList; /* Linked list of records */
u8 *aMemory; /* If non-NULL, bulk memory to hold pList */
- int szPMA; /* Size of pList as PMA in bytes */
+ i64 szPMA; /* Size of pList as PMA in bytes */
};
/*
@@ -100444,7 +102482,7 @@ struct MergeEngine {
**
** Essentially, this structure contains all those fields of the VdbeSorter
** structure for which each thread requires a separate instance. For example,
-** each thread requries its own UnpackedRecord object to unpack records in
+** each thread requeries its own UnpackedRecord object to unpack records in
** as part of comparison operations.
**
** Before a background thread is launched, variable bDone is set to 0. Then,
@@ -100468,10 +102506,10 @@ typedef int (*SorterCompare)(SortSubtask*,int*,const void*,int,const void*,int);
struct SortSubtask {
SQLiteThread *pThread; /* Background thread, if any */
int bDone; /* Set if thread is finished but not joined */
+ int nPMA; /* Number of PMAs currently in file */
VdbeSorter *pSorter; /* Sorter that owns this sub-task */
UnpackedRecord *pUnpacked; /* Space to unpack a record */
SorterList list; /* List for thread to write to a PMA */
- int nPMA; /* Number of PMAs currently in file */
SorterCompare xCompare; /* Compare function to use */
SorterFile file; /* Temp file for level-0 PMAs */
SorterFile file2; /* Space for other PMAs */
@@ -100516,7 +102554,7 @@ struct VdbeSorter {
** PMA, in sorted order. The next key to be read is cached in nKey/aKey.
** aKey might point into aMap or into aBuffer. If neither of those locations
** contain a contiguous representation of the key, then aAlloc is allocated
-** and the key is copied into aAlloc and aKey is made to poitn to aAlloc.
+** and the key is copied into aAlloc and aKey is made to point to aAlloc.
**
** pFd==0 at EOF.
*/
@@ -101887,7 +103925,7 @@ static int vdbeSorterFlushPMA(VdbeSorter *pSorter){
** the background thread from a sub-tasks previous turn is still running,
** skip it. If the first (pSorter->nTask-1) sub-tasks are all still busy,
** fall back to using the final sub-task. The first (pSorter->nTask-1)
- ** sub-tasks are prefered as they use background threads - the final
+ ** sub-tasks are preferred as they use background threads - the final
** sub-task uses the main thread. */
for(i=0; i<nWorker; i++){
int iTest = (pSorter->iPrev + i + 1) % nWorker;
@@ -101945,8 +103983,8 @@ SQLITE_PRIVATE int sqlite3VdbeSorterWrite(
int rc = SQLITE_OK; /* Return Code */
SorterRecord *pNew; /* New list element */
int bFlush; /* True to flush contents of memory to PMA */
- int nReq; /* Bytes of memory required */
- int nPMA; /* Bytes of PMA space required */
+ i64 nReq; /* Bytes of memory required */
+ i64 nPMA; /* Bytes of PMA space required */
int t; /* serial type of first record field */
assert( pCsr->eCurType==CURTYPE_SORTER );
@@ -102371,7 +104409,7 @@ static int vdbePmaReaderIncrMergeInit(PmaReader *pReadr, int eMode){
rc = vdbeMergeEngineInit(pTask, pIncr->pMerger, eMode);
- /* Set up the required files for pIncr. A multi-theaded IncrMerge object
+ /* Set up the required files for pIncr. A multi-threaded IncrMerge object
** requires two temp files to itself, whereas a single-threaded object
** only requires a region of pTask->file2. */
if( rc==SQLITE_OK ){
@@ -103011,6 +105049,8 @@ static int bytecodevtabConnect(
"p5 INT,"
"comment TEXT,"
"subprog TEXT,"
+ "nexec INT,"
+ "ncycle INT,"
"stmt HIDDEN"
");",
@@ -103173,7 +105213,7 @@ static int bytecodevtabColumn(
}
}
}
- i += 10;
+ i += 20;
}
}
switch( i ){
@@ -103223,16 +105263,31 @@ static int bytecodevtabColumn(
}
break;
}
- case 10: /* tables_used.type */
+
+#ifdef SQLITE_ENABLE_STMT_SCANSTATUS
+ case 9: /* nexec */
+ sqlite3_result_int(ctx, pOp->nExec);
+ break;
+ case 10: /* ncycle */
+ sqlite3_result_int(ctx, pOp->nCycle);
+ break;
+#else
+ case 9: /* nexec */
+ case 10: /* ncycle */
+ sqlite3_result_int(ctx, 0);
+ break;
+#endif
+
+ case 20: /* tables_used.type */
sqlite3_result_text(ctx, pCur->zType, -1, SQLITE_STATIC);
break;
- case 11: /* tables_used.schema */
+ case 21: /* tables_used.schema */
sqlite3_result_text(ctx, pCur->zSchema, -1, SQLITE_STATIC);
break;
- case 12: /* tables_used.name */
+ case 22: /* tables_used.name */
sqlite3_result_text(ctx, pCur->zName, -1, SQLITE_STATIC);
break;
- case 13: /* tables_used.wr */
+ case 23: /* tables_used.wr */
sqlite3_result_int(ctx, pOp->opcode==OP_OpenWrite);
break;
}
@@ -103306,7 +105361,7 @@ static int bytecodevtabBestIndex(
int rc = SQLITE_CONSTRAINT;
struct sqlite3_index_constraint *p;
bytecodevtab *pVTab = (bytecodevtab*)tab;
- int iBaseCol = pVTab->bTablesUsed ? 4 : 8;
+ int iBaseCol = pVTab->bTablesUsed ? 4 : 10;
pIdxInfo->estimatedCost = (double)100;
pIdxInfo->estimatedRows = 100;
pIdxInfo->idxNum = 0;
@@ -103353,7 +105408,8 @@ static sqlite3_module bytecodevtabModule = {
/* xSavepoint */ 0,
/* xRelease */ 0,
/* xRollbackTo */ 0,
- /* xShadowName */ 0
+ /* xShadowName */ 0,
+ /* xIntegrity */ 0
};
@@ -103877,7 +105933,7 @@ static int walkWindowList(Walker *pWalker, Window *pList, int bOneOnly){
** The return value from this routine is WRC_Abort to abandon the tree walk
** and WRC_Continue to continue.
*/
-static SQLITE_NOINLINE int walkExpr(Walker *pWalker, Expr *pExpr){
+SQLITE_PRIVATE SQLITE_NOINLINE int sqlite3WalkExprNN(Walker *pWalker, Expr *pExpr){
int rc;
testcase( ExprHasProperty(pExpr, EP_TokenOnly) );
testcase( ExprHasProperty(pExpr, EP_Reduced) );
@@ -103886,7 +105942,9 @@ static SQLITE_NOINLINE int walkExpr(Walker *pWalker, Expr *pExpr){
if( rc ) return rc & WRC_Abort;
if( !ExprHasProperty(pExpr,(EP_TokenOnly|EP_Leaf)) ){
assert( pExpr->x.pList==0 || pExpr->pRight==0 );
- if( pExpr->pLeft && walkExpr(pWalker, pExpr->pLeft) ) return WRC_Abort;
+ if( pExpr->pLeft && sqlite3WalkExprNN(pWalker, pExpr->pLeft) ){
+ return WRC_Abort;
+ }
if( pExpr->pRight ){
assert( !ExprHasProperty(pExpr, EP_WinFunc) );
pExpr = pExpr->pRight;
@@ -103910,7 +105968,7 @@ static SQLITE_NOINLINE int walkExpr(Walker *pWalker, Expr *pExpr){
return WRC_Continue;
}
SQLITE_PRIVATE int sqlite3WalkExpr(Walker *pWalker, Expr *pExpr){
- return pExpr ? walkExpr(pWalker,pExpr) : WRC_Continue;
+ return pExpr ? sqlite3WalkExprNN(pWalker,pExpr) : WRC_Continue;
}
/*
@@ -104036,7 +106094,7 @@ SQLITE_PRIVATE int sqlite3WalkSelect(Walker *pWalker, Select *p){
}
/* Increase the walkerDepth when entering a subquery, and
-** descrease when leaving the subquery.
+** decrease when leaving the subquery.
*/
SQLITE_PRIVATE int sqlite3WalkerDepthIncrease(Walker *pWalker, Select *pSelect){
UNUSED_PARAMETER(pSelect);
@@ -104180,21 +106238,36 @@ static void resolveAlias(
}
/*
-** Subqueries stores the original database, table and column names for their
-** result sets in ExprList.a[].zSpan, in the form "DATABASE.TABLE.COLUMN".
-** Check to see if the zSpan given to this routine matches the zDb, zTab,
-** and zCol. If any of zDb, zTab, and zCol are NULL then those fields will
-** match anything.
+** Subqueries store the original database, table and column names for their
+** result sets in ExprList.a[].zSpan, in the form "DATABASE.TABLE.COLUMN",
+** and mark the expression-list item by setting ExprList.a[].fg.eEName
+** to ENAME_TAB.
+**
+** Check to see if the zSpan/eEName of the expression-list item passed to this
+** routine matches the zDb, zTab, and zCol. If any of zDb, zTab, and zCol are
+** NULL then those fields will match anything. Return true if there is a match,
+** or false otherwise.
+**
+** SF_NestedFrom subqueries also store an entry for the implicit rowid (or
+** _rowid_, or oid) column by setting ExprList.a[].fg.eEName to ENAME_ROWID,
+** and setting zSpan to "DATABASE.TABLE.<rowid-alias>". This type of pItem
+** argument matches if zCol is a rowid alias. If it is not NULL, (*pbRowid)
+** is set to 1 if there is this kind of match.
*/
SQLITE_PRIVATE int sqlite3MatchEName(
const struct ExprList_item *pItem,
const char *zCol,
const char *zTab,
- const char *zDb
+ const char *zDb,
+ int *pbRowid
){
int n;
const char *zSpan;
- if( pItem->fg.eEName!=ENAME_TAB ) return 0;
+ int eEName = pItem->fg.eEName;
+ if( eEName!=ENAME_TAB && (eEName!=ENAME_ROWID || NEVER(pbRowid==0)) ){
+ return 0;
+ }
+ assert( pbRowid==0 || *pbRowid==0 );
zSpan = pItem->zEName;
for(n=0; ALWAYS(zSpan[n]) && zSpan[n]!='.'; n++){}
if( zDb && (sqlite3StrNICmp(zSpan, zDb, n)!=0 || zDb[n]!=0) ){
@@ -104206,9 +106279,11 @@ SQLITE_PRIVATE int sqlite3MatchEName(
return 0;
}
zSpan += n+1;
- if( zCol && sqlite3StrICmp(zSpan, zCol)!=0 ){
- return 0;
+ if( zCol ){
+ if( eEName==ENAME_TAB && sqlite3StrICmp(zSpan, zCol)!=0 ) return 0;
+ if( eEName==ENAME_ROWID && sqlite3IsRowid(zCol)==0 ) return 0;
}
+ if( eEName==ENAME_ROWID ) *pbRowid = 1;
return 1;
}
@@ -104241,6 +106316,7 @@ SQLITE_PRIVATE Bitmask sqlite3ExprColUsed(Expr *pExpr){
assert( ExprUseYTab(pExpr) );
pExTab = pExpr->y.pTab;
assert( pExTab!=0 );
+ assert( n < pExTab->nCol );
if( (pExTab->tabFlags & TF_HasGenerated)!=0
&& (pExTab->aCol[n].colFlags & COLFLAG_GENERATED)!=0
){
@@ -104341,7 +106417,7 @@ static int lookupName(
){
int i, j; /* Loop counters */
int cnt = 0; /* Number of matching column names */
- int cntTab = 0; /* Number of matching table names */
+ int cntTab = 0; /* Number of potential "rowid" matches */
int nSubquery = 0; /* How many levels of subquery */
sqlite3 *db = pParse->db; /* The database connection */
SrcItem *pItem; /* Use for looping over pSrcList items */
@@ -104418,39 +106494,49 @@ static int lookupName(
assert( pEList!=0 );
assert( pEList->nExpr==pTab->nCol );
for(j=0; j<pEList->nExpr; j++){
- if( !sqlite3MatchEName(&pEList->a[j], zCol, zTab, zDb) ){
+ int bRowid = 0; /* True if possible rowid match */
+ if( !sqlite3MatchEName(&pEList->a[j], zCol, zTab, zDb, &bRowid) ){
continue;
}
- if( cnt>0 ){
- if( pItem->fg.isUsing==0
- || sqlite3IdListIndex(pItem->u3.pUsing, zCol)<0
- ){
- /* Two or more tables have the same column name which is
- ** not joined by USING. This is an error. Signal as much
- ** by clearing pFJMatch and letting cnt go above 1. */
- sqlite3ExprListDelete(db, pFJMatch);
- pFJMatch = 0;
- }else
- if( (pItem->fg.jointype & JT_RIGHT)==0 ){
- /* An INNER or LEFT JOIN. Use the left-most table */
- continue;
- }else
- if( (pItem->fg.jointype & JT_LEFT)==0 ){
- /* A RIGHT JOIN. Use the right-most table */
- cnt = 0;
- sqlite3ExprListDelete(db, pFJMatch);
- pFJMatch = 0;
- }else{
- /* For a FULL JOIN, we must construct a coalesce() func */
- extendFJMatch(pParse, &pFJMatch, pMatch, pExpr->iColumn);
+ if( bRowid==0 ){
+ if( cnt>0 ){
+ if( pItem->fg.isUsing==0
+ || sqlite3IdListIndex(pItem->u3.pUsing, zCol)<0
+ ){
+ /* Two or more tables have the same column name which is
+ ** not joined by USING. This is an error. Signal as much
+ ** by clearing pFJMatch and letting cnt go above 1. */
+ sqlite3ExprListDelete(db, pFJMatch);
+ pFJMatch = 0;
+ }else
+ if( (pItem->fg.jointype & JT_RIGHT)==0 ){
+ /* An INNER or LEFT JOIN. Use the left-most table */
+ continue;
+ }else
+ if( (pItem->fg.jointype & JT_LEFT)==0 ){
+ /* A RIGHT JOIN. Use the right-most table */
+ cnt = 0;
+ sqlite3ExprListDelete(db, pFJMatch);
+ pFJMatch = 0;
+ }else{
+ /* For a FULL JOIN, we must construct a coalesce() func */
+ extendFJMatch(pParse, &pFJMatch, pMatch, pExpr->iColumn);
+ }
}
+ cnt++;
+ hit = 1;
+ }else if( cnt>0 ){
+ /* This is a potential rowid match, but there has already been
+ ** a real match found. So this can be ignored. */
+ continue;
}
- cnt++;
- cntTab = 2;
+ cntTab++;
pMatch = pItem;
pExpr->iColumn = j;
pEList->a[j].fg.bUsed = 1;
- hit = 1;
+
+ /* rowid cannot be part of a USING clause - assert() this. */
+ assert( bRowid==0 || pEList->a[j].fg.bUsingTerm==0 );
if( pEList->a[j].fg.bUsingTerm ) break;
}
if( hit || zTab==0 ) continue;
@@ -104645,10 +106731,10 @@ static int lookupName(
&& pMatch
&& (pNC->ncFlags & (NC_IdxExpr|NC_GenCol))==0
&& sqlite3IsRowid(zCol)
- && ALWAYS(VisibleRowid(pMatch->pTab))
+ && ALWAYS(VisibleRowid(pMatch->pTab) || pMatch->fg.isNestedFrom)
){
cnt = 1;
- pExpr->iColumn = -1;
+ if( pMatch->fg.isNestedFrom==0 ) pExpr->iColumn = -1;
pExpr->affExpr = SQLITE_AFF_INTEGER;
}
@@ -104807,6 +106893,7 @@ static int lookupName(
sqlite3RecordErrorOffsetOfExpr(pParse->db, pExpr);
pParse->checkSchema = 1;
pTopNC->nNcErr++;
+ eNewExprOp = TK_NULL;
}
assert( pFJMatch==0 );
@@ -104833,7 +106920,7 @@ static int lookupName(
** If a generated column is referenced, set bits for every column
** of the table.
*/
- if( pExpr->iColumn>=0 && pMatch!=0 ){
+ if( pExpr->iColumn>=0 && cnt==1 && pMatch!=0 ){
pMatch->colUsed |= sqlite3ExprColUsed(pExpr);
}
@@ -105101,6 +107188,7 @@ static int resolveExprStep(Walker *pWalker, Expr *pExpr){
Window *pWin = (IsWindowFunc(pExpr) ? pExpr->y.pWin : 0);
#endif
assert( !ExprHasProperty(pExpr, EP_xIsSelect|EP_IntValue) );
+ assert( pExpr->pLeft==0 || pExpr->pLeft->op==TK_ORDER );
zId = pExpr->u.zToken;
pDef = sqlite3FindFunction(pParse->db, zId, n, enc, 0);
if( pDef==0 ){
@@ -105242,6 +107330,10 @@ static int resolveExprStep(Walker *pWalker, Expr *pExpr){
pNC->nNcErr++;
}
#endif
+ else if( is_agg==0 && pExpr->pLeft ){
+ sqlite3ExprOrderByAggregateError(pParse, pExpr);
+ pNC->nNcErr++;
+ }
if( is_agg ){
/* Window functions may not be arguments of aggregate functions.
** Or arguments of other window functions. But aggregate functions
@@ -105260,6 +107352,11 @@ static int resolveExprStep(Walker *pWalker, Expr *pExpr){
#endif
sqlite3WalkExprList(pWalker, pList);
if( is_agg ){
+ if( pExpr->pLeft ){
+ assert( pExpr->pLeft->op==TK_ORDER );
+ assert( ExprUseXList(pExpr->pLeft) );
+ sqlite3WalkExprList(pWalker, pExpr->pLeft->x.pList);
+ }
#ifndef SQLITE_OMIT_WINDOWFUNC
if( pWin ){
Select *pSel = pNC->pWinSelect;
@@ -105288,11 +107385,12 @@ static int resolveExprStep(Walker *pWalker, Expr *pExpr){
while( pNC2
&& sqlite3ReferencesSrcList(pParse, pExpr, pNC2->pSrcList)==0
){
- pExpr->op2++;
+ pExpr->op2 += (1 + pNC2->nNestedSelect);
pNC2 = pNC2->pNext;
}
assert( pDef!=0 || IN_RENAME_OBJECT );
if( pNC2 && pDef ){
+ pExpr->op2 += pNC2->nNestedSelect;
assert( SQLITE_FUNC_MINMAX==NC_MinMaxAgg );
assert( SQLITE_FUNC_ANYORDER==NC_OrderAgg );
testcase( (pDef->funcFlags & SQLITE_FUNC_MINMAX)!=0 );
@@ -105770,7 +107868,7 @@ static int resolveOrderGroupBy(
}
for(j=0; j<pSelect->pEList->nExpr; j++){
if( sqlite3ExprCompare(0, pE, pSelect->pEList->a[j].pExpr, -1)==0 ){
- /* Since this expresion is being changed into a reference
+ /* Since this expression is being changed into a reference
** to an identical expression in the result set, remove all Window
** objects belonging to the expression from the Select.pWin list. */
windowRemoveExprFromSelect(pSelect, pE);
@@ -105823,10 +107921,8 @@ static int resolveSelectStep(Walker *pWalker, Select *p){
while( p ){
assert( (p->selFlags & SF_Expanded)!=0 );
assert( (p->selFlags & SF_Resolved)==0 );
- assert( db->suppressErr==0 ); /* SF_Resolved not set if errors suppressed */
p->selFlags |= SF_Resolved;
-
/* Resolve the expressions in the LIMIT and OFFSET clauses. These
** are not allowed to refer to any names, so pass an empty NameContext.
*/
@@ -105853,6 +107949,7 @@ static int resolveSelectStep(Walker *pWalker, Select *p){
/* Recursively resolve names in all subqueries in the FROM clause
*/
+ if( pOuterNC ) pOuterNC->nNestedSelect++;
for(i=0; i<p->pSrc->nSrc; i++){
SrcItem *pItem = &p->pSrc->a[i];
if( pItem->pSelect && (pItem->pSelect->selFlags & SF_Resolved)==0 ){
@@ -105877,6 +107974,9 @@ static int resolveSelectStep(Walker *pWalker, Select *p){
}
}
}
+ if( pOuterNC && ALWAYS(pOuterNC->nNestedSelect>0) ){
+ pOuterNC->nNestedSelect--;
+ }
/* Set up the local name-context to pass to sqlite3ResolveExprNames() to
** resolve the result-set expression list.
@@ -106093,7 +108193,8 @@ SQLITE_PRIVATE int sqlite3ResolveExprNames(
return SQLITE_ERROR;
}
#endif
- sqlite3WalkExpr(&w, pExpr);
+ assert( pExpr!=0 );
+ sqlite3WalkExprNN(&w, pExpr);
#if SQLITE_MAX_EXPR_DEPTH>0
w.pParse->nHeight -= pExpr->nHeight;
#endif
@@ -106135,7 +108236,7 @@ SQLITE_PRIVATE int sqlite3ResolveExprListNames(
return WRC_Abort;
}
#endif
- sqlite3WalkExpr(&w, pExpr);
+ sqlite3WalkExprNN(&w, pExpr);
#if SQLITE_MAX_EXPR_DEPTH>0
w.pParse->nHeight -= pExpr->nHeight;
#endif
@@ -106157,7 +108258,7 @@ SQLITE_PRIVATE int sqlite3ResolveExprListNames(
/*
** Resolve all names in all expressions of a SELECT and in all
-** decendents of the SELECT, including compounds off of p->pPrior,
+** descendants of the SELECT, including compounds off of p->pPrior,
** subqueries in expressions, and subqueries used as FROM clause
** terms.
**
@@ -106307,6 +108408,7 @@ SQLITE_PRIVATE char sqlite3ExprAffinity(const Expr *pExpr){
if( op==TK_SELECT_COLUMN ){
assert( pExpr->pLeft!=0 && ExprUseXSelect(pExpr->pLeft) );
assert( pExpr->iColumn < pExpr->iTable );
+ assert( pExpr->iColumn >= 0 );
assert( pExpr->iTable==pExpr->pLeft->x.pSelect->pEList->nExpr );
return sqlite3ExprAffinity(
pExpr->pLeft->x.pSelect->pEList->a[pExpr->iColumn].pExpr
@@ -106543,7 +108645,7 @@ SQLITE_PRIVATE CollSeq *sqlite3ExprCollSeq(Parse *pParse, const Expr *pExpr){
/*
** Return the collation sequence for the expression pExpr. If
** there is no defined collating sequence, return a pointer to the
-** defautl collation sequence.
+** default collation sequence.
**
** See also: sqlite3ExprCollSeq()
**
@@ -106673,7 +108775,7 @@ SQLITE_PRIVATE CollSeq *sqlite3BinaryCompareCollSeq(
return pColl;
}
-/* Expresssion p is a comparison operator. Return a collation sequence
+/* Expression p is a comparison operator. Return a collation sequence
** appropriate for the comparison operator.
**
** This is normally just a wrapper around sqlite3BinaryCompareCollSeq().
@@ -106830,6 +108932,7 @@ SQLITE_PRIVATE Expr *sqlite3ExprForVectorField(
*/
pRet = sqlite3PExpr(pParse, TK_SELECT_COLUMN, 0, 0);
if( pRet ){
+ ExprSetProperty(pRet, EP_FullSize);
pRet->iTable = nField;
pRet->iColumn = iField;
pRet->pLeft = pVector;
@@ -107130,6 +109233,15 @@ SQLITE_PRIVATE void sqlite3ExprSetHeightAndFlags(Parse *pParse, Expr *p){
#endif /* SQLITE_MAX_EXPR_DEPTH>0 */
/*
+** Set the error offset for an Expr node, if possible.
+*/
+SQLITE_PRIVATE void sqlite3ExprSetErrorOffset(Expr *pExpr, int iOfst){
+ if( pExpr==0 ) return;
+ if( NEVER(ExprUseWJoin(pExpr)) ) return;
+ pExpr->w.iOfst = iOfst;
+}
+
+/*
** This routine is the core allocator for Expr nodes.
**
** Construct a new expression node and return a pointer to it. Memory
@@ -107412,6 +109524,67 @@ SQLITE_PRIVATE Expr *sqlite3ExprFunction(
}
/*
+** Report an error when attempting to use an ORDER BY clause within
+** the arguments of a non-aggregate function.
+*/
+SQLITE_PRIVATE void sqlite3ExprOrderByAggregateError(Parse *pParse, Expr *p){
+ sqlite3ErrorMsg(pParse,
+ "ORDER BY may not be used with non-aggregate %#T()", p
+ );
+}
+
+/*
+** Attach an ORDER BY clause to a function call.
+**
+** functionname( arguments ORDER BY sortlist )
+** \_____________________/ \______/
+** pExpr pOrderBy
+**
+** The ORDER BY clause is inserted into a new Expr node of type TK_ORDER
+** and added to the Expr.pLeft field of the parent TK_FUNCTION node.
+*/
+SQLITE_PRIVATE void sqlite3ExprAddFunctionOrderBy(
+ Parse *pParse, /* Parsing context */
+ Expr *pExpr, /* The function call to which ORDER BY is to be added */
+ ExprList *pOrderBy /* The ORDER BY clause to add */
+){
+ Expr *pOB;
+ sqlite3 *db = pParse->db;
+ if( NEVER(pOrderBy==0) ){
+ assert( db->mallocFailed );
+ return;
+ }
+ if( pExpr==0 ){
+ assert( db->mallocFailed );
+ sqlite3ExprListDelete(db, pOrderBy);
+ return;
+ }
+ assert( pExpr->op==TK_FUNCTION );
+ assert( pExpr->pLeft==0 );
+ assert( ExprUseXList(pExpr) );
+ if( pExpr->x.pList==0 || NEVER(pExpr->x.pList->nExpr==0) ){
+ /* Ignore ORDER BY on zero-argument aggregates */
+ sqlite3ParserAddCleanup(pParse, sqlite3ExprListDeleteGeneric, pOrderBy);
+ return;
+ }
+ if( IsWindowFunc(pExpr) ){
+ sqlite3ExprOrderByAggregateError(pParse, pExpr);
+ sqlite3ExprListDelete(db, pOrderBy);
+ return;
+ }
+
+ pOB = sqlite3ExprAlloc(db, TK_ORDER, 0, 0);
+ if( pOB==0 ){
+ sqlite3ExprListDelete(db, pOrderBy);
+ return;
+ }
+ pOB->x.pList = pOrderBy;
+ assert( ExprUseXList(pOB) );
+ pExpr->pLeft = pOB;
+ ExprSetProperty(pOB, EP_FullSize);
+}
+
+/*
** Check to see if a function is usable according to current access
** rules:
**
@@ -107572,6 +109745,9 @@ static SQLITE_NOINLINE void sqlite3ExprDeleteNN(sqlite3 *db, Expr *p){
SQLITE_PRIVATE void sqlite3ExprDelete(sqlite3 *db, Expr *p){
if( p ) sqlite3ExprDeleteNN(db, p);
}
+SQLITE_PRIVATE void sqlite3ExprDeleteGeneric(sqlite3 *db, void *p){
+ if( ALWAYS(p) ) sqlite3ExprDeleteNN(db, (Expr*)p);
+}
/*
** Clear both elements of an OnOrUsing object
@@ -107589,7 +109765,7 @@ SQLITE_PRIVATE void sqlite3ClearOnOrUsing(sqlite3 *db, OnOrUsing *p){
/*
** Arrange to cause pExpr to be deleted when the pParse is deleted.
** This is similar to sqlite3ExprDelete() except that the delete is
-** deferred untilthe pParse is deleted.
+** deferred until the pParse is deleted.
**
** The pExpr might be deleted immediately on an OOM error.
**
@@ -107597,9 +109773,7 @@ SQLITE_PRIVATE void sqlite3ClearOnOrUsing(sqlite3 *db, OnOrUsing *p){
** pExpr to the pParse->pConstExpr list with a register number of 0.
*/
SQLITE_PRIVATE void sqlite3ExprDeferredDelete(Parse *pParse, Expr *pExpr){
- sqlite3ParserAddCleanup(pParse,
- (void(*)(sqlite3*,void*))sqlite3ExprDelete,
- pExpr);
+ sqlite3ParserAddCleanup(pParse, sqlite3ExprDeleteGeneric, pExpr);
}
/* Invoke sqlite3RenameExprUnmap() and sqlite3ExprDelete() on the
@@ -107664,11 +109838,7 @@ static int dupedExprStructSize(const Expr *p, int flags){
assert( flags==EXPRDUP_REDUCE || flags==0 ); /* Only one flag value allowed */
assert( EXPR_FULLSIZE<=0xfff );
assert( (0xfff & (EP_Reduced|EP_TokenOnly))==0 );
- if( 0==flags || p->op==TK_SELECT_COLUMN
-#ifndef SQLITE_OMIT_WINDOWFUNC
- || ExprHasProperty(p, EP_WinFunc)
-#endif
- ){
+ if( 0==flags || ExprHasProperty(p, EP_FullSize) ){
nSize = EXPR_FULLSIZE;
}else{
assert( !ExprHasProperty(p, EP_TokenOnly|EP_Reduced) );
@@ -107699,56 +109869,93 @@ static int dupedExprNodeSize(const Expr *p, int flags){
/*
** Return the number of bytes required to create a duplicate of the
-** expression passed as the first argument. The second argument is a
-** mask containing EXPRDUP_XXX flags.
+** expression passed as the first argument.
**
** The value returned includes space to create a copy of the Expr struct
** itself and the buffer referred to by Expr.u.zToken, if any.
**
-** If the EXPRDUP_REDUCE flag is set, then the return value includes
-** space to duplicate all Expr nodes in the tree formed by Expr.pLeft
-** and Expr.pRight variables (but not for any structures pointed to or
-** descended from the Expr.x.pList or Expr.x.pSelect variables).
+** The return value includes space to duplicate all Expr nodes in the
+** tree formed by Expr.pLeft and Expr.pRight, but not any other
+** substructure such as Expr.x.pList, Expr.x.pSelect, and Expr.y.pWin.
*/
-static int dupedExprSize(const Expr *p, int flags){
- int nByte = 0;
- if( p ){
- nByte = dupedExprNodeSize(p, flags);
- if( flags&EXPRDUP_REDUCE ){
- nByte += dupedExprSize(p->pLeft, flags) + dupedExprSize(p->pRight, flags);
- }
- }
+static int dupedExprSize(const Expr *p){
+ int nByte;
+ assert( p!=0 );
+ nByte = dupedExprNodeSize(p, EXPRDUP_REDUCE);
+ if( p->pLeft ) nByte += dupedExprSize(p->pLeft);
+ if( p->pRight ) nByte += dupedExprSize(p->pRight);
+ assert( nByte==ROUND8(nByte) );
return nByte;
}
/*
-** This function is similar to sqlite3ExprDup(), except that if pzBuffer
-** is not NULL then *pzBuffer is assumed to point to a buffer large enough
-** to store the copy of expression p, the copies of p->u.zToken
-** (if applicable), and the copies of the p->pLeft and p->pRight expressions,
-** if any. Before returning, *pzBuffer is set to the first byte past the
-** portion of the buffer copied into by this function.
+** An EdupBuf is a memory allocation used to stored multiple Expr objects
+** together with their Expr.zToken content. This is used to help implement
+** compression while doing sqlite3ExprDup(). The top-level Expr does the
+** allocation for itself and many of its decendents, then passes an instance
+** of the structure down into exprDup() so that they decendents can have
+** access to that memory.
+*/
+typedef struct EdupBuf EdupBuf;
+struct EdupBuf {
+ u8 *zAlloc; /* Memory space available for storage */
+#ifdef SQLITE_DEBUG
+ u8 *zEnd; /* First byte past the end of memory */
+#endif
+};
+
+/*
+** This function is similar to sqlite3ExprDup(), except that if pEdupBuf
+** is not NULL then it points to memory that can be used to store a copy
+** of the input Expr p together with its p->u.zToken (if any). pEdupBuf
+** is updated with the new buffer tail prior to returning.
*/
-static Expr *exprDup(sqlite3 *db, const Expr *p, int dupFlags, u8 **pzBuffer){
+static Expr *exprDup(
+ sqlite3 *db, /* Database connection (for memory allocation) */
+ const Expr *p, /* Expr tree to be duplicated */
+ int dupFlags, /* EXPRDUP_REDUCE for compression. 0 if not */
+ EdupBuf *pEdupBuf /* Preallocated storage space, or NULL */
+){
Expr *pNew; /* Value to return */
- u8 *zAlloc; /* Memory space from which to build Expr object */
+ EdupBuf sEdupBuf; /* Memory space from which to build Expr object */
u32 staticFlag; /* EP_Static if space not obtained from malloc */
+ int nToken = -1; /* Space needed for p->u.zToken. -1 means unknown */
assert( db!=0 );
assert( p );
assert( dupFlags==0 || dupFlags==EXPRDUP_REDUCE );
- assert( pzBuffer==0 || dupFlags==EXPRDUP_REDUCE );
+ assert( pEdupBuf==0 || dupFlags==EXPRDUP_REDUCE );
/* Figure out where to write the new Expr structure. */
- if( pzBuffer ){
- zAlloc = *pzBuffer;
+ if( pEdupBuf ){
+ sEdupBuf.zAlloc = pEdupBuf->zAlloc;
+#ifdef SQLITE_DEBUG
+ sEdupBuf.zEnd = pEdupBuf->zEnd;
+#endif
staticFlag = EP_Static;
- assert( zAlloc!=0 );
+ assert( sEdupBuf.zAlloc!=0 );
+ assert( dupFlags==EXPRDUP_REDUCE );
}else{
- zAlloc = sqlite3DbMallocRawNN(db, dupedExprSize(p, dupFlags));
+ int nAlloc;
+ if( dupFlags ){
+ nAlloc = dupedExprSize(p);
+ }else if( !ExprHasProperty(p, EP_IntValue) && p->u.zToken ){
+ nToken = sqlite3Strlen30NN(p->u.zToken)+1;
+ nAlloc = ROUND8(EXPR_FULLSIZE + nToken);
+ }else{
+ nToken = 0;
+ nAlloc = ROUND8(EXPR_FULLSIZE);
+ }
+ assert( nAlloc==ROUND8(nAlloc) );
+ sEdupBuf.zAlloc = sqlite3DbMallocRawNN(db, nAlloc);
+#ifdef SQLITE_DEBUG
+ sEdupBuf.zEnd = sEdupBuf.zAlloc ? sEdupBuf.zAlloc+nAlloc : 0;
+#endif
+
staticFlag = 0;
}
- pNew = (Expr *)zAlloc;
+ pNew = (Expr *)sEdupBuf.zAlloc;
+ assert( EIGHT_BYTE_ALIGNMENT(pNew) );
if( pNew ){
/* Set nNewSize to the size allocated for the structure pointed to
@@ -107757,22 +109964,27 @@ static Expr *exprDup(sqlite3 *db, const Expr *p, int dupFlags, u8 **pzBuffer){
** by the copy of the p->u.zToken string (if any).
*/
const unsigned nStructSize = dupedExprStructSize(p, dupFlags);
- const int nNewSize = nStructSize & 0xfff;
- int nToken;
- if( !ExprHasProperty(p, EP_IntValue) && p->u.zToken ){
- nToken = sqlite3Strlen30(p->u.zToken) + 1;
- }else{
- nToken = 0;
+ int nNewSize = nStructSize & 0xfff;
+ if( nToken<0 ){
+ if( !ExprHasProperty(p, EP_IntValue) && p->u.zToken ){
+ nToken = sqlite3Strlen30(p->u.zToken) + 1;
+ }else{
+ nToken = 0;
+ }
}
if( dupFlags ){
+ assert( (int)(sEdupBuf.zEnd - sEdupBuf.zAlloc) >= nNewSize+nToken );
assert( ExprHasProperty(p, EP_Reduced)==0 );
- memcpy(zAlloc, p, nNewSize);
+ memcpy(sEdupBuf.zAlloc, p, nNewSize);
}else{
u32 nSize = (u32)exprStructSize(p);
- memcpy(zAlloc, p, nSize);
+ assert( (int)(sEdupBuf.zEnd - sEdupBuf.zAlloc) >=
+ (int)EXPR_FULLSIZE+nToken );
+ memcpy(sEdupBuf.zAlloc, p, nSize);
if( nSize<EXPR_FULLSIZE ){
- memset(&zAlloc[nSize], 0, EXPR_FULLSIZE-nSize);
+ memset(&sEdupBuf.zAlloc[nSize], 0, EXPR_FULLSIZE-nSize);
}
+ nNewSize = EXPR_FULLSIZE;
}
/* Set the EP_Reduced, EP_TokenOnly, and EP_Static flags appropriately. */
@@ -107785,44 +109997,50 @@ static Expr *exprDup(sqlite3 *db, const Expr *p, int dupFlags, u8 **pzBuffer){
}
/* Copy the p->u.zToken string, if any. */
- if( nToken ){
- char *zToken = pNew->u.zToken = (char*)&zAlloc[nNewSize];
+ assert( nToken>=0 );
+ if( nToken>0 ){
+ char *zToken = pNew->u.zToken = (char*)&sEdupBuf.zAlloc[nNewSize];
memcpy(zToken, p->u.zToken, nToken);
+ nNewSize += nToken;
}
+ sEdupBuf.zAlloc += ROUND8(nNewSize);
+
+ if( ((p->flags|pNew->flags)&(EP_TokenOnly|EP_Leaf))==0 ){
- if( 0==((p->flags|pNew->flags) & (EP_TokenOnly|EP_Leaf)) ){
/* Fill in the pNew->x.pSelect or pNew->x.pList member. */
if( ExprUseXSelect(p) ){
pNew->x.pSelect = sqlite3SelectDup(db, p->x.pSelect, dupFlags);
}else{
- pNew->x.pList = sqlite3ExprListDup(db, p->x.pList, dupFlags);
+ pNew->x.pList = sqlite3ExprListDup(db, p->x.pList,
+ p->op!=TK_ORDER ? dupFlags : 0);
}
- }
- /* Fill in pNew->pLeft and pNew->pRight. */
- if( ExprHasProperty(pNew, EP_Reduced|EP_TokenOnly|EP_WinFunc) ){
- zAlloc += dupedExprNodeSize(p, dupFlags);
- if( !ExprHasProperty(pNew, EP_TokenOnly|EP_Leaf) ){
- pNew->pLeft = p->pLeft ?
- exprDup(db, p->pLeft, EXPRDUP_REDUCE, &zAlloc) : 0;
- pNew->pRight = p->pRight ?
- exprDup(db, p->pRight, EXPRDUP_REDUCE, &zAlloc) : 0;
- }
#ifndef SQLITE_OMIT_WINDOWFUNC
if( ExprHasProperty(p, EP_WinFunc) ){
pNew->y.pWin = sqlite3WindowDup(db, pNew, p->y.pWin);
assert( ExprHasProperty(pNew, EP_WinFunc) );
}
#endif /* SQLITE_OMIT_WINDOWFUNC */
- if( pzBuffer ){
- *pzBuffer = zAlloc;
- }
- }else{
- if( !ExprHasProperty(p, EP_TokenOnly|EP_Leaf) ){
- if( pNew->op==TK_SELECT_COLUMN ){
+
+ /* Fill in pNew->pLeft and pNew->pRight. */
+ if( dupFlags ){
+ if( p->op==TK_SELECT_COLUMN ){
pNew->pLeft = p->pLeft;
- assert( p->pRight==0 || p->pRight==p->pLeft
- || ExprHasProperty(p->pLeft, EP_Subquery) );
+ assert( p->pRight==0
+ || p->pRight==p->pLeft
+ || ExprHasProperty(p->pLeft, EP_Subquery) );
+ }else{
+ pNew->pLeft = p->pLeft ?
+ exprDup(db, p->pLeft, EXPRDUP_REDUCE, &sEdupBuf) : 0;
+ }
+ pNew->pRight = p->pRight ?
+ exprDup(db, p->pRight, EXPRDUP_REDUCE, &sEdupBuf) : 0;
+ }else{
+ if( p->op==TK_SELECT_COLUMN ){
+ pNew->pLeft = p->pLeft;
+ assert( p->pRight==0
+ || p->pRight==p->pLeft
+ || ExprHasProperty(p->pLeft, EP_Subquery) );
}else{
pNew->pLeft = sqlite3ExprDup(db, p->pLeft, 0);
}
@@ -107830,6 +110048,8 @@ static Expr *exprDup(sqlite3 *db, const Expr *p, int dupFlags, u8 **pzBuffer){
}
}
}
+ if( pEdupBuf ) memcpy(pEdupBuf, &sEdupBuf, sizeof(sEdupBuf));
+ assert( sEdupBuf.zAlloc <= sEdupBuf.zEnd );
return pNew;
}
@@ -108094,11 +110314,7 @@ SQLITE_PRIVATE Select *sqlite3SelectDup(sqlite3 *db, const Select *p, int flags)
** initially NULL, then create a new expression list.
**
** The pList argument must be either NULL or a pointer to an ExprList
-** obtained from a prior call to sqlite3ExprListAppend(). This routine
-** may not be used with an ExprList obtained from sqlite3ExprListDup().
-** Reason: This routine assumes that the number of slots in pList->a[]
-** is a power of two. That is true for sqlite3ExprListAppend() returns
-** but is not necessarily true from the return value of sqlite3ExprListDup().
+** obtained from a prior call to sqlite3ExprListAppend().
**
** If a memory allocation error occurs, the entire list is freed and
** NULL is returned. If non-NULL is returned, then it is guaranteed
@@ -108363,6 +110579,9 @@ static SQLITE_NOINLINE void exprListDeleteNN(sqlite3 *db, ExprList *pList){
SQLITE_PRIVATE void sqlite3ExprListDelete(sqlite3 *db, ExprList *pList){
if( pList ) exprListDeleteNN(db, pList);
}
+SQLITE_PRIVATE void sqlite3ExprListDeleteGeneric(sqlite3 *db, void *pList){
+ if( ALWAYS(pList) ) exprListDeleteNN(db, (ExprList*)pList);
+}
/*
** Return the bitwise-OR of all Expr.flags fields in the given
@@ -108431,7 +110650,7 @@ SQLITE_PRIVATE int sqlite3ExprIdToTrueFalse(Expr *pExpr){
** and 0 if it is FALSE.
*/
SQLITE_PRIVATE int sqlite3ExprTruthValue(const Expr *pExpr){
- pExpr = sqlite3ExprSkipCollate((Expr*)pExpr);
+ pExpr = sqlite3ExprSkipCollateAndLikely((Expr*)pExpr);
assert( pExpr->op==TK_TRUEFALSE );
assert( !ExprHasProperty(pExpr, EP_IntValue) );
assert( sqlite3StrICmp(pExpr->u.zToken,"true")==0
@@ -108862,9 +111081,10 @@ SQLITE_PRIVATE int sqlite3ExprCanBeNull(const Expr *p){
case TK_COLUMN:
assert( ExprUseYTab(p) );
return ExprHasProperty(p, EP_CanBeNull) ||
- p->y.pTab==0 || /* Reference to column of index on expression */
+ NEVER(p->y.pTab==0) || /* Reference to column of index on expr */
(p->iColumn>=0
&& p->y.pTab->aCol!=0 /* Possible due to prior error */
+ && ALWAYS(p->iColumn<p->y.pTab->nCol)
&& p->y.pTab->aCol[p->iColumn].notNull==0);
default:
return 1;
@@ -108925,6 +111145,27 @@ SQLITE_PRIVATE int sqlite3IsRowid(const char *z){
}
/*
+** Return a pointer to a buffer containing a usable rowid alias for table
+** pTab. An alias is usable if there is not an explicit user-defined column
+** of the same name.
+*/
+SQLITE_PRIVATE const char *sqlite3RowidAlias(Table *pTab){
+ const char *azOpt[] = {"_ROWID_", "ROWID", "OID"};
+ int ii;
+ assert( VisibleRowid(pTab) );
+ for(ii=0; ii<ArraySize(azOpt); ii++){
+ int iCol;
+ for(iCol=0; iCol<pTab->nCol; iCol++){
+ if( sqlite3_stricmp(azOpt[ii], pTab->aCol[iCol].zCnName)==0 ) break;
+ }
+ if( iCol==pTab->nCol ){
+ return azOpt[ii];
+ }
+ }
+ return 0;
+}
+
+/*
** pX is the RHS of an IN operator. If pX is a SELECT statement
** that can be simplified to a direct table access, then return
** a pointer to the SELECT statement. If pX is not a SELECT statement,
@@ -109024,7 +111265,7 @@ static int sqlite3InRhsIsConstant(Expr *pIn){
** IN_INDEX_INDEX_ASC - The cursor was opened on an ascending index.
** IN_INDEX_INDEX_DESC - The cursor was opened on a descending index.
** IN_INDEX_EPH - The cursor was opened on a specially created and
-** populated epheremal table.
+** populated ephemeral table.
** IN_INDEX_NOOP - No cursor was allocated. The IN operator must be
** implemented as a sequence of comparisons.
**
@@ -109037,7 +111278,7 @@ static int sqlite3InRhsIsConstant(Expr *pIn){
** an ephemeral table might need to be generated from the RHS and then
** pX->iTable made to point to the ephemeral table instead of an
** existing table. In this case, the creation and initialization of the
-** ephmeral table might be put inside of a subroutine, the EP_Subrtn flag
+** ephemeral table might be put inside of a subroutine, the EP_Subrtn flag
** will be set on pX and the pX->y.sub fields will be set to show where
** the subroutine is coded.
**
@@ -109049,12 +111290,12 @@ static int sqlite3InRhsIsConstant(Expr *pIn){
**
** When IN_INDEX_LOOP is used (and the b-tree will be used to iterate
** through the set members) then the b-tree must not contain duplicates.
-** An epheremal table will be created unless the selected columns are guaranteed
+** An ephemeral table will be created unless the selected columns are guaranteed
** to be unique - either because it is an INTEGER PRIMARY KEY or due to
** a UNIQUE constraint or index.
**
** When IN_INDEX_MEMBERSHIP is used (and the b-tree will be used
-** for fast set membership tests) then an epheremal table must
+** for fast set membership tests) then an ephemeral table must
** be used unless <columns> is a single INTEGER PRIMARY KEY column or an
** index can be found with the specified <columns> as its left-most.
**
@@ -109387,7 +111628,7 @@ SQLITE_PRIVATE void sqlite3VectorErrorMsg(Parse *pParse, Expr *pExpr){
** x IN (SELECT a FROM b) -- IN operator with subquery on the right
**
** The pExpr parameter is the IN operator. The cursor number for the
-** constructed ephermeral table is returned. The first time the ephemeral
+** constructed ephemeral table is returned. The first time the ephemeral
** table is computed, the cursor number is also stored in pExpr->iTable,
** however the cursor number returned might not be the same, as it might
** have been duplicated using OP_OpenDup.
@@ -110202,10 +112443,13 @@ SQLITE_PRIVATE int sqlite3ExprCodeGetColumn(
u8 p5 /* P5 value for OP_Column + FLAGS */
){
assert( pParse->pVdbe!=0 );
+ assert( (p5 & (OPFLAG_NOCHNG|OPFLAG_TYPEOFARG|OPFLAG_LENGTHARG))==p5 );
+ assert( IsVirtual(pTab) || (p5 & OPFLAG_NOCHNG)==0 );
sqlite3ExprCodeGetColumnOfTable(pParse->pVdbe, pTab, iTable, iColumn, iReg);
if( p5 ){
VdbeOp *pOp = sqlite3VdbeGetLastOp(pParse->pVdbe);
if( pOp->opcode==OP_Column ) pOp->p5 = p5;
+ if( pOp->opcode==OP_VColumn ) pOp->p5 = (p5 & OPFLAG_NOCHNG);
}
return iReg;
}
@@ -110234,7 +112478,7 @@ static void exprToRegister(Expr *pExpr, int iReg){
/*
** Evaluate an expression (either a vector or a scalar expression) and store
-** the result in continguous temporary registers. Return the index of
+** the result in contiguous temporary registers. Return the index of
** the first register used to store the result.
**
** If the returned result register is a temporary scalar, then also write
@@ -110274,7 +112518,7 @@ static int exprCodeVector(Parse *pParse, Expr *p, int *piFreeable){
*/
static void setDoNotMergeFlagOnCopy(Vdbe *v){
if( sqlite3VdbeGetLastOp(v)->opcode==OP_Copy ){
- sqlite3VdbeChangeP5(v, 1); /* Tag trailing OP_Copy as not mergable */
+ sqlite3VdbeChangeP5(v, 1); /* Tag trailing OP_Copy as not mergeable */
}
}
@@ -110364,13 +112608,13 @@ static int exprCodeInlineFunction(
}
case INLINEFUNC_implies_nonnull_row: {
- /* REsult of sqlite3ExprImpliesNonNullRow() */
+ /* Result of sqlite3ExprImpliesNonNullRow() */
Expr *pA1;
assert( nFarg==2 );
pA1 = pFarg->a[1].pExpr;
if( pA1->op==TK_COLUMN ){
sqlite3VdbeAddOp2(v, OP_Integer,
- sqlite3ExprImpliesNonNullRow(pFarg->a[0].pExpr,pA1->iTable),
+ sqlite3ExprImpliesNonNullRow(pFarg->a[0].pExpr,pA1->iTable,1),
target);
}else{
sqlite3VdbeAddOp2(v, OP_Null, 0, target);
@@ -110459,6 +112703,41 @@ static SQLITE_NOINLINE int sqlite3IndexedExprLookup(
/*
+** Expresion pExpr is guaranteed to be a TK_COLUMN or equivalent. This
+** function checks the Parse.pIdxPartExpr list to see if this column
+** can be replaced with a constant value. If so, it generates code to
+** put the constant value in a register (ideally, but not necessarily,
+** register iTarget) and returns the register number.
+**
+** Or, if the TK_COLUMN cannot be replaced by a constant, zero is
+** returned.
+*/
+static int exprPartidxExprLookup(Parse *pParse, Expr *pExpr, int iTarget){
+ IndexedExpr *p;
+ for(p=pParse->pIdxPartExpr; p; p=p->pIENext){
+ if( pExpr->iColumn==p->iIdxCol && pExpr->iTable==p->iDataCur ){
+ Vdbe *v = pParse->pVdbe;
+ int addr = 0;
+ int ret;
+
+ if( p->bMaybeNullRow ){
+ addr = sqlite3VdbeAddOp1(v, OP_IfNullRow, p->iIdxCur);
+ }
+ ret = sqlite3ExprCodeTarget(pParse, p->pExpr, iTarget);
+ sqlite3VdbeAddOp4(pParse->pVdbe, OP_Affinity, ret, 1, 0,
+ (const char*)&p->aff, 1);
+ if( addr ){
+ sqlite3VdbeJumpHere(v, addr);
+ sqlite3VdbeChangeP3(v, addr, ret);
+ }
+ return ret;
+ }
+ }
+ return 0;
+}
+
+
+/*
** Generate code into the current Vdbe to evaluate the given
** expression. Attempt to store the results in register "target".
** Return the register where results are stored.
@@ -110494,6 +112773,7 @@ expr_code_doover:
assert( !ExprHasVVAProperty(pExpr,EP_Immutable) );
op = pExpr->op;
}
+ assert( op!=TK_ORDER );
switch( op ){
case TK_AGG_COLUMN: {
AggInfo *pAggInfo = pExpr->pAggInfo;
@@ -110507,7 +112787,7 @@ expr_code_doover:
#ifdef SQLITE_VDBE_COVERAGE
/* Verify that the OP_Null above is exercised by tests
** tag-20230325-2 */
- sqlite3VdbeAddOp2(v, OP_NotNull, target, 1);
+ sqlite3VdbeAddOp3(v, OP_NotNull, target, 1, 20230325);
VdbeCoverageNeverTaken(v);
#endif
break;
@@ -110546,7 +112826,7 @@ expr_code_doover:
if( ExprHasProperty(pExpr, EP_FixedCol) ){
/* This COLUMN expression is really a constant due to WHERE clause
** constraints, and that constant is coded by the pExpr->pLeft
- ** expresssion. However, make sure the constant has the correct
+ ** expression. However, make sure the constant has the correct
** datatype by applying the Affinity of the table column to the
** constant.
*/
@@ -110615,6 +112895,11 @@ expr_code_doover:
iTab = pParse->iSelfTab - 1;
}
}
+ else if( pParse->pIdxPartExpr
+ && 0!=(r1 = exprPartidxExprLookup(pParse, pExpr, target))
+ ){
+ return r1;
+ }
assert( ExprUseYTab(pExpr) );
assert( pExpr->y.pTab!=0 );
iReg = sqlite3ExprCodeGetColumn(pParse, pExpr->y.pTab,
@@ -110872,7 +113157,7 @@ expr_code_doover:
sqlite3ErrorMsg(pParse, "unknown function: %#T()", pExpr);
break;
}
- if( pDef->funcFlags & SQLITE_FUNC_INLINE ){
+ if( (pDef->funcFlags & SQLITE_FUNC_INLINE)!=0 && ALWAYS(pFarg!=0) ){
assert( (pDef->funcFlags & SQLITE_FUNC_UNSAFE)==0 );
assert( (pDef->funcFlags & SQLITE_FUNC_DIRECT)==0 );
return exprCodeInlineFunction(pParse, pFarg,
@@ -110898,10 +113183,10 @@ expr_code_doover:
r1 = sqlite3GetTempRange(pParse, nFarg);
}
- /* For length() and typeof() functions with a column argument,
+ /* For length() and typeof() and octet_length() functions,
** set the P5 parameter to the OP_Column opcode to OPFLAG_LENGTHARG
- ** or OPFLAG_TYPEOFARG respectively, to avoid unnecessary data
- ** loading.
+ ** or OPFLAG_TYPEOFARG or OPFLAG_BYTELENARG respectively, to avoid
+ ** unnecessary data loading.
*/
if( (pDef->funcFlags & (SQLITE_FUNC_LENGTH|SQLITE_FUNC_TYPEOF))!=0 ){
u8 exprOp;
@@ -110911,14 +113196,16 @@ expr_code_doover:
if( exprOp==TK_COLUMN || exprOp==TK_AGG_COLUMN ){
assert( SQLITE_FUNC_LENGTH==OPFLAG_LENGTHARG );
assert( SQLITE_FUNC_TYPEOF==OPFLAG_TYPEOFARG );
- testcase( pDef->funcFlags & OPFLAG_LENGTHARG );
- pFarg->a[0].pExpr->op2 =
- pDef->funcFlags & (OPFLAG_LENGTHARG|OPFLAG_TYPEOFARG);
+ assert( SQLITE_FUNC_BYTELEN==OPFLAG_BYTELENARG );
+ assert( (OPFLAG_LENGTHARG|OPFLAG_TYPEOFARG)==OPFLAG_BYTELENARG );
+ testcase( (pDef->funcFlags & OPFLAG_BYTELENARG)==OPFLAG_LENGTHARG );
+ testcase( (pDef->funcFlags & OPFLAG_BYTELENARG)==OPFLAG_TYPEOFARG );
+ testcase( (pDef->funcFlags & OPFLAG_BYTELENARG)==OPFLAG_BYTELENARG);
+ pFarg->a[0].pExpr->op2 = pDef->funcFlags & OPFLAG_BYTELENARG;
}
}
- sqlite3ExprCodeExprList(pParse, pFarg, r1, 0,
- SQLITE_ECEL_DUP|SQLITE_ECEL_FACTOR);
+ sqlite3ExprCodeExprList(pParse, pFarg, r1, 0, SQLITE_ECEL_FACTOR);
}else{
r1 = 0;
}
@@ -111273,9 +113560,9 @@ expr_code_doover:
** once. If no functions are involved, then factor the code out and put it at
** the end of the prepared statement in the initialization section.
**
-** If regDest>=0 then the result is always stored in that register and the
+** If regDest>0 then the result is always stored in that register and the
** result is not reusable. If regDest<0 then this routine is free to
-** store the value whereever it wants. The register where the expression
+** store the value wherever it wants. The register where the expression
** is stored is returned. When regDest<0, two identical expressions might
** code to the same register, if they do not contain function calls and hence
** are factored out into the initialization section at the end of the
@@ -111288,6 +113575,7 @@ SQLITE_PRIVATE int sqlite3ExprCodeRunJustOnce(
){
ExprList *p;
assert( ConstFactorOk(pParse) );
+ assert( regDest!=0 );
p = pParse->pConstExpr;
if( regDest<0 && p ){
struct ExprList_item *pItem;
@@ -111378,8 +113666,10 @@ SQLITE_PRIVATE void sqlite3ExprCode(Parse *pParse, Expr *pExpr, int target){
inReg = sqlite3ExprCodeTarget(pParse, pExpr, target);
if( inReg!=target ){
u8 op;
- if( ALWAYS(pExpr)
- && (ExprHasProperty(pExpr,EP_Subquery) || pExpr->op==TK_REGISTER)
+ Expr *pX = sqlite3ExprSkipCollateAndLikely(pExpr);
+ testcase( pX!=pExpr );
+ if( ALWAYS(pX)
+ && (ExprHasProperty(pX,EP_Subquery) || pX->op==TK_REGISTER)
){
op = OP_Copy;
}else{
@@ -112099,8 +114389,8 @@ SQLITE_PRIVATE int sqlite3ExprListCompare(const ExprList *pA, const ExprList *pB
*/
SQLITE_PRIVATE int sqlite3ExprCompareSkip(Expr *pA,Expr *pB, int iTab){
return sqlite3ExprCompare(0,
- sqlite3ExprSkipCollateAndLikely(pA),
- sqlite3ExprSkipCollateAndLikely(pB),
+ sqlite3ExprSkipCollate(pA),
+ sqlite3ExprSkipCollate(pB),
iTab);
}
@@ -112193,7 +114483,7 @@ static int exprImpliesNotNull(
** pE1: x!=123 pE2: x IS NOT NULL Result: true
** pE1: x!=?1 pE2: x IS NOT NULL Result: true
** pE1: x IS NULL pE2: x IS NOT NULL Result: false
-** pE1: x IS ?2 pE2: x IS NOT NULL Reuslt: false
+** pE1: x IS ?2 pE2: x IS NOT NULL Result: false
**
** When comparing TK_COLUMN nodes between pE1 and pE2, if pE2 has
** Expr.iTable<0 then assume a table number given by iTab.
@@ -112230,11 +114520,29 @@ SQLITE_PRIVATE int sqlite3ExprImpliesExpr(
return 0;
}
+/* This is a helper function to impliesNotNullRow(). In this routine,
+** set pWalker->eCode to one only if *both* of the input expressions
+** separately have the implies-not-null-row property.
+*/
+static void bothImplyNotNullRow(Walker *pWalker, Expr *pE1, Expr *pE2){
+ if( pWalker->eCode==0 ){
+ sqlite3WalkExpr(pWalker, pE1);
+ if( pWalker->eCode ){
+ pWalker->eCode = 0;
+ sqlite3WalkExpr(pWalker, pE2);
+ }
+ }
+}
+
/*
** This is the Expr node callback for sqlite3ExprImpliesNonNullRow().
** If the expression node requires that the table at pWalker->iCur
** have one or more non-NULL column, then set pWalker->eCode to 1 and abort.
**
+** pWalker->mWFlags is non-zero if this inquiry is being undertaking on
+** behalf of a RIGHT JOIN (or FULL JOIN). That makes a difference when
+** evaluating terms in the ON clause of an inner join.
+**
** This routine controls an optimization. False positives (setting
** pWalker->eCode to 1 when it should not be) are deadly, but false-negatives
** (never setting pWalker->eCode) is a harmless missed optimization.
@@ -112243,28 +114551,33 @@ static int impliesNotNullRow(Walker *pWalker, Expr *pExpr){
testcase( pExpr->op==TK_AGG_COLUMN );
testcase( pExpr->op==TK_AGG_FUNCTION );
if( ExprHasProperty(pExpr, EP_OuterON) ) return WRC_Prune;
+ if( ExprHasProperty(pExpr, EP_InnerON) && pWalker->mWFlags ){
+ /* If iCur is used in an inner-join ON clause to the left of a
+ ** RIGHT JOIN, that does *not* mean that the table must be non-null.
+ ** But it is difficult to check for that condition precisely.
+ ** To keep things simple, any use of iCur from any inner-join is
+ ** ignored while attempting to simplify a RIGHT JOIN. */
+ return WRC_Prune;
+ }
switch( pExpr->op ){
case TK_ISNOT:
case TK_ISNULL:
case TK_NOTNULL:
case TK_IS:
- case TK_OR:
case TK_VECTOR:
- case TK_CASE:
- case TK_IN:
case TK_FUNCTION:
case TK_TRUTH:
+ case TK_CASE:
testcase( pExpr->op==TK_ISNOT );
testcase( pExpr->op==TK_ISNULL );
testcase( pExpr->op==TK_NOTNULL );
testcase( pExpr->op==TK_IS );
- testcase( pExpr->op==TK_OR );
testcase( pExpr->op==TK_VECTOR );
- testcase( pExpr->op==TK_CASE );
- testcase( pExpr->op==TK_IN );
testcase( pExpr->op==TK_FUNCTION );
testcase( pExpr->op==TK_TRUTH );
+ testcase( pExpr->op==TK_CASE );
return WRC_Prune;
+
case TK_COLUMN:
if( pWalker->u.iCur==pExpr->iTable ){
pWalker->eCode = 1;
@@ -112272,21 +114585,38 @@ static int impliesNotNullRow(Walker *pWalker, Expr *pExpr){
}
return WRC_Prune;
+ case TK_OR:
case TK_AND:
- if( pWalker->eCode==0 ){
+ /* Both sides of an AND or OR must separately imply non-null-row.
+ ** Consider these cases:
+ ** 1. NOT (x AND y)
+ ** 2. x OR y
+ ** If only one of x or y is non-null-row, then the overall expression
+ ** can be true if the other arm is false (case 1) or true (case 2).
+ */
+ testcase( pExpr->op==TK_OR );
+ testcase( pExpr->op==TK_AND );
+ bothImplyNotNullRow(pWalker, pExpr->pLeft, pExpr->pRight);
+ return WRC_Prune;
+
+ case TK_IN:
+ /* Beware of "x NOT IN ()" and "x NOT IN (SELECT 1 WHERE false)",
+ ** both of which can be true. But apart from these cases, if
+ ** the left-hand side of the IN is NULL then the IN itself will be
+ ** NULL. */
+ if( ExprUseXList(pExpr) && ALWAYS(pExpr->x.pList->nExpr>0) ){
sqlite3WalkExpr(pWalker, pExpr->pLeft);
- if( pWalker->eCode ){
- pWalker->eCode = 0;
- sqlite3WalkExpr(pWalker, pExpr->pRight);
- }
}
return WRC_Prune;
case TK_BETWEEN:
- if( sqlite3WalkExpr(pWalker, pExpr->pLeft)==WRC_Abort ){
- assert( pWalker->eCode );
- return WRC_Abort;
- }
+ /* In "x NOT BETWEEN y AND z" either x must be non-null-row or else
+ ** both y and z must be non-null row */
+ assert( ExprUseXList(pExpr) );
+ assert( pExpr->x.pList->nExpr==2 );
+ sqlite3WalkExpr(pWalker, pExpr->pLeft);
+ bothImplyNotNullRow(pWalker, pExpr->x.pList->a[0].pExpr,
+ pExpr->x.pList->a[1].pExpr);
return WRC_Prune;
/* Virtual tables are allowed to use constraints like x=NULL. So
@@ -112348,7 +114678,7 @@ static int impliesNotNullRow(Walker *pWalker, Expr *pExpr){
** be non-NULL, then the LEFT JOIN can be safely converted into an
** ordinary join.
*/
-SQLITE_PRIVATE int sqlite3ExprImpliesNonNullRow(Expr *p, int iTab){
+SQLITE_PRIVATE int sqlite3ExprImpliesNonNullRow(Expr *p, int iTab, int isRJ){
Walker w;
p = sqlite3ExprSkipCollateAndLikely(p);
if( p==0 ) return 0;
@@ -112356,7 +114686,7 @@ SQLITE_PRIVATE int sqlite3ExprImpliesNonNullRow(Expr *p, int iTab){
p = p->pLeft;
}else{
while( p->op==TK_AND ){
- if( sqlite3ExprImpliesNonNullRow(p->pLeft, iTab) ) return 1;
+ if( sqlite3ExprImpliesNonNullRow(p->pLeft, iTab, isRJ) ) return 1;
p = p->pRight;
}
}
@@ -112364,6 +114694,7 @@ SQLITE_PRIVATE int sqlite3ExprImpliesNonNullRow(Expr *p, int iTab){
w.xSelectCallback = 0;
w.xSelectCallback2 = 0;
w.eCode = 0;
+ w.mWFlags = isRJ!=0;
w.u.iCur = iTab;
sqlite3WalkExpr(&w, p);
return w.eCode;
@@ -112424,7 +114755,7 @@ SQLITE_PRIVATE int sqlite3ExprCoveredByIndex(
}
-/* Structure used to pass information throught the Walker in order to
+/* Structure used to pass information throughout the Walker in order to
** implement sqlite3ReferencesSrcList().
*/
struct RefSrcList {
@@ -112531,6 +114862,12 @@ SQLITE_PRIVATE int sqlite3ReferencesSrcList(Parse *pParse, Expr *pExpr, SrcList
assert( pExpr->op==TK_AGG_FUNCTION );
assert( ExprUseXList(pExpr) );
sqlite3WalkExprList(&w, pExpr->x.pList);
+ if( pExpr->pLeft ){
+ assert( pExpr->pLeft->op==TK_ORDER );
+ assert( ExprUseXList(pExpr->pLeft) );
+ assert( pExpr->pLeft->x.pList!=0 );
+ sqlite3WalkExprList(&w, pExpr->pLeft->x.pList);
+ }
#ifndef SQLITE_OMIT_WINDOWFUNC
if( ExprHasProperty(pExpr, EP_WinFunc) ){
sqlite3WalkExpr(&w, pExpr->y.pWin->pFilter);
@@ -112640,7 +114977,7 @@ static int addAggInfoFunc(sqlite3 *db, AggInfo *pInfo){
** Return the index in aCol[] of the entry that describes that column.
**
** If no prior entry is found, create a new one and return -1. The
-** new column will have an idex of pAggInfo->nColumn-1.
+** new column will have an index of pAggInfo->nColumn-1.
*/
static void findOrCreateAggInfoColumn(
Parse *pParse, /* Parsing context */
@@ -112653,6 +114990,7 @@ static void findOrCreateAggInfoColumn(
assert( pAggInfo->iFirstReg==0 );
pCol = pAggInfo->aCol;
for(k=0; k<pAggInfo->nColumn; k++, pCol++){
+ if( pCol->pCExpr==pExpr ) return;
if( pCol->iTable==pExpr->iTable
&& pCol->iColumn==pExpr->iColumn
&& pExpr->op!=TK_IF_NULL_ROW
@@ -112777,13 +115115,14 @@ static int analyzeAggregate(Walker *pWalker, Expr *pExpr){
case TK_AGG_FUNCTION: {
if( (pNC->ncFlags & NC_InAggFunc)==0
&& pWalker->walkerDepth==pExpr->op2
+ && pExpr->pAggInfo==0
){
/* Check to see if pExpr is a duplicate of another aggregate
** function that is already in the pAggInfo structure
*/
struct AggInfo_func *pItem = pAggInfo->aFunc;
for(i=0; i<pAggInfo->nFunc; i++, pItem++){
- if( pItem->pFExpr==pExpr ) break;
+ if( NEVER(pItem->pFExpr==pExpr) ) break;
if( sqlite3ExprCompare(0, pItem->pFExpr, pExpr, -1)==0 ){
break;
}
@@ -112794,14 +115133,44 @@ static int analyzeAggregate(Walker *pWalker, Expr *pExpr){
u8 enc = ENC(pParse->db);
i = addAggInfoFunc(pParse->db, pAggInfo);
if( i>=0 ){
+ int nArg;
assert( !ExprHasProperty(pExpr, EP_xIsSelect) );
pItem = &pAggInfo->aFunc[i];
pItem->pFExpr = pExpr;
assert( ExprUseUToken(pExpr) );
+ nArg = pExpr->x.pList ? pExpr->x.pList->nExpr : 0;
pItem->pFunc = sqlite3FindFunction(pParse->db,
- pExpr->u.zToken,
- pExpr->x.pList ? pExpr->x.pList->nExpr : 0, enc, 0);
- if( pExpr->flags & EP_Distinct ){
+ pExpr->u.zToken, nArg, enc, 0);
+ assert( pItem->bOBUnique==0 );
+ if( pExpr->pLeft
+ && (pItem->pFunc->funcFlags & SQLITE_FUNC_NEEDCOLL)==0
+ ){
+ /* The NEEDCOLL test above causes any ORDER BY clause on
+ ** aggregate min() or max() to be ignored. */
+ ExprList *pOBList;
+ assert( nArg>0 );
+ assert( pExpr->pLeft->op==TK_ORDER );
+ assert( ExprUseXList(pExpr->pLeft) );
+ pItem->iOBTab = pParse->nTab++;
+ pOBList = pExpr->pLeft->x.pList;
+ assert( pOBList->nExpr>0 );
+ assert( pItem->bOBUnique==0 );
+ if( pOBList->nExpr==1
+ && nArg==1
+ && sqlite3ExprCompare(0,pOBList->a[0].pExpr,
+ pExpr->x.pList->a[0].pExpr,0)==0
+ ){
+ pItem->bOBPayload = 0;
+ pItem->bOBUnique = ExprHasProperty(pExpr, EP_Distinct);
+ }else{
+ pItem->bOBPayload = 1;
+ }
+ pItem->bUseSubtype =
+ (pItem->pFunc->funcFlags & SQLITE_SUBTYPE)!=0;
+ }else{
+ pItem->iOBTab = -1;
+ }
+ if( ExprHasProperty(pExpr, EP_Distinct) && !pItem->bOBUnique ){
pItem->iDistinct = pParse->nTab++;
}else{
pItem->iDistinct = -1;
@@ -113437,14 +115806,19 @@ SQLITE_PRIVATE void sqlite3AlterFinishAddColumn(Parse *pParse, Token *pColDef){
/* Verify that constraints are still satisfied */
if( pNew->pCheck!=0
|| (pCol->notNull && (pCol->colFlags & COLFLAG_GENERATED)!=0)
+ || (pTab->tabFlags & TF_Strict)!=0
){
sqlite3NestedParse(pParse,
"SELECT CASE WHEN quick_check GLOB 'CHECK*'"
" THEN raise(ABORT,'CHECK constraint failed')"
+ " WHEN quick_check GLOB 'non-* value in*'"
+ " THEN raise(ABORT,'type mismatch on DEFAULT')"
" ELSE raise(ABORT,'NOT NULL constraint failed')"
" END"
" FROM pragma_quick_check(%Q,%Q)"
- " WHERE quick_check GLOB 'CHECK*' OR quick_check GLOB 'NULL*'",
+ " WHERE quick_check GLOB 'CHECK*'"
+ " OR quick_check GLOB 'NULL*'"
+ " OR quick_check GLOB 'non-* value in*'",
zTab, zDb
);
}
@@ -113533,7 +115907,7 @@ SQLITE_PRIVATE void sqlite3AlterBeginAddColumn(Parse *pParse, SrcList *pSrc){
pNew->u.tab.pDfltList = sqlite3ExprListDup(db, pTab->u.tab.pDfltList, 0);
pNew->pSchema = db->aDb[iDb].pSchema;
pNew->u.tab.addColOffset = pTab->u.tab.addColOffset;
- pNew->nTabRef = 1;
+ assert( pNew->nTabRef==1 );
exit_begin_add_column:
sqlite3SrcListDelete(db, pSrc);
@@ -114038,7 +116412,7 @@ static RenameToken *renameColumnTokenNext(RenameCtx *pCtx){
}
/*
-** An error occured while parsing or otherwise processing a database
+** An error occurred while parsing or otherwise processing a database
** object (either pParse->pNewTable, pNewIndex or pNewTrigger) as part of an
** ALTER TABLE RENAME COLUMN program. The error message emitted by the
** sub-routine is currently stored in pParse->zErrMsg. This function
@@ -115559,9 +117933,9 @@ static void openStatTable(
typedef struct StatAccum StatAccum;
typedef struct StatSample StatSample;
struct StatSample {
- tRowcnt *anEq; /* sqlite_stat4.nEq */
tRowcnt *anDLt; /* sqlite_stat4.nDLt */
#ifdef SQLITE_ENABLE_STAT4
+ tRowcnt *anEq; /* sqlite_stat4.nEq */
tRowcnt *anLt; /* sqlite_stat4.nLt */
union {
i64 iRowid; /* Rowid in main table of the key */
@@ -115719,9 +118093,9 @@ static void statInit(
/* Allocate the space required for the StatAccum object */
n = sizeof(*p)
- + sizeof(tRowcnt)*nColUp /* StatAccum.anEq */
- + sizeof(tRowcnt)*nColUp; /* StatAccum.anDLt */
+ + sizeof(tRowcnt)*nColUp; /* StatAccum.anDLt */
#ifdef SQLITE_ENABLE_STAT4
+ n += sizeof(tRowcnt)*nColUp; /* StatAccum.anEq */
if( mxSample ){
n += sizeof(tRowcnt)*nColUp /* StatAccum.anLt */
+ sizeof(StatSample)*(nCol+mxSample) /* StatAccum.aBest[], a[] */
@@ -115742,9 +118116,9 @@ static void statInit(
p->nKeyCol = nKeyCol;
p->nSkipAhead = 0;
p->current.anDLt = (tRowcnt*)&p[1];
- p->current.anEq = &p->current.anDLt[nColUp];
#ifdef SQLITE_ENABLE_STAT4
+ p->current.anEq = &p->current.anDLt[nColUp];
p->mxSample = p->nLimit==0 ? mxSample : 0;
if( mxSample ){
u8 *pSpace; /* Allocated space not yet assigned */
@@ -116011,7 +118385,9 @@ static void statPush(
if( p->nRow==0 ){
/* This is the first call to this function. Do initialization. */
+#ifdef SQLITE_ENABLE_STAT4
for(i=0; i<p->nCol; i++) p->current.anEq[i] = 1;
+#endif
}else{
/* Second and subsequent calls get processed here */
#ifdef SQLITE_ENABLE_STAT4
@@ -116020,15 +118396,17 @@ static void statPush(
/* Update anDLt[], anLt[] and anEq[] to reflect the values that apply
** to the current row of the index. */
+#ifdef SQLITE_ENABLE_STAT4
for(i=0; i<iChng; i++){
p->current.anEq[i]++;
}
+#endif
for(i=iChng; i<p->nCol; i++){
p->current.anDLt[i]++;
#ifdef SQLITE_ENABLE_STAT4
if( p->mxSample ) p->current.anLt[i] += p->current.anEq[i];
-#endif
p->current.anEq[i] = 1;
+#endif
}
}
@@ -116162,7 +118540,9 @@ static void statGet(
u64 iVal = (p->nRow + nDistinct - 1) / nDistinct;
if( iVal==2 && p->nRow*10 <= nDistinct*11 ) iVal = 1;
sqlite3_str_appendf(&sStat, " %llu", iVal);
+#ifdef SQLITE_ENABLE_STAT4
assert( p->current.anEq[i] );
+#endif
}
sqlite3ResultStrAccum(context, &sStat);
}
@@ -116851,6 +119231,16 @@ static void decodeIntArray(
while( z[0]!=0 && z[0]!=' ' ) z++;
while( z[0]==' ' ) z++;
}
+
+ /* Set the bLowQual flag if the peak number of rows obtained
+ ** from a full equality match is so large that a full table scan
+ ** seems likely to be faster than using the index.
+ */
+ if( aLog[0] > 66 /* Index has more than 100 rows */
+ && aLog[0] <= aLog[nOut-1] /* And only a single value seen */
+ ){
+ pIndex->bLowQual = 1;
+ }
}
}
@@ -117144,14 +119534,15 @@ static int loadStatTbl(
decodeIntArray((char*)sqlite3_column_text(pStmt,2),nCol,pSample->anLt,0,0);
decodeIntArray((char*)sqlite3_column_text(pStmt,3),nCol,pSample->anDLt,0,0);
- /* Take a copy of the sample. Add two 0x00 bytes the end of the buffer.
+ /* Take a copy of the sample. Add 8 extra 0x00 bytes the end of the buffer.
** This is in case the sample record is corrupted. In that case, the
** sqlite3VdbeRecordCompare() may read up to two varints past the
** end of the allocated buffer before it realizes it is dealing with
- ** a corrupt record. Adding the two 0x00 bytes prevents this from causing
+ ** a corrupt record. Or it might try to read a large integer from the
+ ** buffer. In any case, eight 0x00 bytes prevents this from causing
** a buffer overread. */
pSample->n = sqlite3_column_bytes(pStmt, 4);
- pSample->p = sqlite3DbMallocZero(db, pSample->n + 2);
+ pSample->p = sqlite3DbMallocZero(db, pSample->n + 8);
if( pSample->p==0 ){
sqlite3_finalize(pStmt);
return SQLITE_NOMEM_BKPT;
@@ -118109,7 +120500,7 @@ SQLITE_PRIVATE int sqlite3AuthCheck(
sqlite3 *db = pParse->db;
int rc;
- /* Don't do any authorization checks if the database is initialising
+ /* Don't do any authorization checks if the database is initializing
** or if the parser is being invoked from within sqlite3_declare_vtab.
*/
assert( !IN_RENAME_OBJECT || db->xAuth==0 );
@@ -118410,29 +120801,26 @@ SQLITE_PRIVATE void sqlite3FinishCoding(Parse *pParse){
pParse->nVtabLock = 0;
#endif
+#ifndef SQLITE_OMIT_SHARED_CACHE
/* Once all the cookies have been verified and transactions opened,
** obtain the required table-locks. This is a no-op unless the
** shared-cache feature is enabled.
*/
- codeTableLocks(pParse);
+ if( pParse->nTableLock ) codeTableLocks(pParse);
+#endif
/* Initialize any AUTOINCREMENT data structures required.
*/
- sqlite3AutoincrementBegin(pParse);
+ if( pParse->pAinc ) sqlite3AutoincrementBegin(pParse);
- /* Code constant expressions that where factored out of inner loops.
- **
- ** The pConstExpr list might also contain expressions that we simply
- ** want to keep around until the Parse object is deleted. Such
- ** expressions have iConstExprReg==0. Do not generate code for
- ** those expressions, of course.
+ /* Code constant expressions that were factored out of inner loops.
*/
if( pParse->pConstExpr ){
ExprList *pEL = pParse->pConstExpr;
pParse->okConstFactor = 0;
for(i=0; i<pEL->nExpr; i++){
- int iReg = pEL->a[i].u.iConstExprReg;
- sqlite3ExprCode(pParse, pEL->a[i].pExpr, iReg);
+ assert( pEL->a[i].u.iConstExprReg>0 );
+ sqlite3ExprCode(pParse, pEL->a[i].pExpr, pEL->a[i].u.iConstExprReg);
}
}
@@ -118899,7 +121287,7 @@ SQLITE_PRIVATE void sqlite3ColumnSetExpr(
*/
SQLITE_PRIVATE Expr *sqlite3ColumnExpr(Table *pTab, Column *pCol){
if( pCol->iDflt==0 ) return 0;
- if( NEVER(!IsOrdinaryTable(pTab)) ) return 0;
+ if( !IsOrdinaryTable(pTab) ) return 0;
if( NEVER(pTab->u.tab.pDfltList==0) ) return 0;
if( NEVER(pTab->u.tab.pDfltList->nExpr<pCol->iDflt) ) return 0;
return pTab->u.tab.pDfltList->a[pCol->iDflt-1].pExpr;
@@ -118931,7 +121319,7 @@ SQLITE_PRIVATE void sqlite3ColumnSetColl(
}
/*
-** Return the collating squence name for a column
+** Return the collating sequence name for a column
*/
SQLITE_PRIVATE const char *sqlite3ColumnColl(Column *pCol){
const char *z;
@@ -119052,6 +121440,9 @@ SQLITE_PRIVATE void sqlite3DeleteTable(sqlite3 *db, Table *pTable){
if( db->pnBytesFreed==0 && (--pTable->nTabRef)>0 ) return;
deleteTable(db, pTable);
}
+SQLITE_PRIVATE void sqlite3DeleteTableGeneric(sqlite3 *db, void *pTable){
+ sqlite3DeleteTable(db, (Table*)pTable);
+}
/*
@@ -119587,19 +121978,13 @@ SQLITE_PRIVATE void sqlite3ColumnPropertiesFromName(Table *pTab, Column *pCol){
#endif
/*
-** Name of the special TEMP trigger used to implement RETURNING. The
-** name begins with "sqlite_" so that it is guaranteed not to collide
-** with any application-generated triggers.
-*/
-#define RETURNING_TRIGGER_NAME "sqlite_returning"
-
-/*
** Clean up the data structures associated with the RETURNING clause.
*/
-static void sqlite3DeleteReturning(sqlite3 *db, Returning *pRet){
+static void sqlite3DeleteReturning(sqlite3 *db, void *pArg){
+ Returning *pRet = (Returning*)pArg;
Hash *pHash;
pHash = &(db->aDb[1].pSchema->trigHash);
- sqlite3HashInsert(pHash, RETURNING_TRIGGER_NAME, 0);
+ sqlite3HashInsert(pHash, pRet->zName, 0);
sqlite3ExprListDelete(db, pRet->pReturnEL);
sqlite3DbFree(db, pRet);
}
@@ -119638,11 +122023,12 @@ SQLITE_PRIVATE void sqlite3AddReturning(Parse *pParse, ExprList *pList){
pParse->u1.pReturning = pRet;
pRet->pParse = pParse;
pRet->pReturnEL = pList;
- sqlite3ParserAddCleanup(pParse,
- (void(*)(sqlite3*,void*))sqlite3DeleteReturning, pRet);
+ sqlite3ParserAddCleanup(pParse, sqlite3DeleteReturning, pRet);
testcase( pParse->earlyCleanup );
if( db->mallocFailed ) return;
- pRet->retTrig.zName = RETURNING_TRIGGER_NAME;
+ sqlite3_snprintf(sizeof(pRet->zName), pRet->zName,
+ "sqlite_returning_%p", pParse);
+ pRet->retTrig.zName = pRet->zName;
pRet->retTrig.op = TK_RETURNING;
pRet->retTrig.tr_tm = TRIGGER_AFTER;
pRet->retTrig.bReturning = 1;
@@ -119653,9 +122039,9 @@ SQLITE_PRIVATE void sqlite3AddReturning(Parse *pParse, ExprList *pList){
pRet->retTStep.pTrig = &pRet->retTrig;
pRet->retTStep.pExprList = pList;
pHash = &(db->aDb[1].pSchema->trigHash);
- assert( sqlite3HashFind(pHash, RETURNING_TRIGGER_NAME)==0
+ assert( sqlite3HashFind(pHash, pRet->zName)==0
|| pParse->nErr || pParse->ifNotExists );
- if( sqlite3HashInsert(pHash, RETURNING_TRIGGER_NAME, &pRet->retTrig)
+ if( sqlite3HashInsert(pHash, pRet->zName, &pRet->retTrig)
==&pRet->retTrig ){
sqlite3OomFault(db);
}
@@ -119689,7 +122075,7 @@ SQLITE_PRIVATE void sqlite3AddColumn(Parse *pParse, Token sName, Token sType){
}
if( !IN_RENAME_OBJECT ) sqlite3DequoteToken(&sName);
- /* Because keywords GENERATE ALWAYS can be converted into indentifiers
+ /* Because keywords GENERATE ALWAYS can be converted into identifiers
** by the parser, we can sometimes end up with a typename that ends
** with "generated always". Check for this case and omit the surplus
** text. */
@@ -119836,7 +122222,8 @@ SQLITE_PRIVATE char sqlite3AffinityType(const char *zIn, Column *pCol){
assert( zIn!=0 );
while( zIn[0] ){
- h = (h<<8) + sqlite3UpperToLower[(*zIn)&0xff];
+ u8 x = *(u8*)zIn;
+ h = (h<<8) + sqlite3UpperToLower[x];
zIn++;
if( h==(('c'<<24)+('h'<<16)+('a'<<8)+'r') ){ /* CHAR */
aff = SQLITE_AFF_TEXT;
@@ -119910,7 +122297,7 @@ SQLITE_PRIVATE void sqlite3AddDefaultValue(
Parse *pParse, /* Parsing context */
Expr *pExpr, /* The parsed expression of the default value */
const char *zStart, /* Start of the default value text */
- const char *zEnd /* First character past end of defaut value text */
+ const char *zEnd /* First character past end of default value text */
){
Table *p;
Column *pCol;
@@ -120258,7 +122645,7 @@ static int identLength(const char *z){
** to the specified offset in the buffer and updates *pIdx to refer
** to the first byte after the last byte written before returning.
**
-** If the string zSignedIdent consists entirely of alpha-numeric
+** If the string zSignedIdent consists entirely of alphanumeric
** characters, does not begin with a digit and is not an SQL keyword,
** then it is copied to the output buffer exactly as it is. Otherwise,
** it is quoted using double-quotes.
@@ -120410,7 +122797,7 @@ static void estimateIndexWidth(Index *pIdx){
for(i=0; i<pIdx->nColumn; i++){
i16 x = pIdx->aiColumn[i];
assert( x<pIdx->pTable->nCol );
- wIndex += x<0 ? 1 : aCol[pIdx->aiColumn[i]].szEst;
+ wIndex += x<0 ? 1 : aCol[x].szEst;
}
pIdx->szIdxRow = sqlite3LogEst(wIndex*4);
}
@@ -121099,6 +123486,17 @@ SQLITE_PRIVATE void sqlite3EndTable(
/* Reparse everything to update our internal data structures */
sqlite3VdbeAddParseSchemaOp(v, iDb,
sqlite3MPrintf(db, "tbl_name='%q' AND type!='trigger'", p->zName),0);
+
+ /* Test for cycles in generated columns and illegal expressions
+ ** in CHECK constraints and in DEFAULT clauses. */
+ if( p->tabFlags & TF_HasGenerated ){
+ sqlite3VdbeAddOp4(v, OP_SqlExec, 1, 0, 0,
+ sqlite3MPrintf(db, "SELECT*FROM\"%w\".\"%w\"",
+ db->aDb[iDb].zDbSName, p->zName), P4_DYNAMIC);
+ }
+ sqlite3VdbeAddOp4(v, OP_SqlExec, 1, 0, 0,
+ sqlite3MPrintf(db, "PRAGMA \"%w\".integrity_check(%Q)",
+ db->aDb[iDb].zDbSName, p->zName), P4_DYNAMIC);
}
/* Add the table to the in-memory representation of the database.
@@ -122148,7 +124546,7 @@ SQLITE_PRIVATE void sqlite3CreateIndex(
#ifndef SQLITE_OMIT_TEMPDB
/* If the index name was unqualified, check if the table
** is a temp table. If so, set the database to 1. Do not do this
- ** if initialising a database schema.
+ ** if initializing a database schema.
*/
if( !db->init.busy ){
pTab = sqlite3SrcListLookup(pParse, pTblName);
@@ -123690,7 +126088,7 @@ SQLITE_PRIVATE void sqlite3Reindex(Parse *pParse, Token *pName1, Token *pName2){
if( iDb<0 ) return;
z = sqlite3NameFromToken(db, pObjName);
if( z==0 ) return;
- zDb = db->aDb[iDb].zDbSName;
+ zDb = pName2->n ? db->aDb[iDb].zDbSName : 0;
pTab = sqlite3FindTable(db, z, zDb);
if( pTab ){
reindexTable(pParse, pTab, 0);
@@ -123700,6 +126098,7 @@ SQLITE_PRIVATE void sqlite3Reindex(Parse *pParse, Token *pName1, Token *pName2){
pIndex = sqlite3FindIndex(db, z, zDb);
sqlite3DbFree(db, z);
if( pIndex ){
+ iDb = sqlite3SchemaToIndex(db, pIndex->pTable->pSchema);
sqlite3BeginWriteOperation(pParse, 0, iDb);
sqlite3RefillIndex(pParse, pIndex, -1);
return;
@@ -123805,7 +126204,7 @@ SQLITE_PRIVATE void sqlite3CteDelete(sqlite3 *db, Cte *pCte){
/*
** This routine is invoked once per CTE by the parser while parsing a
-** WITH clause. The CTE described by teh third argument is added to
+** WITH clause. The CTE described by the third argument is added to
** the WITH clause of the second argument. If the second argument is
** NULL, then a new WITH argument is created.
*/
@@ -123865,6 +126264,9 @@ SQLITE_PRIVATE void sqlite3WithDelete(sqlite3 *db, With *pWith){
sqlite3DbFree(db, pWith);
}
}
+SQLITE_PRIVATE void sqlite3WithDeleteGeneric(sqlite3 *db, void *pWith){
+ sqlite3WithDelete(db, (With*)pWith);
+}
#endif /* !defined(SQLITE_OMIT_CTE) */
/************** End of build.c ***********************************************/
@@ -124447,8 +126849,9 @@ SQLITE_PRIVATE Table *sqlite3SrcListLookup(Parse *pParse, SrcList *pSrc){
Table *pTab;
assert( pItem && pSrc->nSrc>=1 );
pTab = sqlite3LocateTableItem(pParse, 0, pItem);
- sqlite3DeleteTable(pParse->db, pItem->pTab);
+ if( pItem->pTab ) sqlite3DeleteTable(pParse->db, pItem->pTab);
pItem->pTab = pTab;
+ pItem->fg.notCte = 1;
if( pTab ){
pTab->nTabRef++;
if( pItem->fg.isIndexedBy && sqlite3IndexedByLookup(pParse, pItem) ){
@@ -124601,7 +127004,7 @@ SQLITE_PRIVATE Expr *sqlite3LimitWhere(
sqlite3 *db = pParse->db;
Expr *pLhs = NULL; /* LHS of IN(SELECT...) operator */
Expr *pInClause = NULL; /* WHERE rowid IN ( select ) */
- ExprList *pEList = NULL; /* Expression list contaning only pSelectRowid */
+ ExprList *pEList = NULL; /* Expression list containing only pSelectRowid*/
SrcList *pSelectSrc = NULL; /* SELECT rowid FROM x ... (dup of pSrc) */
Select *pSelect = NULL; /* Complete SELECT tree */
Table *pTab;
@@ -124639,14 +127042,20 @@ SQLITE_PRIVATE Expr *sqlite3LimitWhere(
);
}else{
Index *pPk = sqlite3PrimaryKeyIndex(pTab);
+ assert( pPk!=0 );
+ assert( pPk->nKeyCol>=1 );
if( pPk->nKeyCol==1 ){
- const char *zName = pTab->aCol[pPk->aiColumn[0]].zCnName;
+ const char *zName;
+ assert( pPk->aiColumn[0]>=0 && pPk->aiColumn[0]<pTab->nCol );
+ zName = pTab->aCol[pPk->aiColumn[0]].zCnName;
pLhs = sqlite3Expr(db, TK_ID, zName);
pEList = sqlite3ExprListAppend(pParse, 0, sqlite3Expr(db, TK_ID, zName));
}else{
int i;
for(i=0; i<pPk->nKeyCol; i++){
- Expr *p = sqlite3Expr(db, TK_ID, pTab->aCol[pPk->aiColumn[i]].zCnName);
+ Expr *p;
+ assert( pPk->aiColumn[i]>=0 && pPk->aiColumn[i]<pTab->nCol );
+ p = sqlite3Expr(db, TK_ID, pTab->aCol[pPk->aiColumn[i]].zCnName);
pEList = sqlite3ExprListAppend(pParse, pEList, p);
}
pLhs = sqlite3PExpr(pParse, TK_VECTOR, 0, 0);
@@ -124675,7 +127084,7 @@ SQLITE_PRIVATE Expr *sqlite3LimitWhere(
pOrderBy,0,pLimit
);
- /* now generate the new WHERE rowid IN clause for the DELETE/UDPATE */
+ /* now generate the new WHERE rowid IN clause for the DELETE/UPDATE */
pInClause = sqlite3PExpr(pParse, TK_IN, pLhs, 0);
sqlite3PExprAddSelect(pParse, pInClause, pSelect);
return pInClause;
@@ -124904,7 +127313,7 @@ SQLITE_PRIVATE void sqlite3DeleteFrom(
if( HasRowid(pTab) ){
/* For a rowid table, initialize the RowSet to an empty set */
pPk = 0;
- nPk = 1;
+ assert( nPk==1 );
iRowSet = ++pParse->nMem;
sqlite3VdbeAddOp2(v, OP_Null, 0, iRowSet);
}else{
@@ -124932,7 +127341,8 @@ SQLITE_PRIVATE void sqlite3DeleteFrom(
if( pWInfo==0 ) goto delete_from_cleanup;
eOnePass = sqlite3WhereOkOnePass(pWInfo, aiCurOnePass);
assert( IsVirtual(pTab)==0 || eOnePass!=ONEPASS_MULTI );
- assert( IsVirtual(pTab) || bComplex || eOnePass!=ONEPASS_OFF );
+ assert( IsVirtual(pTab) || bComplex || eOnePass!=ONEPASS_OFF
+ || OptimizationDisabled(db, SQLITE_OnePass) );
if( eOnePass!=ONEPASS_SINGLE ) sqlite3MultiWrite(pParse);
if( sqlite3WhereUsesDeferredSeek(pWInfo) ){
sqlite3VdbeAddOp1(v, OP_FinishSeek, iTabCur);
@@ -125269,9 +127679,11 @@ SQLITE_PRIVATE void sqlite3GenerateRowDelete(
sqlite3FkActions(pParse, pTab, 0, iOld, 0, 0);
/* Invoke AFTER DELETE trigger programs. */
- sqlite3CodeRowTrigger(pParse, pTrigger,
- TK_DELETE, 0, TRIGGER_AFTER, pTab, iOld, onconf, iLabel
- );
+ if( pTrigger ){
+ sqlite3CodeRowTrigger(pParse, pTrigger,
+ TK_DELETE, 0, TRIGGER_AFTER, pTab, iOld, onconf, iLabel
+ );
+ }
/* Jump here if the row had already been deleted before any BEFORE
** trigger programs were invoked. Or if a trigger program throws a
@@ -125585,6 +127997,42 @@ static void lengthFunc(
}
/*
+** Implementation of the octet_length() function
+*/
+static void bytelengthFunc(
+ sqlite3_context *context,
+ int argc,
+ sqlite3_value **argv
+){
+ assert( argc==1 );
+ UNUSED_PARAMETER(argc);
+ switch( sqlite3_value_type(argv[0]) ){
+ case SQLITE_BLOB: {
+ sqlite3_result_int(context, sqlite3_value_bytes(argv[0]));
+ break;
+ }
+ case SQLITE_INTEGER:
+ case SQLITE_FLOAT: {
+ i64 m = sqlite3_context_db_handle(context)->enc<=SQLITE_UTF8 ? 1 : 2;
+ sqlite3_result_int64(context, sqlite3_value_bytes(argv[0])*m);
+ break;
+ }
+ case SQLITE_TEXT: {
+ if( sqlite3_value_encoding(argv[0])<=SQLITE_UTF8 ){
+ sqlite3_result_int(context, sqlite3_value_bytes(argv[0]));
+ }else{
+ sqlite3_result_int(context, sqlite3_value_bytes16(argv[0]));
+ }
+ break;
+ }
+ default: {
+ sqlite3_result_null(context);
+ break;
+ }
+ }
+}
+
+/*
** Implementation of the abs() function.
**
** IMP: R-23979-26855 The abs(X) function returns the absolute value of
@@ -125860,7 +128308,7 @@ static void roundFunc(sqlite3_context *context, int argc, sqlite3_value **argv){
}else if( n==0 ){
r = (double)((sqlite_int64)(r+(r<0?-0.5:+0.5)));
}else{
- zBuf = sqlite3_mprintf("%.*f",n,r);
+ zBuf = sqlite3_mprintf("%!.*f",n,r);
if( zBuf==0 ){
sqlite3_result_error_nomem(context);
return;
@@ -126060,7 +128508,7 @@ struct compareInfo {
/*
** For LIKE and GLOB matching on EBCDIC machines, assume that every
-** character is exactly one byte in size. Also, provde the Utf8Read()
+** character is exactly one byte in size. Also, provide the Utf8Read()
** macro for fast reading of the next character in the common case where
** the next character is ASCII.
*/
@@ -126293,7 +128741,7 @@ SQLITE_API int sqlite3_like_count = 0;
/*
** Implementation of the like() SQL function. This function implements
-** the build-in LIKE operator. The first argument to the function is the
+** the built-in LIKE operator. The first argument to the function is the
** pattern and the second argument is the string. So, the SQL statements:
**
** A LIKE B
@@ -126626,6 +129074,7 @@ static void charFunc(
*zOut++ = 0x80 + (u8)(c & 0x3F);
} \
}
+ *zOut = 0;
sqlite3_result_text64(context, (char*)z, zOut-z, sqlite3_free, SQLITE_UTF8);
}
@@ -126654,7 +129103,8 @@ static void hexFunc(
*(z++) = hexdigits[c&0xf];
}
*z = 0;
- sqlite3_result_text(context, zHex, n*2, sqlite3_free);
+ sqlite3_result_text64(context, zHex, (u64)(z-zHex),
+ sqlite3_free, SQLITE_UTF8);
}
}
@@ -126679,7 +129129,7 @@ static int strContainsChar(const u8 *zStr, int nStr, u32 ch){
** decoded and returned as a blob.
**
** If there is only a single argument, then it must consist only of an
-** even number of hexadeximal digits. Otherwise, return NULL.
+** even number of hexadecimal digits. Otherwise, return NULL.
**
** Or, if there is a second argument, then any character that appears in
** the second argument is also allowed to appear between pairs of hexadecimal
@@ -126948,6 +129398,81 @@ static void trimFunc(
sqlite3_result_text(context, (char*)zIn, nIn, SQLITE_TRANSIENT);
}
+/* The core implementation of the CONCAT(...) and CONCAT_WS(SEP,...)
+** functions.
+**
+** Return a string value that is the concatenation of all non-null
+** entries in argv[]. Use zSep as the separator.
+*/
+static void concatFuncCore(
+ sqlite3_context *context,
+ int argc,
+ sqlite3_value **argv,
+ int nSep,
+ const char *zSep
+){
+ i64 j, k, n = 0;
+ int i;
+ char *z;
+ for(i=0; i<argc; i++){
+ n += sqlite3_value_bytes(argv[i]);
+ }
+ n += (argc-1)*nSep;
+ z = sqlite3_malloc64(n+1);
+ if( z==0 ){
+ sqlite3_result_error_nomem(context);
+ return;
+ }
+ j = 0;
+ for(i=0; i<argc; i++){
+ k = sqlite3_value_bytes(argv[i]);
+ if( k>0 ){
+ const char *v = (const char*)sqlite3_value_text(argv[i]);
+ if( v!=0 ){
+ if( j>0 && nSep>0 ){
+ memcpy(&z[j], zSep, nSep);
+ j += nSep;
+ }
+ memcpy(&z[j], v, k);
+ j += k;
+ }
+ }
+ }
+ z[j] = 0;
+ assert( j<=n );
+ sqlite3_result_text64(context, z, j, sqlite3_free, SQLITE_UTF8);
+}
+
+/*
+** The CONCAT(...) function. Generate a string result that is the
+** concatentation of all non-null arguments.
+*/
+static void concatFunc(
+ sqlite3_context *context,
+ int argc,
+ sqlite3_value **argv
+){
+ concatFuncCore(context, argc, argv, 0, "");
+}
+
+/*
+** The CONCAT_WS(separator, ...) function.
+**
+** Generate a string that is the concatenation of 2nd through the Nth
+** argument. Use the first argument (which must be non-NULL) as the
+** separator.
+*/
+static void concatwsFunc(
+ sqlite3_context *context,
+ int argc,
+ sqlite3_value **argv
+){
+ int nSep = sqlite3_value_bytes(argv[0]);
+ const char *zSep = (const char*)sqlite3_value_text(argv[0]);
+ if( zSep==0 ) return;
+ concatFuncCore(context, argc-1, argv+1, nSep, zSep);
+}
+
#ifdef SQLITE_ENABLE_UNKNOWN_SQL_FUNCTION
/*
@@ -127069,14 +129594,69 @@ static void loadExt(sqlite3_context *context, int argc, sqlite3_value **argv){
*/
typedef struct SumCtx SumCtx;
struct SumCtx {
- double rSum; /* Floating point sum */
- i64 iSum; /* Integer sum */
+ double rSum; /* Running sum as as a double */
+ double rErr; /* Error term for Kahan-Babushka-Neumaier summation */
+ i64 iSum; /* Running sum as a signed integer */
i64 cnt; /* Number of elements summed */
- u8 overflow; /* True if integer overflow seen */
- u8 approx; /* True if non-integer value was input to the sum */
+ u8 approx; /* True if any non-integer value was input to the sum */
+ u8 ovrfl; /* Integer overflow seen */
};
/*
+** Do one step of the Kahan-Babushka-Neumaier summation.
+**
+** https://en.wikipedia.org/wiki/Kahan_summation_algorithm
+**
+** Variables are marked "volatile" to defeat c89 x86 floating point
+** optimizations can mess up this algorithm.
+*/
+static void kahanBabuskaNeumaierStep(
+ volatile SumCtx *pSum,
+ volatile double r
+){
+ volatile double s = pSum->rSum;
+ volatile double t = s + r;
+ if( fabs(s) > fabs(r) ){
+ pSum->rErr += (s - t) + r;
+ }else{
+ pSum->rErr += (r - t) + s;
+ }
+ pSum->rSum = t;
+}
+
+/*
+** Add a (possibly large) integer to the running sum.
+*/
+static void kahanBabuskaNeumaierStepInt64(volatile SumCtx *pSum, i64 iVal){
+ if( iVal<=-4503599627370496LL || iVal>=+4503599627370496LL ){
+ i64 iBig, iSm;
+ iSm = iVal % 16384;
+ iBig = iVal - iSm;
+ kahanBabuskaNeumaierStep(pSum, iBig);
+ kahanBabuskaNeumaierStep(pSum, iSm);
+ }else{
+ kahanBabuskaNeumaierStep(pSum, (double)iVal);
+ }
+}
+
+/*
+** Initialize the Kahan-Babaska-Neumaier sum from a 64-bit integer
+*/
+static void kahanBabuskaNeumaierInit(
+ volatile SumCtx *p,
+ i64 iVal
+){
+ if( iVal<=-4503599627370496LL || iVal>=+4503599627370496LL ){
+ i64 iSm = iVal % 16384;
+ p->rSum = (double)(iVal - iSm);
+ p->rErr = (double)iSm;
+ }else{
+ p->rSum = (double)iVal;
+ p->rErr = 0.0;
+ }
+}
+
+/*
** Routines used to compute the sum, average, and total.
**
** The SUM() function follows the (broken) SQL standard which means
@@ -127095,15 +129675,29 @@ static void sumStep(sqlite3_context *context, int argc, sqlite3_value **argv){
type = sqlite3_value_numeric_type(argv[0]);
if( p && type!=SQLITE_NULL ){
p->cnt++;
- if( type==SQLITE_INTEGER ){
- i64 v = sqlite3_value_int64(argv[0]);
- p->rSum += v;
- if( (p->approx|p->overflow)==0 && sqlite3AddInt64(&p->iSum, v) ){
- p->approx = p->overflow = 1;
+ if( p->approx==0 ){
+ if( type!=SQLITE_INTEGER ){
+ kahanBabuskaNeumaierInit(p, p->iSum);
+ p->approx = 1;
+ kahanBabuskaNeumaierStep(p, sqlite3_value_double(argv[0]));
+ }else{
+ i64 x = p->iSum;
+ if( sqlite3AddInt64(&x, sqlite3_value_int64(argv[0]))==0 ){
+ p->iSum = x;
+ }else{
+ p->ovrfl = 1;
+ kahanBabuskaNeumaierInit(p, p->iSum);
+ p->approx = 1;
+ kahanBabuskaNeumaierStepInt64(p, sqlite3_value_int64(argv[0]));
+ }
}
}else{
- p->rSum += sqlite3_value_double(argv[0]);
- p->approx = 1;
+ if( type==SQLITE_INTEGER ){
+ kahanBabuskaNeumaierStepInt64(p, sqlite3_value_int64(argv[0]));
+ }else{
+ p->ovrfl = 0;
+ kahanBabuskaNeumaierStep(p, sqlite3_value_double(argv[0]));
+ }
}
}
}
@@ -127120,13 +129714,18 @@ static void sumInverse(sqlite3_context *context, int argc, sqlite3_value**argv){
if( ALWAYS(p) && type!=SQLITE_NULL ){
assert( p->cnt>0 );
p->cnt--;
- assert( type==SQLITE_INTEGER || p->approx );
- if( type==SQLITE_INTEGER && p->approx==0 ){
- i64 v = sqlite3_value_int64(argv[0]);
- p->rSum -= v;
- p->iSum -= v;
+ if( !p->approx ){
+ p->iSum -= sqlite3_value_int64(argv[0]);
+ }else if( type==SQLITE_INTEGER ){
+ i64 iVal = sqlite3_value_int64(argv[0]);
+ if( iVal!=SMALLEST_INT64 ){
+ kahanBabuskaNeumaierStepInt64(p, -iVal);
+ }else{
+ kahanBabuskaNeumaierStepInt64(p, LARGEST_INT64);
+ kahanBabuskaNeumaierStepInt64(p, 1);
+ }
}else{
- p->rSum -= sqlite3_value_double(argv[0]);
+ kahanBabuskaNeumaierStep(p, -sqlite3_value_double(argv[0]));
}
}
}
@@ -127137,10 +129736,14 @@ static void sumFinalize(sqlite3_context *context){
SumCtx *p;
p = sqlite3_aggregate_context(context, 0);
if( p && p->cnt>0 ){
- if( p->overflow ){
- sqlite3_result_error(context,"integer overflow",-1);
- }else if( p->approx ){
- sqlite3_result_double(context, p->rSum);
+ if( p->approx ){
+ if( p->ovrfl ){
+ sqlite3_result_error(context,"integer overflow",-1);
+ }else if( !sqlite3IsNaN(p->rErr) ){
+ sqlite3_result_double(context, p->rSum+p->rErr);
+ }else{
+ sqlite3_result_double(context, p->rSum);
+ }
}else{
sqlite3_result_int64(context, p->iSum);
}
@@ -127150,14 +129753,29 @@ static void avgFinalize(sqlite3_context *context){
SumCtx *p;
p = sqlite3_aggregate_context(context, 0);
if( p && p->cnt>0 ){
- sqlite3_result_double(context, p->rSum/(double)p->cnt);
+ double r;
+ if( p->approx ){
+ r = p->rSum;
+ if( !sqlite3IsNaN(p->rErr) ) r += p->rErr;
+ }else{
+ r = (double)(p->iSum);
+ }
+ sqlite3_result_double(context, r/(double)p->cnt);
}
}
static void totalFinalize(sqlite3_context *context){
SumCtx *p;
+ double r = 0.0;
p = sqlite3_aggregate_context(context, 0);
- /* (double)0 In case of SQLITE_OMIT_FLOATING_POINT... */
- sqlite3_result_double(context, p ? p->rSum : (double)0);
+ if( p ){
+ if( p->approx ){
+ r = p->rSum;
+ if( !sqlite3IsNaN(p->rErr) ) r += p->rErr;
+ }else{
+ r = (double)(p->iSum);
+ }
+ }
+ sqlite3_result_double(context, r);
}
/*
@@ -127276,6 +129894,7 @@ static void minMaxFinalize(sqlite3_context *context){
/*
** group_concat(EXPR, ?SEPARATOR?)
+** string_agg(EXPR, SEPARATOR)
**
** The SEPARATOR goes before the EXPR string. This is tragic. The
** groupConcatInverse() implementation would have been easier if the
@@ -127379,7 +129998,7 @@ static void groupConcatInverse(
if( sqlite3_value_type(argv[0])==SQLITE_NULL ) return;
pGCC = (GroupConcatCtx*)sqlite3_aggregate_context(context, sizeof(*pGCC));
/* pGCC is always non-NULL since groupConcatStep() will have always
- ** run frist to initialize it */
+ ** run first to initialize it */
if( ALWAYS(pGCC) ){
int nVS;
/* Must call sqlite3_value_text() to convert the argument into text prior
@@ -127463,8 +130082,10 @@ SQLITE_PRIVATE void sqlite3RegisterPerConnectionBuiltinFunctions(sqlite3 *db){
** sensitive.
*/
SQLITE_PRIVATE void sqlite3RegisterLikeFunctions(sqlite3 *db, int caseSensitive){
+ FuncDef *pDef;
struct compareInfo *pInfo;
int flags;
+ int nArg;
if( caseSensitive ){
pInfo = (struct compareInfo*)&likeInfoAlt;
flags = SQLITE_FUNC_LIKE | SQLITE_FUNC_CASE;
@@ -127472,10 +130093,13 @@ SQLITE_PRIVATE void sqlite3RegisterLikeFunctions(sqlite3 *db, int caseSensitive)
pInfo = (struct compareInfo*)&likeInfoNorm;
flags = SQLITE_FUNC_LIKE;
}
- sqlite3CreateFunc(db, "like", 2, SQLITE_UTF8, pInfo, likeFunc, 0, 0, 0, 0, 0);
- sqlite3CreateFunc(db, "like", 3, SQLITE_UTF8, pInfo, likeFunc, 0, 0, 0, 0, 0);
- sqlite3FindFunction(db, "like", 2, SQLITE_UTF8, 0)->funcFlags |= flags;
- sqlite3FindFunction(db, "like", 3, SQLITE_UTF8, 0)->funcFlags |= flags;
+ for(nArg=2; nArg<=3; nArg++){
+ sqlite3CreateFunc(db, "like", nArg, SQLITE_UTF8, pInfo, likeFunc,
+ 0, 0, 0, 0, 0);
+ pDef = sqlite3FindFunction(db, "like", nArg, SQLITE_UTF8, 0);
+ pDef->funcFlags |= flags;
+ pDef->funcFlags &= ~SQLITE_FUNC_UNSAFE;
+ }
}
/*
@@ -127747,6 +130371,37 @@ static void signFunc(
sqlite3_result_int(context, x<0.0 ? -1 : x>0.0 ? +1 : 0);
}
+#ifdef SQLITE_DEBUG
+/*
+** Implementation of fpdecode(x,y,z) function.
+**
+** x is a real number that is to be decoded. y is the precision.
+** z is the maximum real precision.
+*/
+static void fpdecodeFunc(
+ sqlite3_context *context,
+ int argc,
+ sqlite3_value **argv
+){
+ FpDecode s;
+ double x;
+ int y, z;
+ char zBuf[100];
+ UNUSED_PARAMETER(argc);
+ assert( argc==3 );
+ x = sqlite3_value_double(argv[0]);
+ y = sqlite3_value_int(argv[1]);
+ z = sqlite3_value_int(argv[2]);
+ sqlite3FpDecode(&s, x, y, z);
+ if( s.isSpecial==2 ){
+ sqlite3_snprintf(sizeof(zBuf), zBuf, "NaN");
+ }else{
+ sqlite3_snprintf(sizeof(zBuf), zBuf, "%c%.*s/%d", s.sign, s.n, s.z, s.iDP);
+ }
+ sqlite3_result_text(context, zBuf, -1, SQLITE_TRANSIENT);
+}
+#endif /* SQLITE_DEBUG */
+
/*
** All of the FuncDef structures in the aBuiltinFunc[] array above
** to the global function hash table. This occurs at start-time (as
@@ -127811,12 +130466,16 @@ SQLITE_PRIVATE void sqlite3RegisterBuiltinFunctions(void){
FUNCTION2(typeof, 1, 0, 0, typeofFunc, SQLITE_FUNC_TYPEOF),
FUNCTION2(subtype, 1, 0, 0, subtypeFunc, SQLITE_FUNC_TYPEOF),
FUNCTION2(length, 1, 0, 0, lengthFunc, SQLITE_FUNC_LENGTH),
+ FUNCTION2(octet_length, 1, 0, 0, bytelengthFunc,SQLITE_FUNC_BYTELEN),
FUNCTION(instr, 2, 0, 0, instrFunc ),
FUNCTION(printf, -1, 0, 0, printfFunc ),
FUNCTION(format, -1, 0, 0, printfFunc ),
FUNCTION(unicode, 1, 0, 0, unicodeFunc ),
FUNCTION(char, -1, 0, 0, charFunc ),
FUNCTION(abs, 1, 0, 0, absFunc ),
+#ifdef SQLITE_DEBUG
+ FUNCTION(fpdecode, 3, 0, 0, fpdecodeFunc ),
+#endif
#ifndef SQLITE_OMIT_FLOATING_POINT
FUNCTION(round, 1, 0, 0, roundFunc ),
FUNCTION(round, 2, 0, 0, roundFunc ),
@@ -127826,6 +130485,11 @@ SQLITE_PRIVATE void sqlite3RegisterBuiltinFunctions(void){
FUNCTION(hex, 1, 0, 0, hexFunc ),
FUNCTION(unhex, 1, 0, 0, unhexFunc ),
FUNCTION(unhex, 2, 0, 0, unhexFunc ),
+ FUNCTION(concat, -1, 0, 0, concatFunc ),
+ FUNCTION(concat, 0, 0, 0, 0 ),
+ FUNCTION(concat_ws, -1, 0, 0, concatwsFunc ),
+ FUNCTION(concat_ws, 0, 0, 0, 0 ),
+ FUNCTION(concat_ws, 1, 0, 0, 0 ),
INLINE_FUNC(ifnull, 2, INLINEFUNC_coalesce, 0 ),
VFUNCTION(random, 0, 0, 0, randomFunc ),
VFUNCTION(randomblob, 1, 0, 0, randomBlob ),
@@ -127855,6 +130519,8 @@ SQLITE_PRIVATE void sqlite3RegisterBuiltinFunctions(void){
groupConcatFinalize, groupConcatValue, groupConcatInverse, 0),
WAGGREGATE(group_concat, 2, 0, 0, groupConcatStep,
groupConcatFinalize, groupConcatValue, groupConcatInverse, 0),
+ WAGGREGATE(string_agg, 2, 0, 0, groupConcatStep,
+ groupConcatFinalize, groupConcatValue, groupConcatInverse, 0),
LIKEFUNC(glob, 2, &globInfo, SQLITE_FUNC_LIKE|SQLITE_FUNC_CASE),
#ifdef SQLITE_CASE_SENSITIVE_LIKE
@@ -128797,6 +131463,7 @@ static int isSetNullAction(Parse *pParse, FKey *pFKey){
if( (p==pFKey->apTrigger[0] && pFKey->aAction[0]==OE_SetNull)
|| (p==pFKey->apTrigger[1] && pFKey->aAction[1]==OE_SetNull)
){
+ assert( (pTop->db->flags & SQLITE_FkNoAction)==0 );
return 1;
}
}
@@ -128991,6 +131658,8 @@ SQLITE_PRIVATE void sqlite3FkCheck(
}
if( regOld!=0 ){
int eAction = pFKey->aAction[aChange!=0];
+ if( (db->flags & SQLITE_FkNoAction) ) eAction = OE_None;
+
fkScanChildren(pParse, pSrc, pTab, pIdx, pFKey, aiCol, regOld, 1);
/* If this is a deferred FK constraint, or a CASCADE or SET NULL
** action applies, then any foreign key violations caused by
@@ -129106,7 +131775,11 @@ SQLITE_PRIVATE int sqlite3FkRequired(
/* Check if any parent key columns are being modified. */
for(p=sqlite3FkReferences(pTab); p; p=p->pNextTo){
if( fkParentIsModified(pTab, p, aChange, chngRowid) ){
- if( p->aAction[1]!=OE_None ) return 2;
+ if( (pParse->db->flags & SQLITE_FkNoAction)==0
+ && p->aAction[1]!=OE_None
+ ){
+ return 2;
+ }
bHaveFK = 1;
}
}
@@ -129156,6 +131829,7 @@ static Trigger *fkActionTrigger(
int iAction = (pChanges!=0); /* 1 for UPDATE, 0 for DELETE */
action = pFKey->aAction[iAction];
+ if( (db->flags & SQLITE_FkNoAction) ) action = OE_None;
if( action==OE_Restrict && (db->flags & SQLITE_DeferFKs) ){
return 0;
}
@@ -129387,9 +132061,8 @@ SQLITE_PRIVATE void sqlite3FkDelete(sqlite3 *db, Table *pTab){
if( pFKey->pPrevTo ){
pFKey->pPrevTo->pNextTo = pFKey->pNextTo;
}else{
- void *p = (void *)pFKey->pNextTo;
- const char *z = (p ? pFKey->pNextTo->zTo : pFKey->zTo);
- sqlite3HashInsert(&pTab->pSchema->fkeyHash, z, p);
+ const char *z = (pFKey->pNextTo ? pFKey->pNextTo->zTo : pFKey->zTo);
+ sqlite3HashInsert(&pTab->pSchema->fkeyHash, z, pFKey->pNextTo);
}
if( pFKey->pNextTo ){
pFKey->pNextTo->pPrevTo = pFKey->pPrevTo;
@@ -129452,8 +132125,10 @@ SQLITE_PRIVATE void sqlite3OpenTable(
assert( pParse->pVdbe!=0 );
v = pParse->pVdbe;
assert( opcode==OP_OpenWrite || opcode==OP_OpenRead );
- sqlite3TableLock(pParse, iDb, pTab->tnum,
- (opcode==OP_OpenWrite)?1:0, pTab->zName);
+ if( !pParse->db->noSharedCache ){
+ sqlite3TableLock(pParse, iDb, pTab->tnum,
+ (opcode==OP_OpenWrite)?1:0, pTab->zName);
+ }
if( HasRowid(pTab) ){
sqlite3VdbeAddOp4Int(v, opcode, iCur, pTab->tnum, iDb, pTab->nNVCol);
VdbeComment((v, "%s", pTab->zName));
@@ -129582,7 +132257,7 @@ SQLITE_PRIVATE char *sqlite3TableAffinityStr(sqlite3 *db, const Table *pTab){
** For STRICT tables:
** ------------------
**
-** Generate an appropropriate OP_TypeCheck opcode that will verify the
+** Generate an appropriate OP_TypeCheck opcode that will verify the
** datatypes against the column definitions in pTab. If iReg==0, that
** means an OP_MakeRecord opcode has already been generated and should be
** the last opcode generated. The new OP_TypeCheck needs to be inserted
@@ -130874,7 +133549,7 @@ insert_cleanup:
/* This is the Walker callback from sqlite3ExprReferencesUpdatedColumn().
* Set bit 0x01 of pWalker->eCode if pWalker->eCode to 0 and if this
** expression node references any of the
-** columns that are being modifed by an UPDATE statement.
+** columns that are being modified by an UPDATE statement.
*/
static int checkConstraintExprNode(Walker *pWalker, Expr *pExpr){
if( pExpr->op==TK_COLUMN ){
@@ -131097,7 +133772,7 @@ SQLITE_PRIVATE void sqlite3GenerateConstraintChecks(
int *aiChng, /* column i is unchanged if aiChng[i]<0 */
Upsert *pUpsert /* ON CONFLICT clauses, if any. NULL otherwise */
){
- Vdbe *v; /* VDBE under constrution */
+ Vdbe *v; /* VDBE under construction */
Index *pIdx; /* Pointer to one of the indices */
Index *pPk = 0; /* The PRIMARY KEY index for WITHOUT ROWID tables */
sqlite3 *db; /* Database connection */
@@ -131580,7 +134255,7 @@ SQLITE_PRIVATE void sqlite3GenerateConstraintChecks(
pIdx;
pIdx = indexIteratorNext(&sIdxIter, &ix)
){
- int regIdx; /* Range of registers hold conent for pIdx */
+ int regIdx; /* Range of registers holding content for pIdx */
int regR; /* Range of registers holding conflicting PK */
int iThisCur; /* Cursor for this UNIQUE index */
int addrUniqueOk; /* Jump here if the UNIQUE constraint is satisfied */
@@ -132075,6 +134750,8 @@ SQLITE_PRIVATE int sqlite3OpenTableAndIndices(
assert( op==OP_OpenRead || op==OP_OpenWrite );
assert( op==OP_OpenWrite || p5==0 );
+ assert( piDataCur!=0 );
+ assert( piIdxCur!=0 );
if( IsVirtual(pTab) ){
/* This routine is a no-op for virtual tables. Leave the output
** variables *piDataCur and *piIdxCur set to illegal cursor numbers
@@ -132087,18 +134764,18 @@ SQLITE_PRIVATE int sqlite3OpenTableAndIndices(
assert( v!=0 );
if( iBase<0 ) iBase = pParse->nTab;
iDataCur = iBase++;
- if( piDataCur ) *piDataCur = iDataCur;
+ *piDataCur = iDataCur;
if( HasRowid(pTab) && (aToOpen==0 || aToOpen[0]) ){
sqlite3OpenTable(pParse, iDataCur, iDb, pTab, op);
- }else{
+ }else if( pParse->db->noSharedCache==0 ){
sqlite3TableLock(pParse, iDb, pTab->tnum, op==OP_OpenWrite, pTab->zName);
}
- if( piIdxCur ) *piIdxCur = iBase;
+ *piIdxCur = iBase;
for(i=0, pIdx=pTab->pIndex; pIdx; pIdx=pIdx->pNext, i++){
int iIdxCur = iBase++;
assert( pIdx->pSchema==pTab->pSchema );
if( IsPrimaryKeyIndex(pIdx) && !HasRowid(pTab) ){
- if( piDataCur ) *piDataCur = iIdxCur;
+ *piDataCur = iIdxCur;
p5 = 0;
}
if( aToOpen==0 || aToOpen[i+1] ){
@@ -132396,7 +135073,7 @@ static int xferOptimization(
}
#endif
#ifndef SQLITE_OMIT_FOREIGN_KEY
- /* Disallow the transfer optimization if the destination table constains
+ /* Disallow the transfer optimization if the destination table contains
** any foreign key constraints. This is more restrictive than necessary.
** But the main beneficiary of the transfer optimization is the VACUUM
** command, and the VACUUM command disables foreign key constraints. So
@@ -133106,6 +135783,11 @@ struct sqlite3_api_routines {
int (*value_encoding)(sqlite3_value*);
/* Version 3.41.0 and later */
int (*is_interrupted)(sqlite3*);
+ /* Version 3.43.0 and later */
+ int (*stmt_explain)(sqlite3_stmt*,int);
+ /* Version 3.44.0 and later */
+ void *(*get_clientdata)(sqlite3*,const char*);
+ int (*set_clientdata)(sqlite3*, const char*, void*, void(*)(void*));
};
/*
@@ -133434,6 +136116,11 @@ typedef int (*sqlite3_loadext_entry)(
#define sqlite3_value_encoding sqlite3_api->value_encoding
/* Version 3.41.0 and later */
#define sqlite3_is_interrupted sqlite3_api->is_interrupted
+/* Version 3.43.0 and later */
+#define sqlite3_stmt_explain sqlite3_api->stmt_explain
+/* Version 3.44.0 and later */
+#define sqlite3_get_clientdata sqlite3_api->get_clientdata
+#define sqlite3_set_clientdata sqlite3_api->set_clientdata
#endif /* !defined(SQLITE_CORE) && !defined(SQLITE_OMIT_LOAD_EXTENSION) */
#if !defined(SQLITE_CORE) && !defined(SQLITE_OMIT_LOAD_EXTENSION)
@@ -133950,7 +136637,12 @@ static const sqlite3_api_routines sqlite3Apis = {
/* Version 3.40.0 and later */
sqlite3_value_encoding,
/* Version 3.41.0 and later */
- sqlite3_is_interrupted
+ sqlite3_is_interrupted,
+ /* Version 3.43.0 and later */
+ sqlite3_stmt_explain,
+ /* Version 3.44.0 and later */
+ sqlite3_get_clientdata,
+ sqlite3_set_clientdata
};
/* True if x is the directory separator character
@@ -134030,6 +136722,10 @@ static int sqlite3LoadExtension(
*/
if( nMsg>SQLITE_MAX_PATHLEN ) goto extension_not_found;
+ /* Do not allow sqlite3_load_extension() to link to a copy of the
+ ** running application, by passing in an empty filename. */
+ if( nMsg==0 ) goto extension_not_found;
+
handle = sqlite3OsDlOpen(pVfs, zFile);
#if SQLITE_OS_UNIX || SQLITE_OS_WIN
for(ii=0; ii<ArraySize(azEndings) && handle==0; ii++){
@@ -134162,6 +136858,9 @@ SQLITE_PRIVATE void sqlite3CloseExtensions(sqlite3 *db){
** default so as not to open security holes in older applications.
*/
SQLITE_API int sqlite3_enable_load_extension(sqlite3 *db, int onoff){
+#ifdef SQLITE_ENABLE_API_ARMOR
+ if( !sqlite3SafetyCheckOk(db) ) return SQLITE_MISUSE_BKPT;
+#endif
sqlite3_mutex_enter(db->mutex);
if( onoff ){
db->flags |= SQLITE_LoadExtension|SQLITE_LoadExtFunc;
@@ -134211,6 +136910,9 @@ SQLITE_API int sqlite3_auto_extension(
void (*xInit)(void)
){
int rc = SQLITE_OK;
+#ifdef SQLITE_ENABLE_API_ARMOR
+ if( xInit==0 ) return SQLITE_MISUSE_BKPT;
+#endif
#ifndef SQLITE_OMIT_AUTOINIT
rc = sqlite3_initialize();
if( rc ){
@@ -134263,6 +136965,9 @@ SQLITE_API int sqlite3_cancel_auto_extension(
int i;
int n = 0;
wsdAutoextInit;
+#ifdef SQLITE_ENABLE_API_ARMOR
+ if( xInit==0 ) return 0;
+#endif
sqlite3_mutex_enter(mutex);
for(i=(int)wsdAutoext.nExt-1; i>=0; i--){
if( wsdAutoext.aExt[i]==xInit ){
@@ -135862,7 +138567,7 @@ SQLITE_PRIVATE void sqlite3Pragma(
**
** The first form reports the current local setting for the
** page cache spill size. The second form turns cache spill on
- ** or off. When turnning cache spill on, the size is set to the
+ ** or off. When turning cache spill on, the size is set to the
** current cache_size. The third form sets a spill size that
** may be different form the cache size.
** If N is positive then that is the
@@ -136132,7 +138837,11 @@ SQLITE_PRIVATE void sqlite3Pragma(
#endif
if( sqlite3GetBoolean(zRight, 0) ){
- db->flags |= mask;
+ if( (mask & SQLITE_WriteSchema)==0
+ || (db->flags & SQLITE_Defensive)==0
+ ){
+ db->flags |= mask;
+ }
}else{
db->flags &= ~mask;
if( mask==SQLITE_DeferFKs ) db->nDeferredImmCons = 0;
@@ -136640,9 +139349,9 @@ SQLITE_PRIVATE void sqlite3Pragma(
** The "quick_check" is reduced version of
** integrity_check designed to detect most database corruption
** without the overhead of cross-checking indexes. Quick_check
- ** is linear time wherease integrity_check is O(NlogN).
+ ** is linear time whereas integrity_check is O(NlogN).
**
- ** The maximum nubmer of errors is 100 by default. A different default
+ ** The maximum number of errors is 100 by default. A different default
** can be specified using a numeric parameter N.
**
** Or, the parameter N can be the name of a table. In that case, only
@@ -136765,8 +139474,32 @@ SQLITE_PRIVATE void sqlite3Pragma(
int r2; /* Previous key for WITHOUT ROWID tables */
int mxCol; /* Maximum non-virtual column number */
- if( !IsOrdinaryTable(pTab) ) continue;
if( pObjTab && pObjTab!=pTab ) continue;
+ if( !IsOrdinaryTable(pTab) ){
+#ifndef SQLITE_OMIT_VIRTUALTABLE
+ sqlite3_vtab *pVTab;
+ int a1;
+ if( !IsVirtual(pTab) ) continue;
+ if( pTab->nCol<=0 ){
+ const char *zMod = pTab->u.vtab.azArg[0];
+ if( sqlite3HashFind(&db->aModule, zMod)==0 ) continue;
+ }
+ sqlite3ViewGetColumnNames(pParse, pTab);
+ if( pTab->u.vtab.p==0 ) continue;
+ pVTab = pTab->u.vtab.p->pVtab;
+ if( NEVER(pVTab==0) ) continue;
+ if( NEVER(pVTab->pModule==0) ) continue;
+ if( pVTab->pModule->iVersion<4 ) continue;
+ if( pVTab->pModule->xIntegrity==0 ) continue;
+ sqlite3VdbeAddOp3(v, OP_VCheck, i, 3, isQuick);
+ pTab->nTabRef++;
+ sqlite3VdbeAppendP4(v, pTab, P4_TABLEREF);
+ a1 = sqlite3VdbeAddOp1(v, OP_IsNull, 3); VdbeCoverage(v);
+ integrityCheckResultRow(v);
+ sqlite3VdbeJumpHere(v, a1);
+#endif
+ continue;
+ }
if( isQuick || HasRowid(pTab) ){
pPk = 0;
r2 = 0;
@@ -137400,7 +140133,7 @@ SQLITE_PRIVATE void sqlite3Pragma(
Schema *pSchema; /* The current schema */
Table *pTab; /* A table in the schema */
Index *pIdx; /* An index of the table */
- LogEst szThreshold; /* Size threshold above which reanalysis is needd */
+ LogEst szThreshold; /* Size threshold above which reanalysis needed */
char *zSubSql; /* SQL statement for the OP_SqlExec opcode */
u32 opMask; /* Mask of operations to perform */
@@ -137892,7 +140625,8 @@ static const sqlite3_module pragmaVtabModule = {
0, /* xSavepoint */
0, /* xRelease */
0, /* xRollbackTo */
- 0 /* xShadowName */
+ 0, /* xShadowName */
+ 0 /* xIntegrity */
};
/*
@@ -138516,8 +141250,6 @@ SQLITE_PRIVATE void sqlite3ParseObjectReset(Parse *pParse){
db->lookaside.sz = db->lookaside.bDisable ? 0 : db->lookaside.szTrue;
assert( pParse->db->pParse==pParse );
db->pParse = pParse->pOuterParse;
- pParse->db = 0;
- pParse->disableLookaside = 0;
}
/*
@@ -138526,7 +141258,7 @@ SQLITE_PRIVATE void sqlite3ParseObjectReset(Parse *pParse){
** immediately.
**
** Use this mechanism for uncommon cleanups. There is a higher setup
-** cost for this mechansim (an extra malloc), so it should not be used
+** cost for this mechanism (an extra malloc), so it should not be used
** for common cleanups that happen on most calls. But for less
** common cleanups, we save a single NULL-pointer comparison in
** sqlite3ParseObjectReset(), which reduces the total CPU cycle count.
@@ -138618,7 +141350,12 @@ static int sqlite3Prepare(
sParse.pOuterParse = db->pParse;
db->pParse = &sParse;
sParse.db = db;
- sParse.pReprepare = pReprepare;
+ if( pReprepare ){
+ sParse.pReprepare = pReprepare;
+ sParse.explain = sqlite3_stmt_isexplain((sqlite3_stmt*)pReprepare);
+ }else{
+ assert( sParse.pReprepare==0 );
+ }
assert( ppStmt && *ppStmt==0 );
if( db->mallocFailed ){
sqlite3ErrorMsg(&sParse, "out of memory");
@@ -138783,6 +141520,7 @@ static int sqlite3LockAndPrepare(
assert( (rc&db->errMask)==rc );
db->busyHandler.nBusy = 0;
sqlite3_mutex_leave(db->mutex);
+ assert( rc==SQLITE_OK || (*ppStmt)==0 );
return rc;
}
@@ -139180,6 +141918,9 @@ SQLITE_PRIVATE Select *sqlite3SelectNew(
SQLITE_PRIVATE void sqlite3SelectDelete(sqlite3 *db, Select *p){
if( OK_IF_ALWAYS_TRUE(p) ) clearSelect(db, p, 1);
}
+SQLITE_PRIVATE void sqlite3SelectDeleteGeneric(sqlite3 *db, void *p){
+ if( ALWAYS(p) ) clearSelect(db, (Select*)p, 1);
+}
/*
** Return a pointer to the right-most SELECT statement in a compound.
@@ -139228,7 +141969,7 @@ static Select *findRightmost(Select *p){
** NATURAL FULL OUTER JT_NATRUAL|JT_LEFT|JT_RIGHT
**
** To preserve historical compatibly, SQLite also accepts a variety
-** of other non-standard and in many cases non-sensical join types.
+** of other non-standard and in many cases nonsensical join types.
** This routine makes as much sense at it can from the nonsense join
** type and returns a result. Examples of accepted nonsense join types
** include but are not limited to:
@@ -139450,6 +142191,7 @@ static void unsetJoinExpr(Expr *p, int iTable, int nullable){
}
if( p->op==TK_FUNCTION ){
assert( ExprUseXList(p) );
+ assert( p->pLeft==0 );
if( p->x.pList ){
int i;
for(i=0; i<p->x.pList->nExpr; i++){
@@ -139499,7 +142241,7 @@ static int sqlite3ProcessJoin(Parse *pParse, Select *p){
if( NEVER(pLeft->pTab==0 || pRightTab==0) ) continue;
joinType = (pRight->fg.jointype & JT_OUTER)!=0 ? EP_OuterON : EP_InnerON;
- /* If this is a NATURAL join, synthesize an approprate USING clause
+ /* If this is a NATURAL join, synthesize an appropriate USING clause
** to specify which columns should be joined.
*/
if( pRight->fg.jointype & JT_NATURAL ){
@@ -139715,7 +142457,7 @@ static void pushOntoSorter(
** (3) Some output columns are omitted from the sort record due to
** the SQLITE_ENABLE_SORTER_REFERENCES optimization, or due to the
** SQLITE_ECEL_OMITREF optimization, or due to the
- ** SortCtx.pDeferredRowLoad optimiation. In any of these cases
+ ** SortCtx.pDeferredRowLoad optimization. In any of these cases
** regOrigData is 0 to prevent this routine from trying to copy
** values that might not yet exist.
*/
@@ -139771,7 +142513,7 @@ static void pushOntoSorter(
testcase( pKI->nAllField > pKI->nKeyField+2 );
pOp->p4.pKeyInfo = sqlite3KeyInfoFromExprList(pParse,pSort->pOrderBy,nOBSat,
pKI->nAllField-pKI->nKeyField-1);
- pOp = 0; /* Ensure pOp not used after sqltie3VdbeAddOp3() */
+ pOp = 0; /* Ensure pOp not used after sqlite3VdbeAddOp3() */
addrJmp = sqlite3VdbeCurrentAddr(v);
sqlite3VdbeAddOp3(v, OP_Jump, addrJmp+1, 0, addrJmp+1); VdbeCoverage(v);
pSort->labelBkOut = sqlite3VdbeMakeLabel(pParse);
@@ -139865,7 +142607,7 @@ static void codeOffset(
** The returned value in this case is a copy of parameter iTab.
**
** WHERE_DISTINCT_ORDERED:
-** In this case rows are being delivered sorted order. The ephermal
+** In this case rows are being delivered sorted order. The ephemeral
** table is not required. Instead, the current set of values
** is compared against previous row. If they match, the new row
** is not distinct and control jumps to VM address addrRepeat. Otherwise,
@@ -140294,6 +143036,16 @@ static void selectInnerLoop(
testcase( eDest==SRT_Fifo );
testcase( eDest==SRT_DistFifo );
sqlite3VdbeAddOp3(v, OP_MakeRecord, regResult, nResultCol, r1+nPrefixReg);
+#if !defined(SQLITE_ENABLE_NULL_TRIM) && defined(SQLITE_DEBUG)
+ /* A destination of SRT_Table and a non-zero iSDParm2 parameter means
+ ** that this is an "UPDATE ... FROM" on a virtual table or view. In this
+ ** case set the p5 parameter of the OP_MakeRecord to OPFLAG_NOCHNG_MAGIC.
+ ** This does not affect operation in any way - it just allows MakeRecord
+ ** to process OPFLAG_NOCHANGE values without an assert() failing. */
+ if( eDest==SRT_Table && pDest->iSDParm2 ){
+ sqlite3VdbeChangeP5(v, OPFLAG_NOCHNG_MAGIC);
+ }
+#endif
#ifndef SQLITE_OMIT_CTE
if( eDest==SRT_DistFifo ){
/* If the destination is DistFifo, then cursor (iParm+1) is open
@@ -141097,13 +143849,6 @@ SQLITE_PRIVATE void sqlite3GenerateColumnNames(
int fullName; /* TABLE.COLUMN if no AS clause and is a direct table ref */
int srcName; /* COLUMN or TABLE.COLUMN if no AS clause and is direct */
-#ifndef SQLITE_OMIT_EXPLAIN
- /* If this is an EXPLAIN, skip this step */
- if( pParse->explain ){
- return;
- }
-#endif
-
if( pParse->colNamesSet ) return;
/* Column names are determined by the left-most term of a compound select */
while( pSelect->pPrior ) pSelect = pSelect->pPrior;
@@ -141290,7 +144035,7 @@ SQLITE_PRIVATE int sqlite3ColumnsFromExprList(
** kind (maybe a parenthesized subquery in the FROM clause of a larger
** query, or a VIEW, or a CTE). This routine computes type information
** for that Table object based on the Select object that implements the
-** subquery. For the purposes of this routine, "type infomation" means:
+** subquery. For the purposes of this routine, "type information" means:
**
** * The datatype name, as it might appear in a CREATE TABLE statement
** * Which collating sequence to use for the column
@@ -141311,7 +144056,8 @@ SQLITE_PRIVATE void sqlite3SubqueryColumnTypes(
NameContext sNC;
assert( pSelect!=0 );
- assert( (pSelect->selFlags & SF_Resolved)!=0 );
+ testcase( (pSelect->selFlags & SF_Resolved)==0 );
+ assert( (pSelect->selFlags & SF_Resolved)!=0 || IN_RENAME_OBJECT );
assert( pTab->nCol==pSelect->pEList->nExpr || pParse->nErr>0 );
assert( aff==SQLITE_AFF_NONE || aff==SQLITE_AFF_BLOB );
if( db->mallocFailed || IN_RENAME_OBJECT ) return;
@@ -141619,7 +144365,7 @@ static void generateWithRecursiveQuery(
int iQueue; /* The Queue table */
int iDistinct = 0; /* To ensure unique results if UNION */
int eDest = SRT_Fifo; /* How to write to Queue */
- SelectDest destQueue; /* SelectDest targetting the Queue table */
+ SelectDest destQueue; /* SelectDest targeting the Queue table */
int i; /* Loop counter */
int rc; /* Result code */
ExprList *pOrderBy; /* The ORDER BY clause */
@@ -142195,9 +144941,7 @@ multi_select_end:
pDest->iSdst = dest.iSdst;
pDest->nSdst = dest.nSdst;
if( pDelete ){
- sqlite3ParserAddCleanup(pParse,
- (void(*)(sqlite3*,void*))sqlite3SelectDelete,
- pDelete);
+ sqlite3ParserAddCleanup(pParse, sqlite3SelectDeleteGeneric, pDelete);
}
return rc;
}
@@ -142219,7 +144963,7 @@ SQLITE_PRIVATE void sqlite3SelectWrongNumTermsError(Parse *pParse, Select *p){
/*
** Code an output subroutine for a coroutine implementation of a
-** SELECT statment.
+** SELECT statement.
**
** The data to be output is contained in pIn->iSdst. There are
** pIn->nSdst columns to be output. pDest is where the output should
@@ -142441,7 +145185,7 @@ static int generateOutputSubroutine(
**
** We call AltB, AeqB, AgtB, EofA, and EofB "subroutines" but they are not
** actually called using Gosub and they do not Return. EofA and EofB loop
-** until all data is exhausted then jump to the "end" labe. AltB, AeqB,
+** until all data is exhausted then jump to the "end" label. AltB, AeqB,
** and AgtB jump to either L2 or to one of EofA or EofB.
*/
#ifndef SQLITE_OMIT_COMPOUND_SELECT
@@ -142478,7 +145222,7 @@ static int multiSelectOrderBy(
int savedOffset; /* Saved value of p->iOffset */
int labelCmpr; /* Label for the start of the merge algorithm */
int labelEnd; /* Label for the end of the overall SELECT stmt */
- int addr1; /* Jump instructions that get retargetted */
+ int addr1; /* Jump instructions that get retargeted */
int op; /* One of TK_ALL, TK_UNION, TK_EXCEPT, TK_INTERSECT */
KeyInfo *pKeyDup = 0; /* Comparison information for duplicate removal */
KeyInfo *pKeyMerge; /* Comparison information for merging rows */
@@ -142748,8 +145492,7 @@ static int multiSelectOrderBy(
/* Make arrangements to free the 2nd and subsequent arms of the compound
** after the parse has finished */
if( pSplit->pPrior ){
- sqlite3ParserAddCleanup(pParse,
- (void(*)(sqlite3*,void*))sqlite3SelectDelete, pSplit->pPrior);
+ sqlite3ParserAddCleanup(pParse, sqlite3SelectDeleteGeneric, pSplit->pPrior);
}
pSplit->pPrior = pPrior;
pPrior->pNext = pSplit;
@@ -142847,11 +145590,14 @@ static Expr *substExpr(
#endif
{
Expr *pNew;
- int iColumn = pExpr->iColumn;
- Expr *pCopy = pSubst->pEList->a[iColumn].pExpr;
+ int iColumn;
+ Expr *pCopy;
Expr ifNullRow;
+ iColumn = pExpr->iColumn;
+ assert( iColumn>=0 );
assert( pSubst->pEList!=0 && iColumn<pSubst->pEList->nExpr );
assert( pExpr->pRight==0 );
+ pCopy = pSubst->pEList->a[iColumn].pExpr;
if( sqlite3ExprIsVector(pCopy) ){
sqlite3VectorErrorMsg(pSubst->pParse, pCopy);
}else{
@@ -143200,7 +145946,7 @@ static int compoundHasDifferentAffinities(Select *p){
** (9) If the subquery uses LIMIT then the outer query may not be aggregate.
**
** (**) Restriction (10) was removed from the code on 2005-02-05 but we
-** accidently carried the comment forward until 2014-09-15. Original
+** accidentally carried the comment forward until 2014-09-15. Original
** constraint: "If the subquery is aggregate then the outer query
** may not use LIMIT."
**
@@ -143292,7 +146038,8 @@ static int compoundHasDifferentAffinities(Select *p){
** (27b) the subquery is a compound query and the RIGHT JOIN occurs
** in any arm of the compound query. (See also (17g).)
**
-** (28) The subquery is not a MATERIALIZED CTE.
+** (28) The subquery is not a MATERIALIZED CTE. (This is handled
+** in the caller before ever reaching this routine.)
**
**
** In this routine, the "p" parameter is a pointer to the outer query.
@@ -143402,9 +146149,9 @@ static int flattenSubquery(
if( iFrom>0 && (pSubSrc->a[0].fg.jointype & JT_LTORJ)!=0 ){
return 0; /* Restriction (27a) */
}
- if( pSubitem->fg.isCte && pSubitem->u2.pCteUse->eM10d==M10d_Yes ){
- return 0; /* (28) */
- }
+
+ /* Condition (28) is blocked by the caller */
+ assert( !pSubitem->fg.isCte || pSubitem->u2.pCteUse->eM10d!=M10d_Yes );
/* Restriction (17): If the sub-query is a compound SELECT, then it must
** use only the UNION ALL operator. And none of the simple select queries
@@ -143474,7 +146221,7 @@ static int flattenSubquery(
testcase( i==SQLITE_DENY );
pParse->zAuthContext = zSavedAuthContext;
- /* Delete the transient structures associated with thesubquery */
+ /* Delete the transient structures associated with the subquery */
pSub1 = pSubitem->pSelect;
sqlite3DbFree(db, pSubitem->zDatabase);
sqlite3DbFree(db, pSubitem->zName);
@@ -143566,9 +146313,7 @@ static int flattenSubquery(
Table *pTabToDel = pSubitem->pTab;
if( pTabToDel->nTabRef==1 ){
Parse *pToplevel = sqlite3ParseToplevel(pParse);
- sqlite3ParserAddCleanup(pToplevel,
- (void(*)(sqlite3*,void*))sqlite3DeleteTable,
- pTabToDel);
+ sqlite3ParserAddCleanup(pToplevel, sqlite3DeleteTableGeneric, pTabToDel);
testcase( pToplevel->earlyCleanup );
}else{
pTabToDel->nTabRef--;
@@ -143656,7 +146401,7 @@ static int flattenSubquery(
** ORDER BY column expression is identical to the iOrderByCol'th
** expression returned by SELECT statement pSub. Since these values
** do not necessarily correspond to columns in SELECT statement pParent,
- ** zero them before transfering the ORDER BY clause.
+ ** zero them before transferring the ORDER BY clause.
**
** Not doing this may cause an error if a subsequent call to this
** function attempts to flatten a compound sub-query into pParent
@@ -143716,8 +146461,7 @@ static int flattenSubquery(
}
}
- /* Finially, delete what is left of the subquery and return
- ** success.
+ /* Finally, delete what is left of the subquery and return success.
*/
sqlite3AggInfoPersistWalkerInit(&w, pParse);
sqlite3WalkSelect(&w,pSub1);
@@ -143752,7 +146496,7 @@ struct WhereConst {
/*
** Add a new entry to the pConst object. Except, do not add duplicate
-** pColumn entires. Also, do not add if doing so would not be appropriate.
+** pColumn entries. Also, do not add if doing so would not be appropriate.
**
** The caller guarantees the pColumn is a column and pValue is a constant.
** This routine has to do some additional checks before completing the
@@ -143938,7 +146682,7 @@ static int propagateConstantExprRewrite(Walker *pWalker, Expr *pExpr){
** SELECT * FROM t1 WHERE a=123 AND b=123;
**
** The two SELECT statements above should return different answers. b=a
-** is alway true because the comparison uses numeric affinity, but b=123
+** is always true because the comparison uses numeric affinity, but b=123
** is false because it uses text affinity and '0123' is not the same as '123'.
** To work around this, the expression tree is not actually changed from
** "b=a" to "b=123" but rather the "a" in "b=a" is tagged with EP_FixedCol
@@ -144022,7 +146766,7 @@ static int propagateConstants(
** At the time this function is called it is guaranteed that
**
** * the sub-query uses only one distinct window frame, and
-** * that the window frame has a PARTITION BY clase.
+** * that the window frame has a PARTITION BY clause.
*/
static int pushDownWindowCheck(Parse *pParse, Select *pSubq, Expr *pExpr){
assert( pSubq->pWin->pPartition );
@@ -144291,12 +147035,12 @@ static int disableUnusedSubqueryResultColumns(SrcItem *pItem){
assert( pItem->pSelect!=0 );
pSub = pItem->pSelect;
assert( pSub->pEList->nExpr==pTab->nCol );
- if( (pSub->selFlags & (SF_Distinct|SF_Aggregate))!=0 ){
- testcase( pSub->selFlags & SF_Distinct );
- testcase( pSub->selFlags & SF_Aggregate );
- return 0;
- }
for(pX=pSub; pX; pX=pX->pPrior){
+ if( (pX->selFlags & (SF_Distinct|SF_Aggregate))!=0 ){
+ testcase( pX->selFlags & SF_Distinct );
+ testcase( pX->selFlags & SF_Aggregate );
+ return 0;
+ }
if( pX->pPrior && pX->op!=TK_ALL ){
/* This optimization does not work for compound subqueries that
** use UNION, INTERSECT, or EXCEPT. Only UNION ALL is allowed. */
@@ -144616,8 +147360,7 @@ static struct Cte *searchWith(
SQLITE_PRIVATE With *sqlite3WithPush(Parse *pParse, With *pWith, u8 bFree){
if( pWith ){
if( bFree ){
- pWith = (With*)sqlite3ParserAddCleanup(pParse,
- (void(*)(sqlite3*,void*))sqlite3WithDelete,
+ pWith = (With*)sqlite3ParserAddCleanup(pParse, sqlite3WithDeleteGeneric,
pWith);
if( pWith==0 ) return 0;
}
@@ -145102,12 +147845,20 @@ static int selectExpander(Walker *pWalker, Select *p){
** expanded. */
int tableSeen = 0; /* Set to 1 when TABLE matches */
char *zTName = 0; /* text of name of TABLE */
+ int iErrOfst;
if( pE->op==TK_DOT ){
+ assert( (selFlags & SF_NestedFrom)==0 );
assert( pE->pLeft!=0 );
assert( !ExprHasProperty(pE->pLeft, EP_IntValue) );
zTName = pE->pLeft->u.zToken;
+ assert( ExprUseWOfst(pE->pLeft) );
+ iErrOfst = pE->pRight->w.iOfst;
+ }else{
+ assert( ExprUseWOfst(pE) );
+ iErrOfst = pE->w.iOfst;
}
for(i=0, pFrom=pTabList->a; i<pTabList->nSrc; i++, pFrom++){
+ int nAdd; /* Number of cols including rowid */
Table *pTab = pFrom->pTab; /* Table for this data source */
ExprList *pNestedFrom; /* Result-set of a nested FROM clause */
char *zTabName; /* AS name for this data source */
@@ -145125,6 +147876,7 @@ static int selectExpander(Walker *pWalker, Select *p){
pNestedFrom = pFrom->pSelect->pEList;
assert( pNestedFrom!=0 );
assert( pNestedFrom->nExpr==pTab->nCol );
+ assert( VisibleRowid(pTab)==0 );
}else{
if( zTName && sqlite3StrICmp(zTName, zTabName)!=0 ){
continue;
@@ -145142,6 +147894,7 @@ static int selectExpander(Walker *pWalker, Select *p){
for(ii=0; ii<pUsing->nId; ii++){
const char *zUName = pUsing->a[ii].zName;
pRight = sqlite3Expr(db, TK_ID, zUName);
+ sqlite3ExprSetErrorOffset(pRight, iErrOfst);
pNew = sqlite3ExprListAppend(pParse, pNew, pRight);
if( pNew ){
struct ExprList_item *pX = &pNew->a[pNew->nExpr-1];
@@ -145154,33 +147907,48 @@ static int selectExpander(Walker *pWalker, Select *p){
}else{
pUsing = 0;
}
- for(j=0; j<pTab->nCol; j++){
- char *zName = pTab->aCol[j].zCnName;
+
+ nAdd = pTab->nCol + (VisibleRowid(pTab) && (selFlags&SF_NestedFrom));
+ for(j=0; j<nAdd; j++){
+ const char *zName;
struct ExprList_item *pX; /* Newly added ExprList term */
- assert( zName );
- if( zTName
- && pNestedFrom
- && sqlite3MatchEName(&pNestedFrom->a[j], 0, zTName, 0)==0
- ){
- continue;
- }
+ if( j==pTab->nCol ){
+ zName = sqlite3RowidAlias(pTab);
+ if( zName==0 ) continue;
+ }else{
+ zName = pTab->aCol[j].zCnName;
- /* If a column is marked as 'hidden', omit it from the expanded
- ** result-set list unless the SELECT has the SF_IncludeHidden
- ** bit set.
- */
- if( (p->selFlags & SF_IncludeHidden)==0
- && IsHiddenColumn(&pTab->aCol[j])
- ){
- continue;
- }
- if( (pTab->aCol[j].colFlags & COLFLAG_NOEXPAND)!=0
- && zTName==0
- && (selFlags & (SF_NestedFrom))==0
- ){
- continue;
+ /* If pTab is actually an SF_NestedFrom sub-select, do not
+ ** expand any ENAME_ROWID columns. */
+ if( pNestedFrom && pNestedFrom->a[j].fg.eEName==ENAME_ROWID ){
+ continue;
+ }
+
+ if( zTName
+ && pNestedFrom
+ && sqlite3MatchEName(&pNestedFrom->a[j], 0, zTName, 0, 0)==0
+ ){
+ continue;
+ }
+
+ /* If a column is marked as 'hidden', omit it from the expanded
+ ** result-set list unless the SELECT has the SF_IncludeHidden
+ ** bit set.
+ */
+ if( (p->selFlags & SF_IncludeHidden)==0
+ && IsHiddenColumn(&pTab->aCol[j])
+ ){
+ continue;
+ }
+ if( (pTab->aCol[j].colFlags & COLFLAG_NOEXPAND)!=0
+ && zTName==0
+ && (selFlags & (SF_NestedFrom))==0
+ ){
+ continue;
+ }
}
+ assert( zName );
tableSeen = 1;
if( i>0 && zTName==0 && (selFlags & SF_NestedFrom)==0 ){
@@ -145214,6 +147982,7 @@ static int selectExpander(Walker *pWalker, Select *p){
}else{
pExpr = pRight;
}
+ sqlite3ExprSetErrorOffset(pExpr, iErrOfst);
pNew = sqlite3ExprListAppend(pParse, pNew, pExpr);
if( pNew==0 ){
break; /* OOM */
@@ -145229,11 +147998,11 @@ static int selectExpander(Walker *pWalker, Select *p){
zSchemaName, zTabName, zName);
testcase( pX->zEName==0 );
}
- pX->fg.eEName = ENAME_TAB;
+ pX->fg.eEName = (j==pTab->nCol ? ENAME_ROWID : ENAME_TAB);
if( (pFrom->fg.isUsing
&& sqlite3IdListIndex(pFrom->u3.pUsing, zName)>=0)
|| (pUsing && sqlite3IdListIndex(pUsing, zName)>=0)
- || (pTab->aCol[j].colFlags & COLFLAG_NOEXPAND)!=0
+ || (j<pTab->nCol && (pTab->aCol[j].colFlags & COLFLAG_NOEXPAND))
){
pX->fg.bNoExpand = 1;
}
@@ -145335,10 +148104,11 @@ static void selectAddSubqueryTypeInfo(Walker *pWalker, Select *p){
SrcList *pTabList;
SrcItem *pFrom;
- assert( p->selFlags & SF_Resolved );
if( p->selFlags & SF_HasTypeInfo ) return;
p->selFlags |= SF_HasTypeInfo;
pParse = pWalker->pParse;
+ testcase( (p->selFlags & SF_Resolved)==0 );
+ assert( (p->selFlags & SF_Resolved) || IN_RENAME_OBJECT );
pTabList = p->pSrc;
for(i=0, pFrom=pTabList->a; i<pTabList->nSrc; i++, pFrom++){
Table *pTab = pFrom->pTab;
@@ -145454,8 +148224,14 @@ static void analyzeAggFuncArgs(
pNC->ncFlags |= NC_InAggFunc;
for(i=0; i<pAggInfo->nFunc; i++){
Expr *pExpr = pAggInfo->aFunc[i].pFExpr;
+ assert( pExpr->op==TK_FUNCTION || pExpr->op==TK_AGG_FUNCTION );
assert( ExprUseXList(pExpr) );
sqlite3ExprAnalyzeAggList(pNC, pExpr->x.pList);
+ if( pExpr->pLeft ){
+ assert( pExpr->pLeft->op==TK_ORDER );
+ assert( ExprUseXList(pExpr->pLeft) );
+ sqlite3ExprAnalyzeAggList(pNC, pExpr->pLeft->x.pList);
+ }
#ifndef SQLITE_OMIT_WINDOWFUNC
assert( !IsWindowFunc(pExpr) );
if( ExprHasProperty(pExpr, EP_WinFunc) ){
@@ -145530,7 +148306,7 @@ static int aggregateIdxEprRefToColCallback(Walker *pWalker, Expr *pExpr){
pExpr->op = TK_AGG_COLUMN;
pExpr->iTable = pCol->iTable;
pExpr->iColumn = pCol->iColumn;
- ExprClearProperty(pExpr, EP_Skip|EP_Collate);
+ ExprClearProperty(pExpr, EP_Skip|EP_Collate|EP_Unlikely);
return WRC_Prune;
}
@@ -145561,7 +148337,7 @@ static void aggregateConvertIndexedExprRefToColumn(AggInfo *pAggInfo){
** * The aCol[] and aFunc[] arrays may be modified
** * The AggInfoColumnReg() and AggInfoFuncReg() macros may not be used
**
-** After clling this routine:
+** After calling this routine:
**
** * The aCol[] and aFunc[] arrays are fixed
** * The AggInfoColumnReg() and AggInfoFuncReg() macros may be used
@@ -145610,6 +148386,36 @@ static void resetAccumulator(Parse *pParse, AggInfo *pAggInfo){
pFunc->pFunc->zName));
}
}
+ if( pFunc->iOBTab>=0 ){
+ ExprList *pOBList;
+ KeyInfo *pKeyInfo;
+ int nExtra = 0;
+ assert( pFunc->pFExpr->pLeft!=0 );
+ assert( pFunc->pFExpr->pLeft->op==TK_ORDER );
+ assert( ExprUseXList(pFunc->pFExpr->pLeft) );
+ assert( pFunc->pFunc!=0 );
+ pOBList = pFunc->pFExpr->pLeft->x.pList;
+ if( !pFunc->bOBUnique ){
+ nExtra++; /* One extra column for the OP_Sequence */
+ }
+ if( pFunc->bOBPayload ){
+ /* extra columns for the function arguments */
+ assert( ExprUseXList(pFunc->pFExpr) );
+ nExtra += pFunc->pFExpr->x.pList->nExpr;
+ }
+ if( pFunc->bUseSubtype ){
+ nExtra += pFunc->pFExpr->x.pList->nExpr;
+ }
+ pKeyInfo = sqlite3KeyInfoFromExprList(pParse, pOBList, 0, nExtra);
+ if( !pFunc->bOBUnique && pParse->nErr==0 ){
+ pKeyInfo->nKeyField++;
+ }
+ sqlite3VdbeAddOp4(v, OP_OpenEphemeral,
+ pFunc->iOBTab, pOBList->nExpr+nExtra, 0,
+ (char*)pKeyInfo, P4_KEYINFO);
+ ExplainQueryPlan((pParse, 0, "USE TEMP B-TREE FOR %s(ORDER BY)",
+ pFunc->pFunc->zName));
+ }
}
}
@@ -145625,13 +148431,56 @@ static void finalizeAggFunctions(Parse *pParse, AggInfo *pAggInfo){
ExprList *pList;
assert( ExprUseXList(pF->pFExpr) );
pList = pF->pFExpr->x.pList;
+ if( pF->iOBTab>=0 ){
+ /* For an ORDER BY aggregate, calls to OP_AggStep were deferred. Inputs
+ ** were stored in emphermal table pF->iOBTab. Here, we extract those
+ ** inputs (in ORDER BY order) and make all calls to OP_AggStep
+ ** before doing the OP_AggFinal call. */
+ int iTop; /* Start of loop for extracting columns */
+ int nArg; /* Number of columns to extract */
+ int nKey; /* Key columns to be skipped */
+ int regAgg; /* Extract into this array */
+ int j; /* Loop counter */
+
+ assert( pF->pFunc!=0 );
+ nArg = pList->nExpr;
+ regAgg = sqlite3GetTempRange(pParse, nArg);
+
+ if( pF->bOBPayload==0 ){
+ nKey = 0;
+ }else{
+ assert( pF->pFExpr->pLeft!=0 );
+ assert( ExprUseXList(pF->pFExpr->pLeft) );
+ assert( pF->pFExpr->pLeft->x.pList!=0 );
+ nKey = pF->pFExpr->pLeft->x.pList->nExpr;
+ if( ALWAYS(!pF->bOBUnique) ) nKey++;
+ }
+ iTop = sqlite3VdbeAddOp1(v, OP_Rewind, pF->iOBTab); VdbeCoverage(v);
+ for(j=nArg-1; j>=0; j--){
+ sqlite3VdbeAddOp3(v, OP_Column, pF->iOBTab, nKey+j, regAgg+j);
+ }
+ if( pF->bUseSubtype ){
+ int regSubtype = sqlite3GetTempReg(pParse);
+ int iBaseCol = nKey + nArg + (pF->bOBPayload==0 && pF->bOBUnique==0);
+ for(j=nArg-1; j>=0; j--){
+ sqlite3VdbeAddOp3(v, OP_Column, pF->iOBTab, iBaseCol+j, regSubtype);
+ sqlite3VdbeAddOp2(v, OP_SetSubtype, regSubtype, regAgg+j);
+ }
+ sqlite3ReleaseTempReg(pParse, regSubtype);
+ }
+ sqlite3VdbeAddOp3(v, OP_AggStep, 0, regAgg, AggInfoFuncReg(pAggInfo,i));
+ sqlite3VdbeAppendP4(v, pF->pFunc, P4_FUNCDEF);
+ sqlite3VdbeChangeP5(v, (u8)nArg);
+ sqlite3VdbeAddOp2(v, OP_Next, pF->iOBTab, iTop+1); VdbeCoverage(v);
+ sqlite3VdbeJumpHere(v, iTop);
+ sqlite3ReleaseTempRange(pParse, regAgg, nArg);
+ }
sqlite3VdbeAddOp2(v, OP_AggFinal, AggInfoFuncReg(pAggInfo,i),
pList ? pList->nExpr : 0);
sqlite3VdbeAppendP4(v, pF->pFunc, P4_FUNCDEF);
}
}
-
/*
** Generate code that will update the accumulator memory cells for an
** aggregate based on the current cursor position.
@@ -145640,6 +148489,13 @@ static void finalizeAggFunctions(Parse *pParse, AggInfo *pAggInfo){
** in pAggInfo, then only populate the pAggInfo->nAccumulator accumulator
** registers if register regAcc contains 0. The caller will take care
** of setting and clearing regAcc.
+**
+** For an ORDER BY aggregate, the actual accumulator memory cell update
+** is deferred until after all input rows have been received, so that they
+** can be run in the requested order. In that case, instead of invoking
+** OP_AggStep to update the accumulator, just add the arguments that would
+** have been passed into OP_AggStep into the sorting ephemeral table
+** (along with the appropriate sort key).
*/
static void updateAccumulator(
Parse *pParse,
@@ -145661,9 +148517,12 @@ static void updateAccumulator(
int nArg;
int addrNext = 0;
int regAgg;
+ int regAggSz = 0;
+ int regDistinct = 0;
ExprList *pList;
assert( ExprUseXList(pF->pFExpr) );
assert( !IsWindowFunc(pF->pFExpr) );
+ assert( pF->pFunc!=0 );
pList = pF->pFExpr->x.pList;
if( ExprHasProperty(pF->pFExpr, EP_WinFunc) ){
Expr *pFilter = pF->pFExpr->y.pWin->pFilter;
@@ -145687,9 +148546,55 @@ static void updateAccumulator(
addrNext = sqlite3VdbeMakeLabel(pParse);
sqlite3ExprIfFalse(pParse, pFilter, addrNext, SQLITE_JUMPIFNULL);
}
- if( pList ){
+ if( pF->iOBTab>=0 ){
+ /* Instead of invoking AggStep, we must push the arguments that would
+ ** have been passed to AggStep onto the sorting table. */
+ int jj; /* Registered used so far in building the record */
+ ExprList *pOBList; /* The ORDER BY clause */
+ assert( pList!=0 );
+ nArg = pList->nExpr;
+ assert( nArg>0 );
+ assert( pF->pFExpr->pLeft!=0 );
+ assert( pF->pFExpr->pLeft->op==TK_ORDER );
+ assert( ExprUseXList(pF->pFExpr->pLeft) );
+ pOBList = pF->pFExpr->pLeft->x.pList;
+ assert( pOBList!=0 );
+ assert( pOBList->nExpr>0 );
+ regAggSz = pOBList->nExpr;
+ if( !pF->bOBUnique ){
+ regAggSz++; /* One register for OP_Sequence */
+ }
+ if( pF->bOBPayload ){
+ regAggSz += nArg;
+ }
+ if( pF->bUseSubtype ){
+ regAggSz += nArg;
+ }
+ regAggSz++; /* One extra register to hold result of MakeRecord */
+ regAgg = sqlite3GetTempRange(pParse, regAggSz);
+ regDistinct = regAgg;
+ sqlite3ExprCodeExprList(pParse, pOBList, regAgg, 0, SQLITE_ECEL_DUP);
+ jj = pOBList->nExpr;
+ if( !pF->bOBUnique ){
+ sqlite3VdbeAddOp2(v, OP_Sequence, pF->iOBTab, regAgg+jj);
+ jj++;
+ }
+ if( pF->bOBPayload ){
+ regDistinct = regAgg+jj;
+ sqlite3ExprCodeExprList(pParse, pList, regDistinct, 0, SQLITE_ECEL_DUP);
+ jj += nArg;
+ }
+ if( pF->bUseSubtype ){
+ int kk;
+ int regBase = pF->bOBPayload ? regDistinct : regAgg;
+ for(kk=0; kk<nArg; kk++, jj++){
+ sqlite3VdbeAddOp2(v, OP_GetSubtype, regBase+kk, regAgg+jj);
+ }
+ }
+ }else if( pList ){
nArg = pList->nExpr;
regAgg = sqlite3GetTempRange(pParse, nArg);
+ regDistinct = regAgg;
sqlite3ExprCodeExprList(pParse, pList, regAgg, 0, SQLITE_ECEL_DUP);
}else{
nArg = 0;
@@ -145700,26 +148605,37 @@ static void updateAccumulator(
addrNext = sqlite3VdbeMakeLabel(pParse);
}
pF->iDistinct = codeDistinct(pParse, eDistinctType,
- pF->iDistinct, addrNext, pList, regAgg);
- }
- if( pF->pFunc->funcFlags & SQLITE_FUNC_NEEDCOLL ){
- CollSeq *pColl = 0;
- struct ExprList_item *pItem;
- int j;
- assert( pList!=0 ); /* pList!=0 if pF->pFunc has NEEDCOLL */
- for(j=0, pItem=pList->a; !pColl && j<nArg; j++, pItem++){
- pColl = sqlite3ExprCollSeq(pParse, pItem->pExpr);
- }
- if( !pColl ){
- pColl = pParse->db->pDfltColl;
+ pF->iDistinct, addrNext, pList, regDistinct);
+ }
+ if( pF->iOBTab>=0 ){
+ /* Insert a new record into the ORDER BY table */
+ sqlite3VdbeAddOp3(v, OP_MakeRecord, regAgg, regAggSz-1,
+ regAgg+regAggSz-1);
+ sqlite3VdbeAddOp4Int(v, OP_IdxInsert, pF->iOBTab, regAgg+regAggSz-1,
+ regAgg, regAggSz-1);
+ sqlite3ReleaseTempRange(pParse, regAgg, regAggSz);
+ }else{
+ /* Invoke the AggStep function */
+ if( pF->pFunc->funcFlags & SQLITE_FUNC_NEEDCOLL ){
+ CollSeq *pColl = 0;
+ struct ExprList_item *pItem;
+ int j;
+ assert( pList!=0 ); /* pList!=0 if pF->pFunc has NEEDCOLL */
+ for(j=0, pItem=pList->a; !pColl && j<nArg; j++, pItem++){
+ pColl = sqlite3ExprCollSeq(pParse, pItem->pExpr);
+ }
+ if( !pColl ){
+ pColl = pParse->db->pDfltColl;
+ }
+ if( regHit==0 && pAggInfo->nAccumulator ) regHit = ++pParse->nMem;
+ sqlite3VdbeAddOp4(v, OP_CollSeq, regHit, 0, 0,
+ (char *)pColl, P4_COLLSEQ);
}
- if( regHit==0 && pAggInfo->nAccumulator ) regHit = ++pParse->nMem;
- sqlite3VdbeAddOp4(v, OP_CollSeq, regHit, 0, 0, (char *)pColl, P4_COLLSEQ);
+ sqlite3VdbeAddOp3(v, OP_AggStep, 0, regAgg, AggInfoFuncReg(pAggInfo,i));
+ sqlite3VdbeAppendP4(v, pF->pFunc, P4_FUNCDEF);
+ sqlite3VdbeChangeP5(v, (u8)nArg);
+ sqlite3ReleaseTempRange(pParse, regAgg, nArg);
}
- sqlite3VdbeAddOp3(v, OP_AggStep, 0, regAgg, AggInfoFuncReg(pAggInfo,i));
- sqlite3VdbeAppendP4(v, pF->pFunc, P4_FUNCDEF);
- sqlite3VdbeChangeP5(v, (u8)nArg);
- sqlite3ReleaseTempRange(pParse, regAgg, nArg);
if( addrNext ){
sqlite3VdbeResolveLabel(v, addrNext);
}
@@ -145878,7 +148794,8 @@ static SrcItem *isSelfJoinView(
/*
** Deallocate a single AggInfo object
*/
-static void agginfoFree(sqlite3 *db, AggInfo *p){
+static void agginfoFree(sqlite3 *db, void *pArg){
+ AggInfo *p = (AggInfo*)pArg;
sqlite3DbFree(db, p->aCol);
sqlite3DbFree(db, p->aFunc);
sqlite3DbFreeNN(db, p);
@@ -145952,7 +148869,7 @@ static int countOfViewOptimization(Parse *pParse, Select *p){
pSub->selFlags |= SF_Aggregate;
pSub->selFlags &= ~SF_Compound;
pSub->nSelectRow = 0;
- sqlite3ExprListDelete(db, pSub->pEList);
+ sqlite3ParserAddCleanup(pParse, sqlite3ExprListDeleteGeneric, pSub->pEList);
pTerm = pPrior ? sqlite3ExprDup(db, pCount, 0) : pCount;
pSub->pEList = sqlite3ExprListAppend(pParse, 0, pTerm);
pTerm = sqlite3PExpr(pParse, TK_SELECT, 0, 0);
@@ -146132,9 +149049,8 @@ SQLITE_PRIVATE int sqlite3Select(
sqlite3TreeViewExprList(0, p->pOrderBy, 0, "ORDERBY");
}
#endif
- sqlite3ParserAddCleanup(pParse,
- (void(*)(sqlite3*,void*))sqlite3ExprListDelete,
- p->pOrderBy);
+ sqlite3ParserAddCleanup(pParse, sqlite3ExprListDeleteGeneric,
+ p->pOrderBy);
testcase( pParse->earlyCleanup );
p->pOrderBy = 0;
}
@@ -146215,22 +149131,58 @@ SQLITE_PRIVATE int sqlite3Select(
** to a real table */
assert( pTab!=0 );
- /* Convert LEFT JOIN into JOIN if there are terms of the right table
- ** of the LEFT JOIN used in the WHERE clause.
+ /* Try to simplify joins:
+ **
+ ** LEFT JOIN -> JOIN
+ ** RIGHT JOIN -> JOIN
+ ** FULL JOIN -> RIGHT JOIN
+ **
+ ** If terms of the i-th table are used in the WHERE clause in such a
+ ** way that the i-th table cannot be the NULL row of a join, then
+ ** perform the appropriate simplification. This is called
+ ** "OUTER JOIN strength reduction" in the SQLite documentation.
*/
- if( (pItem->fg.jointype & (JT_LEFT|JT_RIGHT))==JT_LEFT
- && sqlite3ExprImpliesNonNullRow(p->pWhere, pItem->iCursor)
+ if( (pItem->fg.jointype & (JT_LEFT|JT_LTORJ))!=0
+ && sqlite3ExprImpliesNonNullRow(p->pWhere, pItem->iCursor,
+ pItem->fg.jointype & JT_LTORJ)
&& OptimizationEnabled(db, SQLITE_SimplifyJoin)
){
- TREETRACE(0x1000,pParse,p,
- ("LEFT-JOIN simplifies to JOIN on term %d\n",i));
- pItem->fg.jointype &= ~(JT_LEFT|JT_OUTER);
- assert( pItem->iCursor>=0 );
- unsetJoinExpr(p->pWhere, pItem->iCursor,
- pTabList->a[0].fg.jointype & JT_LTORJ);
+ if( pItem->fg.jointype & JT_LEFT ){
+ if( pItem->fg.jointype & JT_RIGHT ){
+ TREETRACE(0x1000,pParse,p,
+ ("FULL-JOIN simplifies to RIGHT-JOIN on term %d\n",i));
+ pItem->fg.jointype &= ~JT_LEFT;
+ }else{
+ TREETRACE(0x1000,pParse,p,
+ ("LEFT-JOIN simplifies to JOIN on term %d\n",i));
+ pItem->fg.jointype &= ~(JT_LEFT|JT_OUTER);
+ unsetJoinExpr(p->pWhere, pItem->iCursor, 0);
+ }
+ }
+ if( pItem->fg.jointype & JT_LTORJ ){
+ for(j=i+1; j<pTabList->nSrc; j++){
+ SrcItem *pI2 = &pTabList->a[j];
+ if( pI2->fg.jointype & JT_RIGHT ){
+ if( pI2->fg.jointype & JT_LEFT ){
+ TREETRACE(0x1000,pParse,p,
+ ("FULL-JOIN simplifies to LEFT-JOIN on term %d\n",j));
+ pI2->fg.jointype &= ~JT_RIGHT;
+ }else{
+ TREETRACE(0x1000,pParse,p,
+ ("RIGHT-JOIN simplifies to JOIN on term %d\n",j));
+ pI2->fg.jointype &= ~(JT_RIGHT|JT_OUTER);
+ unsetJoinExpr(p->pWhere, pI2->iCursor, 1);
+ }
+ }
+ }
+ for(j=pTabList->nSrc-1; j>=0; j--){
+ pTabList->a[j].fg.jointype &= ~JT_LTORJ;
+ if( pTabList->a[j].fg.jointype & JT_RIGHT ) break;
+ }
+ }
}
- /* No futher action if this term of the FROM clause is not a subquery */
+ /* No further action if this term of the FROM clause is not a subquery */
if( pSub==0 ) continue;
/* Catch mismatch in the declared columns of a view and the number of
@@ -146241,6 +149193,14 @@ SQLITE_PRIVATE int sqlite3Select(
goto select_end;
}
+ /* Do not attempt the usual optimizations (flattening and ORDER BY
+ ** elimination) on a MATERIALIZED common table expression because
+ ** a MATERIALIZED common table expression is an optimization fence.
+ */
+ if( pItem->fg.isCte && pItem->u2.pCteUse->eM10d==M10d_Yes ){
+ continue;
+ }
+
/* Do not try to flatten an aggregate subquery.
**
** Flattening an aggregate subquery is only possible if the outer query
@@ -146270,6 +149230,8 @@ SQLITE_PRIVATE int sqlite3Select(
** (a) The outer query has a different ORDER BY clause
** (b) The subquery is part of a join
** See forum post 062d576715d277c8
+ **
+ ** Also retain the ORDER BY if the OmitOrderBy optimization is disabled.
*/
if( pSub->pOrderBy!=0
&& (p->pOrderBy!=0 || pTabList->nSrc>1) /* Condition (5) */
@@ -146280,9 +149242,8 @@ SQLITE_PRIVATE int sqlite3Select(
){
TREETRACE(0x800,pParse,p,
("omit superfluous ORDER BY on %r FROM-clause subquery\n",i+1));
- sqlite3ParserAddCleanup(pParse,
- (void(*)(sqlite3*,void*))sqlite3ExprListDelete,
- pSub->pOrderBy);
+ sqlite3ParserAddCleanup(pParse, sqlite3ExprListDeleteGeneric,
+ pSub->pOrderBy);
pSub->pOrderBy = 0;
}
@@ -146484,7 +149445,7 @@ SQLITE_PRIVATE int sqlite3Select(
}else if( pItem->fg.isCte && pItem->u2.pCteUse->addrM9e>0 ){
/* This is a CTE for which materialization code has already been
** generated. Invoke the subroutine to compute the materialization,
- ** the make the pItem->iCursor be a copy of the ephemerial table that
+ ** the make the pItem->iCursor be a copy of the ephemeral table that
** holds the result of the materialization. */
CteUse *pCteUse = pItem->u2.pCteUse;
sqlite3VdbeAddOp2(v, OP_Gosub, pCteUse->regRtn, pCteUse->addrM9e);
@@ -146811,8 +149772,7 @@ SQLITE_PRIVATE int sqlite3Select(
*/
pAggInfo = sqlite3DbMallocZero(db, sizeof(*pAggInfo) );
if( pAggInfo ){
- sqlite3ParserAddCleanup(pParse,
- (void(*)(sqlite3*,void*))agginfoFree, pAggInfo);
+ sqlite3ParserAddCleanup(pParse, agginfoFree, pAggInfo);
testcase( pParse->earlyCleanup );
}
if( db->mallocFailed ){
@@ -146867,7 +149827,7 @@ SQLITE_PRIVATE int sqlite3Select(
*/
if( pGroupBy ){
KeyInfo *pKeyInfo; /* Keying information for the group by clause */
- int addr1; /* A-vs-B comparision jump */
+ int addr1; /* A-vs-B comparison jump */
int addrOutputRow; /* Start of subroutine that outputs a result row */
int regOutputRow; /* Return address register for output subroutine */
int addrSetAbort; /* Set the abort flag and return */
@@ -146958,9 +149918,13 @@ SQLITE_PRIVATE int sqlite3Select(
int nCol;
int nGroupBy;
- explainTempTable(pParse,
+#ifdef SQLITE_ENABLE_STMT_SCANSTATUS
+ int addrExp; /* Address of OP_Explain instruction */
+#endif
+ ExplainQueryPlan2(addrExp, (pParse, 0, "USE TEMP B-TREE FOR %s",
(sDistinct.isTnct && (p->selFlags&SF_Distinct)==0) ?
- "DISTINCT" : "GROUP BY");
+ "DISTINCT" : "GROUP BY"
+ ));
groupBySort = 1;
nGroupBy = pGroupBy->nExpr;
@@ -146985,18 +149949,23 @@ SQLITE_PRIVATE int sqlite3Select(
}
pAggInfo->directMode = 0;
regRecord = sqlite3GetTempReg(pParse);
+ sqlite3VdbeScanStatusCounters(v, addrExp, 0, sqlite3VdbeCurrentAddr(v));
sqlite3VdbeAddOp3(v, OP_MakeRecord, regBase, nCol, regRecord);
sqlite3VdbeAddOp2(v, OP_SorterInsert, pAggInfo->sortingIdx, regRecord);
+ sqlite3VdbeScanStatusRange(v, addrExp, sqlite3VdbeCurrentAddr(v)-2, -1);
sqlite3ReleaseTempReg(pParse, regRecord);
sqlite3ReleaseTempRange(pParse, regBase, nCol);
TREETRACE(0x2,pParse,p,("WhereEnd\n"));
sqlite3WhereEnd(pWInfo);
pAggInfo->sortingIdxPTab = sortPTab = pParse->nTab++;
sortOut = sqlite3GetTempReg(pParse);
+ sqlite3VdbeScanStatusCounters(v, addrExp, sqlite3VdbeCurrentAddr(v), 0);
sqlite3VdbeAddOp3(v, OP_OpenPseudo, sortPTab, sortOut, nCol);
sqlite3VdbeAddOp2(v, OP_SorterSort, pAggInfo->sortingIdx, addrEnd);
VdbeComment((v, "GROUP BY sort")); VdbeCoverage(v);
pAggInfo->useSortingIdx = 1;
+ sqlite3VdbeScanStatusRange(v, addrExp, -1, sortPTab);
+ sqlite3VdbeScanStatusRange(v, addrExp, -1, pAggInfo->sortingIdx);
}
/* If there are entries in pAgggInfo->aFunc[] that contain subexpressions
@@ -147724,6 +150693,10 @@ SQLITE_PRIVATE void sqlite3BeginTrigger(
sqlite3ErrorMsg(pParse, "cannot create triggers on virtual tables");
goto trigger_orphan_error;
}
+ if( (pTab->tabFlags & TF_Shadow)!=0 && sqlite3ReadOnlyShadowTables(db) ){
+ sqlite3ErrorMsg(pParse, "cannot create triggers on shadow tables");
+ goto trigger_orphan_error;
+ }
/* Check that the trigger name is not reserved and that no trigger of the
** specified name exists */
@@ -148507,10 +151480,17 @@ static void codeReturningTrigger(
SrcList sFrom;
assert( v!=0 );
- assert( pParse->bReturning );
+ if( !pParse->bReturning ){
+ /* This RETURNING trigger must be for a different statement as
+ ** this statement lacks a RETURNING clause. */
+ return;
+ }
assert( db->pParse==pParse );
pReturning = pParse->u1.pReturning;
- assert( pTrigger == &(pReturning->retTrig) );
+ if( pTrigger != &(pReturning->retTrig) ){
+ /* This RETURNING trigger is for a different statement */
+ return;
+ }
memset(&sSelect, 0, sizeof(sSelect));
memset(&sFrom, 0, sizeof(sFrom));
sSelect.pEList = sqlite3ExprListDup(db, pReturning->pReturnEL, 0);
@@ -149247,7 +152227,7 @@ static void updateFromSelect(
assert( pTabList->nSrc>1 );
if( pSrc ){
- pSrc->a[0].fg.notCte = 1;
+ assert( pSrc->a[0].fg.notCte );
pSrc->a[0].iCursor = -1;
pSrc->a[0].pTab->nTabRef--;
pSrc->a[0].pTab = 0;
@@ -149764,7 +152744,7 @@ SQLITE_PRIVATE void sqlite3Update(
&& !hasFK
&& !chngKey
&& !bReplace
- && (sNC.ncFlags & NC_Subquery)==0
+ && (pWhere==0 || !ExprHasProperty(pWhere, EP_Subquery))
){
flags |= WHERE_ONEPASS_MULTIROW;
}
@@ -149836,6 +152816,8 @@ SQLITE_PRIVATE void sqlite3Update(
if( !isView ){
int addrOnce = 0;
+ int iNotUsed1 = 0;
+ int iNotUsed2 = 0;
/* Open every index that needs updating. */
if( eOnePass!=ONEPASS_OFF ){
@@ -149847,7 +152829,7 @@ SQLITE_PRIVATE void sqlite3Update(
addrOnce = sqlite3VdbeAddOp0(v, OP_Once); VdbeCoverage(v);
}
sqlite3OpenTableAndIndices(pParse, pTab, OP_OpenWrite, 0, iBaseCur,
- aToOpen, 0, 0);
+ aToOpen, &iNotUsed1, &iNotUsed2);
if( addrOnce ){
sqlite3VdbeJumpHereOrPopInst(v, addrOnce);
}
@@ -150138,8 +153120,10 @@ SQLITE_PRIVATE void sqlite3Update(
sqlite3VdbeAddOp2(v, OP_AddImm, regRowCount, 1);
}
- sqlite3CodeRowTrigger(pParse, pTrigger, TK_UPDATE, pChanges,
- TRIGGER_AFTER, pTab, regOldRowid, onError, labelContinue);
+ if( pTrigger ){
+ sqlite3CodeRowTrigger(pParse, pTrigger, TK_UPDATE, pChanges,
+ TRIGGER_AFTER, pTab, regOldRowid, onError, labelContinue);
+ }
/* Repeat the above with the next record to be updated, until
** all record selected by the WHERE clause have been updated.
@@ -150234,7 +153218,7 @@ static void updateVirtualTable(
int nArg = 2 + pTab->nCol; /* Number of arguments to VUpdate */
int regArg; /* First register in VUpdate arg array */
int regRec; /* Register in which to assemble record */
- int regRowid; /* Register for ephem table rowid */
+ int regRowid; /* Register for ephemeral table rowid */
int iCsr = pSrc->a[0].iCursor; /* Cursor used for virtual table scan */
int aDummy[2]; /* Unused arg for sqlite3WhereOkOnePass() */
int eOnePass; /* True to use onepass strategy */
@@ -150278,7 +153262,9 @@ static void updateVirtualTable(
sqlite3ExprDup(db, pChanges->a[aXRef[i]].pExpr, 0)
);
}else{
- pList = sqlite3ExprListAppend(pParse, pList, exprRowColumn(pParse, i));
+ Expr *pRowExpr = exprRowColumn(pParse, i);
+ if( pRowExpr ) pRowExpr->op2 = OPFLAG_NOCHNG;
+ pList = sqlite3ExprListAppend(pParse, pList, pRowExpr);
}
}
@@ -150355,7 +153341,7 @@ static void updateVirtualTable(
sqlite3WhereEnd(pWInfo);
}
- /* Begin scannning through the ephemeral table. */
+ /* Begin scanning through the ephemeral table. */
addr = sqlite3VdbeAddOp1(v, OP_Rewind, ephemTab); VdbeCoverage(v);
/* Extract arguments from the current row of the ephemeral table and
@@ -150563,7 +153549,7 @@ SQLITE_PRIVATE int sqlite3UpsertAnalyzeTarget(
pExpr = &sCol[0];
}
for(jj=0; jj<nn; jj++){
- if( sqlite3ExprCompare(pParse,pTarget->a[jj].pExpr,pExpr,iCursor)<2 ){
+ if( sqlite3ExprCompare(0,pTarget->a[jj].pExpr,pExpr,iCursor)<2 ){
break; /* Column ii of the index matches column jj of target */
}
}
@@ -150912,7 +153898,7 @@ SQLITE_PRIVATE SQLITE_NOINLINE int sqlite3RunVacuum(
** (possibly synchronous) transaction opened on the main database before
** sqlite3BtreeCopyFile() is called.
**
- ** An optimisation would be to use a non-journaled pager.
+ ** An optimization would be to use a non-journaled pager.
** (Later:) I tried setting "PRAGMA vacuum_db.journal_mode=OFF" but
** that actually made the VACUUM run slower. Very little journalling
** actually occurs when doing a vacuum since the vacuum_db is initially
@@ -151435,7 +154421,6 @@ SQLITE_PRIVATE void sqlite3VtabUnlockList(sqlite3 *db){
if( p ){
db->pDisconnect = 0;
- sqlite3ExpirePreparedStatements(db, 0);
do {
VTable *pNext = p->pNext;
sqlite3VtabUnlock(p);
@@ -151601,7 +154586,7 @@ SQLITE_PRIVATE void sqlite3VtabFinishParse(Parse *pParse, Token *pEnd){
** the information we've collected.
**
** The VM register number pParse->regRowid holds the rowid of an
- ** entry in the sqlite_schema table tht was created for this vtab
+ ** entry in the sqlite_schema table that was created for this vtab
** by sqlite3StartTable().
*/
iDb = sqlite3SchemaToIndex(db, pTab->pSchema);
@@ -151941,7 +154926,7 @@ SQLITE_API int sqlite3_declare_vtab(sqlite3 *db, const char *zCreateTable){
sqlite3_mutex_enter(db->mutex);
pCtx = db->pVtabCtx;
if( !pCtx || pCtx->bDeclared ){
- sqlite3Error(db, SQLITE_MISUSE);
+ sqlite3Error(db, SQLITE_MISUSE_BKPT);
sqlite3_mutex_leave(db->mutex);
return SQLITE_MISUSE_BKPT;
}
@@ -152345,7 +155330,7 @@ SQLITE_PRIVATE void sqlite3VtabMakeWritable(Parse *pParse, Table *pTab){
**
** An eponymous virtual table instance is one that is named after its
** module, and more importantly, does not require a CREATE VIRTUAL TABLE
-** statement in order to come into existance. Eponymous virtual table
+** statement in order to come into existence. Eponymous virtual table
** instances always exist. They cannot be DROP-ed.
**
** Any virtual table module for which xConnect and xCreate are the same
@@ -152536,7 +155521,7 @@ typedef struct WhereRightJoin WhereRightJoin;
/*
** This object is a header on a block of allocated memory that will be
-** automatically freed when its WInfo oject is destructed.
+** automatically freed when its WInfo object is destructed.
*/
struct WhereMemBlock {
WhereMemBlock *pNext; /* Next block in the chain */
@@ -152597,7 +155582,7 @@ struct WhereLevel {
int iCur; /* The VDBE cursor used by this IN operator */
int addrInTop; /* Top of the IN loop */
int iBase; /* Base register of multi-key index record */
- int nPrefix; /* Number of prior entires in the key */
+ int nPrefix; /* Number of prior entries in the key */
u8 eEndLoopOp; /* IN Loop terminator. OP_Next or OP_Prev */
} *aInLoop; /* Information about each nested IN operator */
} in; /* Used when pWLoop->wsFlags&WHERE_IN_ABLE */
@@ -152847,7 +155832,7 @@ struct WhereClause {
int nTerm; /* Number of terms */
int nSlot; /* Number of entries in a[] */
int nBase; /* Number of terms through the last non-Virtual */
- WhereTerm *a; /* Each a[] describes a term of the WHERE cluase */
+ WhereTerm *a; /* Each a[] describes a term of the WHERE clause */
#if defined(SQLITE_SMALL_STACK)
WhereTerm aStatic[1]; /* Initial static space for a[] */
#else
@@ -153001,7 +155986,7 @@ SQLITE_PRIVATE Bitmask sqlite3WhereGetMask(WhereMaskSet*,int);
#ifdef WHERETRACE_ENABLED
SQLITE_PRIVATE void sqlite3WhereClausePrint(WhereClause *pWC);
SQLITE_PRIVATE void sqlite3WhereTermPrint(WhereTerm *pTerm, int iTerm);
-SQLITE_PRIVATE void sqlite3WhereLoopPrint(WhereLoop *p, WhereClause *pWC);
+SQLITE_PRIVATE void sqlite3WhereLoopPrint(const WhereLoop *p, const WhereClause *pWC);
#endif
SQLITE_PRIVATE WhereTerm *sqlite3WhereFindTerm(
WhereClause *pWC, /* The WHERE clause to be searched */
@@ -153132,7 +156117,7 @@ SQLITE_PRIVATE void sqlite3WhereTabFuncArgs(Parse*, SrcItem*, WhereClause*);
#define WHERE_BLOOMFILTER 0x00400000 /* Consider using a Bloom-filter */
#define WHERE_SELFCULL 0x00800000 /* nOut reduced by extra WHERE terms */
#define WHERE_OMIT_OFFSET 0x01000000 /* Set offset counter to zero */
-#define WHERE_VIEWSCAN 0x02000000 /* A full-scan of a VIEW or subquery */
+ /* 0x02000000 -- available for reuse */
#define WHERE_EXPRIDX 0x04000000 /* Uses an index-on-expressions */
#endif /* !defined(SQLITE_WHEREINT_H) */
@@ -153435,6 +156420,12 @@ SQLITE_PRIVATE void sqlite3WhereAddScanStatus(
if( wsFlags & WHERE_INDEXED ){
sqlite3VdbeScanStatusRange(v, addrExplain, -1, pLvl->iIdxCur);
}
+ }else{
+ int addr = pSrclist->a[pLvl->iFrom].addrFillSub;
+ VdbeOp *pOp = sqlite3VdbeGetOp(v, addr-1);
+ assert( sqlite3VdbeDb(v)->mallocFailed || pOp->opcode==OP_InitCoroutine );
+ assert( sqlite3VdbeDb(v)->mallocFailed || pOp->p2>addr );
+ sqlite3VdbeScanStatusRange(v, addrExplain, addr, pOp->p2-1);
}
}
}
@@ -153932,7 +156923,7 @@ static int codeAllEqualityTerms(
/* Figure out how many memory cells we will need then allocate them.
*/
regBase = pParse->nMem + 1;
- nReg = pLoop->u.btree.nEq + nExtraReg;
+ nReg = nEq + nExtraReg;
pParse->nMem += nReg;
zAff = sqlite3DbStrDup(pParse->db,sqlite3IndexAffinityStr(pParse->db,pIdx));
@@ -153979,9 +156970,6 @@ static int codeAllEqualityTerms(
sqlite3VdbeAddOp2(v, OP_Copy, r1, regBase+j);
}
}
- }
- for(j=nSkip; j<nEq; j++){
- pTerm = pLoop->aLTerm[j];
if( pTerm->eOperator & WO_IN ){
if( pTerm->pExpr->flags & EP_xIsSelect ){
/* No affinity ever needs to be (or should be) applied to a value
@@ -154124,7 +157112,7 @@ static int codeCursorHintIsOrFunction(Walker *pWalker, Expr *pExpr){
** 2) transform the expression node to a TK_REGISTER node that reads
** from the newly populated register.
**
-** Also, if the node is a TK_COLUMN that does access the table idenified
+** Also, if the node is a TK_COLUMN that does access the table identified
** by pCCurHint.iTabCur, and an index is being used (which we will
** know because CCurHint.pIdx!=0) then transform the TK_COLUMN into
** an access of the index rather than the original table.
@@ -154742,7 +157730,7 @@ SQLITE_PRIVATE Bitmask sqlite3WhereCodeOneLoopStart(
};
assert( TK_LE==TK_GT+1 ); /* Make sure the ordering.. */
assert( TK_LT==TK_GT+2 ); /* ... of the TK_xx values... */
- assert( TK_GE==TK_GT+3 ); /* ... is correcct. */
+ assert( TK_GE==TK_GT+3 ); /* ... is correct. */
assert( (pStart->wtFlags & TERM_VNULL)==0 );
testcase( pStart->wtFlags & TERM_VIRTUAL );
@@ -155922,7 +158910,7 @@ SQLITE_PRIVATE SQLITE_NOINLINE void sqlite3WhereRightJoinLoop(
** the WHERE clause of SQL statements.
**
** This file was originally part of where.c but was split out to improve
-** readability and editabiliity. This file contains utility routines for
+** readability and editability. This file contains utility routines for
** analyzing Expr objects in the WHERE clause.
*/
/* #include "sqliteInt.h" */
@@ -156138,7 +159126,7 @@ static int isLikeOrGlob(
** range search. The third is because the caller assumes that the pattern
** consists of at least one character after all escapes have been
** removed. */
- if( cnt!=0 && 255!=(u8)z[cnt-1] && (cnt>1 || z[0]!=wc[3]) ){
+ if( (cnt>1 || (cnt>0 && z[0]!=wc[3])) && 255!=(u8)z[cnt-1] ){
Expr *pPrefix;
/* A "complete" match if the pattern ends with "*" or "%" */
@@ -156711,7 +159699,7 @@ static void exprAnalyzeOrTerm(
pOrTerm->leftCursor))==0 ){
/* This term must be of the form t1.a==t2.b where t2 is in the
** chngToIN set but t1 is not. This term will be either preceded
- ** or follwed by an inverted copy (t2.b==t1.a). Skip this term
+ ** or followed by an inverted copy (t2.b==t1.a). Skip this term
** and use its inversion. */
testcase( pOrTerm->wtFlags & TERM_COPIED );
testcase( pOrTerm->wtFlags & TERM_VIRTUAL );
@@ -156973,8 +159961,8 @@ static void exprAnalyze(
WhereTerm *pTerm; /* The term to be analyzed */
WhereMaskSet *pMaskSet; /* Set of table index masks */
Expr *pExpr; /* The expression to be analyzed */
- Bitmask prereqLeft; /* Prerequesites of the pExpr->pLeft */
- Bitmask prereqAll; /* Prerequesites of pExpr */
+ Bitmask prereqLeft; /* Prerequisites of the pExpr->pLeft */
+ Bitmask prereqAll; /* Prerequisites of pExpr */
Bitmask extraRight = 0; /* Extra dependencies on LEFT JOIN */
Expr *pStr1 = 0; /* RHS of LIKE/GLOB operator */
int isComplete = 0; /* RHS of LIKE/GLOB ends with wildcard */
@@ -158460,12 +161448,22 @@ static void translateColumnToCopy(
for(; iStart<iEnd; iStart++, pOp++){
if( pOp->p1!=iTabCur ) continue;
if( pOp->opcode==OP_Column ){
+#ifdef SQLITE_DEBUG
+ if( pParse->db->flags & SQLITE_VdbeAddopTrace ){
+ printf("TRANSLATE OP_Column to OP_Copy at %d\n", iStart);
+ }
+#endif
pOp->opcode = OP_Copy;
pOp->p1 = pOp->p2 + iRegister;
pOp->p2 = pOp->p3;
pOp->p3 = 0;
pOp->p5 = 2; /* Cause the MEM_Subtype flag to be cleared */
}else if( pOp->opcode==OP_Rowid ){
+#ifdef SQLITE_DEBUG
+ if( pParse->db->flags & SQLITE_VdbeAddopTrace ){
+ printf("TRANSLATE OP_Rowid to OP_Sequence at %d\n", iStart);
+ }
+#endif
pOp->opcode = OP_Sequence;
pOp->p1 = iAutoidxCur;
#ifdef SQLITE_ALLOW_ROWID_IN_VIEW
@@ -158924,13 +161922,17 @@ static SQLITE_NOINLINE void sqlite3ConstructBloomFilter(
WhereLoop *pLoop = pLevel->pWLoop; /* The loop being coded */
int iCur; /* Cursor for table getting the filter */
IndexedExpr *saved_pIdxEpr; /* saved copy of Parse.pIdxEpr */
+ IndexedExpr *saved_pIdxPartExpr; /* saved copy of Parse.pIdxPartExpr */
saved_pIdxEpr = pParse->pIdxEpr;
+ saved_pIdxPartExpr = pParse->pIdxPartExpr;
pParse->pIdxEpr = 0;
+ pParse->pIdxPartExpr = 0;
assert( pLoop!=0 );
assert( v!=0 );
assert( pLoop->wsFlags & WHERE_BLOOMFILTER );
+ assert( (pLoop->wsFlags & WHERE_IDX_ONLY)==0 );
addrOnce = sqlite3VdbeAddOp0(v, OP_Once); VdbeCoverage(v);
do{
@@ -159020,6 +162022,7 @@ static SQLITE_NOINLINE void sqlite3ConstructBloomFilter(
}while( iLevel < pWInfo->nLevel );
sqlite3VdbeJumpHere(v, addrOnce);
pParse->pIdxEpr = saved_pIdxEpr;
+ pParse->pIdxPartExpr = saved_pIdxPartExpr;
}
@@ -159535,7 +162538,7 @@ SQLITE_PRIVATE char sqlite3IndexColumnAffinity(sqlite3 *db, Index *pIdx, int iCo
** Value pLoop->nOut is currently set to the estimated number of rows
** visited for scanning (a=? AND b=?). This function reduces that estimate
** by some factor to account for the (c BETWEEN ? AND ?) expression based
-** on the stat4 data for the index. this scan will be peformed multiple
+** on the stat4 data for the index. this scan will be performed multiple
** times (once for each (a,b) combination that matches a=?) is dealt with
** by the caller.
**
@@ -159787,7 +162790,8 @@ static int whereRangeScanEst(
** sample, then assume they are 4x more selective. This brings
** the estimated selectivity more in line with what it would be
** if estimated without the use of STAT4 tables. */
- if( iLwrIdx==iUprIdx ) nNew -= 20; assert( 20==sqlite3LogEst(4) );
+ if( iLwrIdx==iUprIdx ){ nNew -= 20; }
+ assert( 20==sqlite3LogEst(4) );
}else{
nNew = 10; assert( 10==sqlite3LogEst(2) );
}
@@ -160011,17 +163015,34 @@ SQLITE_PRIVATE void sqlite3WhereClausePrint(WhereClause *pWC){
#ifdef WHERETRACE_ENABLED
/*
** Print a WhereLoop object for debugging purposes
-*/
-SQLITE_PRIVATE void sqlite3WhereLoopPrint(WhereLoop *p, WhereClause *pWC){
- WhereInfo *pWInfo = pWC->pWInfo;
- int nb = 1+(pWInfo->pTabList->nSrc+3)/4;
- SrcItem *pItem = pWInfo->pTabList->a + p->iTab;
- Table *pTab = pItem->pTab;
- Bitmask mAll = (((Bitmask)1)<<(nb*4)) - 1;
- sqlite3DebugPrintf("%c%2d.%0*llx.%0*llx", p->cId,
- p->iTab, nb, p->maskSelf, nb, p->prereq & mAll);
- sqlite3DebugPrintf(" %12s",
- pItem->zAlias ? pItem->zAlias : pTab->zName);
+**
+** Format example:
+**
+** .--- Position in WHERE clause rSetup, rRun, nOut ---.
+** | |
+** | .--- selfMask nTerm ------. |
+** | | | |
+** | | .-- prereq Idx wsFlags----. | |
+** | | | Name | | |
+** | | | __|__ nEq ---. ___|__ | __|__
+** | / \ / \ / \ | / \ / \ / \
+** 1.002.001 t2.t2xy 2 f 010241 N 2 cost 0,56,31
+*/
+SQLITE_PRIVATE void sqlite3WhereLoopPrint(const WhereLoop *p, const WhereClause *pWC){
+ if( pWC ){
+ WhereInfo *pWInfo = pWC->pWInfo;
+ int nb = 1+(pWInfo->pTabList->nSrc+3)/4;
+ SrcItem *pItem = pWInfo->pTabList->a + p->iTab;
+ Table *pTab = pItem->pTab;
+ Bitmask mAll = (((Bitmask)1)<<(nb*4)) - 1;
+ sqlite3DebugPrintf("%c%2d.%0*llx.%0*llx", p->cId,
+ p->iTab, nb, p->maskSelf, nb, p->prereq & mAll);
+ sqlite3DebugPrintf(" %12s",
+ pItem->zAlias ? pItem->zAlias : pTab->zName);
+ }else{
+ sqlite3DebugPrintf("%c%2d.%03llx.%03llx %c%d",
+ p->cId, p->iTab, p->maskSelf, p->prereq & 0xfff, p->cId, p->iTab);
+ }
if( (p->wsFlags & WHERE_VIRTUALTABLE)==0 ){
const char *zName;
if( p->u.btree.pIndex && (zName = p->u.btree.pIndex->zName)!=0 ){
@@ -160058,6 +163079,15 @@ SQLITE_PRIVATE void sqlite3WhereLoopPrint(WhereLoop *p, WhereClause *pWC){
}
}
}
+SQLITE_PRIVATE void sqlite3ShowWhereLoop(const WhereLoop *p){
+ if( p ) sqlite3WhereLoopPrint(p, 0);
+}
+SQLITE_PRIVATE void sqlite3ShowWhereLoopList(const WhereLoop *p){
+ while( p ){
+ sqlite3ShowWhereLoop(p);
+ p = p->pNextLoop;
+ }
+}
#endif
/*
@@ -160170,46 +163200,60 @@ static void whereInfoFree(sqlite3 *db, WhereInfo *pWInfo){
}
/*
-** Return TRUE if all of the following are true:
+** Return TRUE if X is a proper subset of Y but is of equal or less cost.
+** In other words, return true if all constraints of X are also part of Y
+** and Y has additional constraints that might speed the search that X lacks
+** but the cost of running X is not more than the cost of running Y.
+**
+** In other words, return true if the cost relationwship between X and Y
+** is inverted and needs to be adjusted.
**
-** (1) X has the same or lower cost, or returns the same or fewer rows,
-** than Y.
-** (2) X uses fewer WHERE clause terms than Y
-** (3) Every WHERE clause term used by X is also used by Y
-** (4) X skips at least as many columns as Y
-** (5) If X is a covering index, than Y is too
+** Case 1:
**
-** Conditions (2) and (3) mean that X is a "proper subset" of Y.
-** If X is a proper subset of Y then Y is a better choice and ought
-** to have a lower cost. This routine returns TRUE when that cost
-** relationship is inverted and needs to be adjusted. Constraint (4)
-** was added because if X uses skip-scan less than Y it still might
-** deserve a lower cost even if it is a proper subset of Y. Constraint (5)
-** was added because a covering index probably deserves to have a lower cost
-** than a non-covering index even if it is a proper subset.
+** (1a) X and Y use the same index.
+** (1b) X has fewer == terms than Y
+** (1c) Neither X nor Y use skip-scan
+** (1d) X does not have a a greater cost than Y
+**
+** Case 2:
+**
+** (2a) X has the same or lower cost, or returns the same or fewer rows,
+** than Y.
+** (2b) X uses fewer WHERE clause terms than Y
+** (2c) Every WHERE clause term used by X is also used by Y
+** (2d) X skips at least as many columns as Y
+** (2e) If X is a covering index, than Y is too
*/
static int whereLoopCheaperProperSubset(
const WhereLoop *pX, /* First WhereLoop to compare */
const WhereLoop *pY /* Compare against this WhereLoop */
){
int i, j;
+ if( pX->rRun>pY->rRun && pX->nOut>pY->nOut ) return 0; /* (1d) and (2a) */
+ assert( (pX->wsFlags & WHERE_VIRTUALTABLE)==0 );
+ assert( (pY->wsFlags & WHERE_VIRTUALTABLE)==0 );
+ if( pX->u.btree.nEq < pY->u.btree.nEq /* (1b) */
+ && pX->u.btree.pIndex==pY->u.btree.pIndex /* (1a) */
+ && pX->nSkip==0 && pY->nSkip==0 /* (1c) */
+ ){
+ return 1; /* Case 1 is true */
+ }
if( pX->nLTerm-pX->nSkip >= pY->nLTerm-pY->nSkip ){
- return 0; /* X is not a subset of Y */
+ return 0; /* (2b) */
}
- if( pX->rRun>pY->rRun && pX->nOut>pY->nOut ) return 0;
- if( pY->nSkip > pX->nSkip ) return 0;
+ if( pY->nSkip > pX->nSkip ) return 0; /* (2d) */
for(i=pX->nLTerm-1; i>=0; i--){
if( pX->aLTerm[i]==0 ) continue;
for(j=pY->nLTerm-1; j>=0; j--){
if( pY->aLTerm[j]==pX->aLTerm[i] ) break;
}
- if( j<0 ) return 0; /* X not a subset of Y since term X[i] not used by Y */
+ if( j<0 ) return 0; /* (2c) */
}
if( (pX->wsFlags&WHERE_IDX_ONLY)!=0
&& (pY->wsFlags&WHERE_IDX_ONLY)==0 ){
- return 0; /* Constraint (5) */
+ return 0; /* (2e) */
}
- return 1; /* All conditions meet */
+ return 1; /* Case 2 is true */
}
/*
@@ -160290,7 +163334,7 @@ static WhereLoop **whereLoopFindLesser(
** rSetup. Call this SETUP-INVARIANT */
assert( p->rSetup>=pTemplate->rSetup );
- /* Any loop using an appliation-defined index (or PRIMARY KEY or
+ /* Any loop using an application-defined index (or PRIMARY KEY or
** UNIQUE constraint) with one or more == constraints is better
** than an automatic index. Unless it is a skip-scan. */
if( (p->wsFlags & WHERE_AUTO_INDEX)!=0
@@ -160317,7 +163361,7 @@ static WhereLoop **whereLoopFindLesser(
/* If pTemplate is always better than p, then cause p to be overwritten
** with pTemplate. pTemplate is better than p if:
- ** (1) pTemplate has no more dependences than p, and
+ ** (1) pTemplate has no more dependencies than p, and
** (2) pTemplate has an equal or lower cost than p.
*/
if( (p->prereq & pTemplate->prereq)==pTemplate->prereq /* (1) */
@@ -160435,7 +163479,7 @@ static int whereLoopInsert(WhereLoopBuilder *pBuilder, WhereLoop *pTemplate){
}else{
/* We will be overwriting WhereLoop p[]. But before we do, first
** go through the rest of the list and delete any other entries besides
- ** p[] that are also supplated by pTemplate */
+ ** p[] that are also supplanted by pTemplate */
WhereLoop **ppTail = &p->pNextLoop;
WhereLoop *pToDel;
while( *ppTail ){
@@ -160635,7 +163679,7 @@ static int whereRangeVectorLen(
}
/*
-** Adjust the cost C by the costMult facter T. This only occurs if
+** Adjust the cost C by the costMult factor T. This only occurs if
** compiled with -DSQLITE_ENABLE_COSTMULT
*/
#ifdef SQLITE_ENABLE_COSTMULT
@@ -160662,7 +163706,7 @@ static int whereLoopAddBtreeIndex(
Index *pProbe, /* An index on pSrc */
LogEst nInMul /* log(Number of iterations due to IN) */
){
- WhereInfo *pWInfo = pBuilder->pWInfo; /* WHERE analyse context */
+ WhereInfo *pWInfo = pBuilder->pWInfo; /* WHERE analyze context */
Parse *pParse = pWInfo->pParse; /* Parsing context */
sqlite3 *db = pParse->db; /* Database connection malloc context */
WhereLoop *pNew; /* Template WhereLoop under construction */
@@ -160699,7 +163743,10 @@ static int whereLoopAddBtreeIndex(
assert( pNew->u.btree.nBtm==0 );
opMask = WO_EQ|WO_IN|WO_GT|WO_GE|WO_LT|WO_LE|WO_ISNULL|WO_IS;
}
- if( pProbe->bUnordered ) opMask &= ~(WO_GT|WO_GE|WO_LT|WO_LE);
+ if( pProbe->bUnordered || pProbe->bLowQual ){
+ if( pProbe->bUnordered ) opMask &= ~(WO_GT|WO_GE|WO_LT|WO_LE);
+ if( pProbe->bLowQual ) opMask &= ~(WO_EQ|WO_IN|WO_IS);
+ }
assert( pNew->u.btree.nEq<pProbe->nColumn );
assert( pNew->u.btree.nEq<pProbe->nKeyCol
@@ -160972,7 +164019,7 @@ static int whereLoopAddBtreeIndex(
assert( pSrc->pTab->szTabRow>0 );
if( pProbe->idxType==SQLITE_IDXTYPE_IPK ){
/* The pProbe->szIdxRow is low for an IPK table since the interior
- ** pages are small. Thuse szIdxRow gives a good estimate of seek cost.
+ ** pages are small. Thus szIdxRow gives a good estimate of seek cost.
** But the leaf pages are full-size, so pProbe->szIdxRow would badly
** under-estimate the scanning cost. */
rCostIdx = pNew->nOut + 16;
@@ -161280,6 +164327,100 @@ static SQLITE_NOINLINE u32 whereIsCoveringIndex(
}
/*
+** This is an sqlite3ParserAddCleanup() callback that is invoked to
+** free the Parse->pIdxEpr list when the Parse object is destroyed.
+*/
+static void whereIndexedExprCleanup(sqlite3 *db, void *pObject){
+ IndexedExpr **pp = (IndexedExpr**)pObject;
+ while( *pp!=0 ){
+ IndexedExpr *p = *pp;
+ *pp = p->pIENext;
+ sqlite3ExprDelete(db, p->pExpr);
+ sqlite3DbFreeNN(db, p);
+ }
+}
+
+/*
+** This function is called for a partial index - one with a WHERE clause - in
+** two scenarios. In both cases, it determines whether or not the WHERE
+** clause on the index implies that a column of the table may be safely
+** replaced by a constant expression. For example, in the following
+** SELECT:
+**
+** CREATE INDEX i1 ON t1(b, c) WHERE a=<expr>;
+** SELECT a, b, c FROM t1 WHERE a=<expr> AND b=?;
+**
+** The "a" in the select-list may be replaced by <expr>, iff:
+**
+** (a) <expr> is a constant expression, and
+** (b) The (a=<expr>) comparison uses the BINARY collation sequence, and
+** (c) Column "a" has an affinity other than NONE or BLOB.
+**
+** If argument pItem is NULL, then pMask must not be NULL. In this case this
+** function is being called as part of determining whether or not pIdx
+** is a covering index. This function clears any bits in (*pMask)
+** corresponding to columns that may be replaced by constants as described
+** above.
+**
+** Otherwise, if pItem is not NULL, then this function is being called
+** as part of coding a loop that uses index pIdx. In this case, add entries
+** to the Parse.pIdxPartExpr list for each column that can be replaced
+** by a constant.
+*/
+static void wherePartIdxExpr(
+ Parse *pParse, /* Parse context */
+ Index *pIdx, /* Partial index being processed */
+ Expr *pPart, /* WHERE clause being processed */
+ Bitmask *pMask, /* Mask to clear bits in */
+ int iIdxCur, /* Cursor number for index */
+ SrcItem *pItem /* The FROM clause entry for the table */
+){
+ assert( pItem==0 || (pItem->fg.jointype & JT_RIGHT)==0 );
+ assert( (pItem==0 || pMask==0) && (pMask!=0 || pItem!=0) );
+
+ if( pPart->op==TK_AND ){
+ wherePartIdxExpr(pParse, pIdx, pPart->pRight, pMask, iIdxCur, pItem);
+ pPart = pPart->pLeft;
+ }
+
+ if( (pPart->op==TK_EQ || pPart->op==TK_IS) ){
+ Expr *pLeft = pPart->pLeft;
+ Expr *pRight = pPart->pRight;
+ u8 aff;
+
+ if( pLeft->op!=TK_COLUMN ) return;
+ if( !sqlite3ExprIsConstant(pRight) ) return;
+ if( !sqlite3IsBinary(sqlite3ExprCompareCollSeq(pParse, pPart)) ) return;
+ if( pLeft->iColumn<0 ) return;
+ aff = pIdx->pTable->aCol[pLeft->iColumn].affinity;
+ if( aff>=SQLITE_AFF_TEXT ){
+ if( pItem ){
+ sqlite3 *db = pParse->db;
+ IndexedExpr *p = (IndexedExpr*)sqlite3DbMallocRaw(db, sizeof(*p));
+ if( p ){
+ int bNullRow = (pItem->fg.jointype&(JT_LEFT|JT_LTORJ))!=0;
+ p->pExpr = sqlite3ExprDup(db, pRight, 0);
+ p->iDataCur = pItem->iCursor;
+ p->iIdxCur = iIdxCur;
+ p->iIdxCol = pLeft->iColumn;
+ p->bMaybeNullRow = bNullRow;
+ p->pIENext = pParse->pIdxPartExpr;
+ p->aff = aff;
+ pParse->pIdxPartExpr = p;
+ if( p->pIENext==0 ){
+ void *pArg = (void*)&pParse->pIdxPartExpr;
+ sqlite3ParserAddCleanup(pParse, whereIndexedExprCleanup, pArg);
+ }
+ }
+ }else if( pLeft->iColumn<(BMS-1) ){
+ *pMask &= ~((Bitmask)1 << pLeft->iColumn);
+ }
+ }
+ }
+}
+
+
+/*
** Add all WhereLoop objects for a single table of the join where the table
** is identified by pBuilder->pNew->iTab. That table is guaranteed to be
** a b-tree table, not a virtual table.
@@ -161317,7 +164458,7 @@ static SQLITE_NOINLINE u32 whereIsCoveringIndex(
*/
static int whereLoopAddBtree(
WhereLoopBuilder *pBuilder, /* WHERE clause information */
- Bitmask mPrereq /* Extra prerequesites for using this table */
+ Bitmask mPrereq /* Extra prerequisites for using this table */
){
WhereInfo *pWInfo; /* WHERE analysis context */
Index *pProbe; /* An index we are evaluating */
@@ -161482,9 +164623,6 @@ static int whereLoopAddBtree(
#else
pNew->rRun = rSize + 16;
#endif
- if( IsView(pTab) || (pTab->tabFlags & TF_Ephemeral)!=0 ){
- pNew->wsFlags |= WHERE_VIEWSCAN;
- }
ApplyCostMultiplier(pNew->rRun, pTab->costMult);
whereLoopOutputAdjust(pWC, pNew, rSize);
rc = whereLoopInsert(pBuilder, pNew);
@@ -161497,6 +164635,11 @@ static int whereLoopAddBtree(
pNew->wsFlags = WHERE_IDX_ONLY | WHERE_INDEXED;
}else{
m = pSrc->colUsed & pProbe->colNotIdxed;
+ if( pProbe->pPartIdxWhere ){
+ wherePartIdxExpr(
+ pWInfo->pParse, pProbe, pProbe->pPartIdxWhere, &m, 0, 0
+ );
+ }
pNew->wsFlags = WHERE_INDEXED;
if( m==TOPBIT || (pProbe->bHasExpr && !pProbe->bHasVCol && m!=0) ){
u32 isCov = whereIsCoveringIndex(pWInfo, pProbe, pSrc->iCursor);
@@ -161824,7 +164967,7 @@ static int whereLoopAddVirtualOne(
**
** Return a pointer to the collation name:
**
-** 1. If there is an explicit COLLATE operator on the constaint, return it.
+** 1. If there is an explicit COLLATE operator on the constraint, return it.
**
** 2. Else, if the column has an alternative collation, return that.
**
@@ -161879,7 +165022,7 @@ SQLITE_API int sqlite3_vtab_rhs_value(
sqlite3_value *pVal = 0;
int rc = SQLITE_OK;
if( iCons<0 || iCons>=pIdxInfo->nConstraint ){
- rc = SQLITE_MISUSE; /* EV: R-30545-25046 */
+ rc = SQLITE_MISUSE_BKPT; /* EV: R-30545-25046 */
}else{
if( pH->aRhs[iCons]==0 ){
WhereTerm *pTerm = &pH->pWC->a[pIdxInfo->aConstraint[iCons].iTermOffset];
@@ -162785,7 +165928,8 @@ static int wherePathSolver(WhereInfo *pWInfo, LogEst nRowEst){
** For joins of 3 or more tables, track the 10 best paths */
mxChoice = (nLoop<=1) ? 1 : (nLoop==2 ? 5 : 10);
assert( nLoop<=pWInfo->pTabList->nSrc );
- WHERETRACE(0x002, ("---- begin solver. (nRowEst=%d)\n", nRowEst));
+ WHERETRACE(0x002, ("---- begin solver. (nRowEst=%d, nQueryLoop=%d)\n",
+ nRowEst, pParse->nQueryLoop));
/* If nRowEst is zero and there is an ORDER BY clause, ignore it. In this
** case the purpose of this call is to estimate the number of rows returned
@@ -162888,7 +166032,7 @@ static int wherePathSolver(WhereInfo *pWInfo, LogEst nRowEst){
);
}
/* TUNING: Add a small extra penalty (3) to sorting as an
- ** extra encouragment to the query planner to select a plan
+ ** extra encouragement to the query planner to select a plan
** where the rows emerge in the correct order without any sorting
** required. */
rCost = sqlite3LogEstAdd(rUnsorted, aSortCost[isOrdered]) + 3;
@@ -162902,13 +166046,6 @@ static int wherePathSolver(WhereInfo *pWInfo, LogEst nRowEst){
rUnsorted -= 2; /* TUNING: Slight bias in favor of no-sort plans */
}
- /* TUNING: A full-scan of a VIEW or subquery in the outer loop
- ** is not so bad. */
- if( iLoop==0 && (pWLoop->wsFlags & WHERE_VIEWSCAN)!=0 ){
- rCost += -10;
- nOut += -30;
- }
-
/* Check to see if pWLoop should be added to the set of
** mxChoice best-so-far paths.
**
@@ -163459,20 +166596,6 @@ static SQLITE_NOINLINE void whereCheckIfBloomFilterIsUseful(
}
/*
-** This is an sqlite3ParserAddCleanup() callback that is invoked to
-** free the Parse->pIdxEpr list when the Parse object is destroyed.
-*/
-static void whereIndexedExprCleanup(sqlite3 *db, void *pObject){
- Parse *pParse = (Parse*)pObject;
- while( pParse->pIdxEpr!=0 ){
- IndexedExpr *p = pParse->pIdxEpr;
- pParse->pIdxEpr = p->pIENext;
- sqlite3ExprDelete(db, p->pExpr);
- sqlite3DbFreeNN(db, p);
- }
-}
-
-/*
** The index pIdx is used by a query and contains one or more expressions.
** In other words pIdx is an index on an expression. iIdxCur is the cursor
** number for the index and iDataCur is the cursor number for the corresponding
@@ -163511,6 +166634,20 @@ static SQLITE_NOINLINE void whereAddIndexedExpr(
continue;
}
if( sqlite3ExprIsConstant(pExpr) ) continue;
+ if( pExpr->op==TK_FUNCTION ){
+ /* Functions that might set a subtype should not be replaced by the
+ ** value taken from an expression index since the index omits the
+ ** subtype. https://sqlite.org/forum/forumpost/68d284c86b082c3e */
+ int n;
+ FuncDef *pDef;
+ sqlite3 *db = pParse->db;
+ assert( ExprUseXList(pExpr) );
+ n = pExpr->x.pList ? pExpr->x.pList->nExpr : 0;
+ pDef = sqlite3FindFunction(db, pExpr->u.zToken, n, ENC(db), 0);
+ if( pDef==0 || (pDef->funcFlags & SQLITE_RESULT_SUBTYPE)!=0 ){
+ continue;
+ }
+ }
p = sqlite3DbMallocRaw(pParse->db, sizeof(IndexedExpr));
if( p==0 ) break;
p->pIENext = pParse->pIdxEpr;
@@ -163533,7 +166670,30 @@ static SQLITE_NOINLINE void whereAddIndexedExpr(
#endif
pParse->pIdxEpr = p;
if( p->pIENext==0 ){
- sqlite3ParserAddCleanup(pParse, whereIndexedExprCleanup, pParse);
+ void *pArg = (void*)&pParse->pIdxEpr;
+ sqlite3ParserAddCleanup(pParse, whereIndexedExprCleanup, pArg);
+ }
+ }
+}
+
+/*
+** Set the reverse-scan order mask to one for all tables in the query
+** with the exception of MATERIALIZED common table expressions that have
+** their own internal ORDER BY clauses.
+**
+** This implements the PRAGMA reverse_unordered_selects=ON setting.
+** (Also SQLITE_DBCONFIG_REVERSE_SCANORDER).
+*/
+static SQLITE_NOINLINE void whereReverseScanOrder(WhereInfo *pWInfo){
+ int ii;
+ for(ii=0; ii<pWInfo->pTabList->nSrc; ii++){
+ SrcItem *pItem = &pWInfo->pTabList->a[ii];
+ if( !pItem->fg.isCte
+ || pItem->u2.pCteUse->eM10d!=M10d_Yes
+ || NEVER(pItem->pSelect==0)
+ || pItem->pSelect->pOrderBy==0
+ ){
+ pWInfo->revMask |= MASKBIT(ii);
}
}
}
@@ -163596,7 +166756,7 @@ static SQLITE_NOINLINE void whereAddIndexedExpr(
**
** OUTER JOINS
**
-** An outer join of tables t1 and t2 is conceptally coded as follows:
+** An outer join of tables t1 and t2 is conceptually coded as follows:
**
** foreach row1 in t1 do
** flag = 0
@@ -163666,7 +166826,10 @@ SQLITE_PRIVATE WhereInfo *sqlite3WhereBegin(
/* An ORDER/GROUP BY clause of more than 63 terms cannot be optimized */
testcase( pOrderBy && pOrderBy->nExpr==BMS-1 );
- if( pOrderBy && pOrderBy->nExpr>=BMS ) pOrderBy = 0;
+ if( pOrderBy && pOrderBy->nExpr>=BMS ){
+ pOrderBy = 0;
+ wctrlFlags &= ~WHERE_WANT_DISTINCT;
+ }
/* The number of tables in the FROM clause is limited by the number of
** bits in a Bitmask
@@ -163691,7 +166854,10 @@ SQLITE_PRIVATE WhereInfo *sqlite3WhereBegin(
** field (type Bitmask) it must be aligned on an 8-byte boundary on
** some architectures. Hence the ROUND8() below.
*/
- nByteWInfo = ROUND8P(sizeof(WhereInfo)+(nTabList-1)*sizeof(WhereLevel));
+ nByteWInfo = ROUND8P(sizeof(WhereInfo));
+ if( nTabList>1 ){
+ nByteWInfo = ROUND8P(nByteWInfo + (nTabList-1)*sizeof(WhereLevel));
+ }
pWInfo = sqlite3DbMallocRawNN(db, nByteWInfo + sizeof(WhereLoop));
if( db->mallocFailed ){
sqlite3DbFree(db, pWInfo);
@@ -163751,7 +166917,7 @@ SQLITE_PRIVATE WhereInfo *sqlite3WhereBegin(
**
** The N-th term of the FROM clause is assigned a bitmask of 1<<N.
**
- ** The rule of the previous sentence ensures thta if X is the bitmask for
+ ** The rule of the previous sentence ensures that if X is the bitmask for
** a table T, then X-1 is the bitmask for all other tables to the left of T.
** Knowing the bitmask for all tables to the left of a left join is
** important. Ticket #3015.
@@ -163901,9 +167067,20 @@ SQLITE_PRIVATE WhereInfo *sqlite3WhereBegin(
wherePathSolver(pWInfo, pWInfo->nRowOut+1);
if( db->mallocFailed ) goto whereBeginError;
}
+
+ /* TUNING: Assume that a DISTINCT clause on a subquery reduces
+ ** the output size by a factor of 8 (LogEst -30).
+ */
+ if( (pWInfo->wctrlFlags & WHERE_WANT_DISTINCT)!=0 ){
+ WHERETRACE(0x0080,("nRowOut reduced from %d to %d due to DISTINCT\n",
+ pWInfo->nRowOut, pWInfo->nRowOut-30));
+ pWInfo->nRowOut -= 30;
+ }
+
}
+ assert( pWInfo->pTabList!=0 );
if( pWInfo->pOrderBy==0 && (db->flags & SQLITE_ReverseOrder)!=0 ){
- pWInfo->revMask = ALLBITS;
+ whereReverseScanOrder(pWInfo);
}
if( pParse->nErr ){
goto whereBeginError;
@@ -164003,6 +167180,7 @@ SQLITE_PRIVATE WhereInfo *sqlite3WhereBegin(
0!=(wctrlFlags & WHERE_ONEPASS_MULTIROW)
&& !IsVirtual(pTabList->a[0].pTab)
&& (0==(wsFlags & WHERE_MULTI_OR) || (wctrlFlags & WHERE_DUPLICATES_OK))
+ && OptimizationEnabled(db, SQLITE_OnePass)
)){
pWInfo->eOnePass = bOnerow ? ONEPASS_SINGLE : ONEPASS_MULTI;
if( HasRowid(pTabList->a[0].pTab) && (wsFlags & WHERE_IDX_ONLY) ){
@@ -164111,6 +167289,11 @@ SQLITE_PRIVATE WhereInfo *sqlite3WhereBegin(
if( pIx->bHasExpr && OptimizationEnabled(db, SQLITE_IndexedExpr) ){
whereAddIndexedExpr(pParse, pIx, iIndexCur, pTabItem);
}
+ if( pIx->pPartIdxWhere && (pTabItem->fg.jointype & JT_RIGHT)==0 ){
+ wherePartIdxExpr(
+ pParse, pIx, pIx->pPartIdxWhere, 0, iIndexCur, pTabItem
+ );
+ }
}
pLevel->iIdxCur = iIndexCur;
assert( pIx!=0 );
@@ -164236,6 +167419,11 @@ whereBeginError:
pParse->nQueryLoop = pWInfo->savedNQueryLoop;
whereInfoFree(db, pWInfo);
}
+#ifdef WHERETRACE_ENABLED
+ /* Prevent harmless compiler warnings about debugging routines
+ ** being declared but never used */
+ sqlite3ShowWhereLoopList(0);
+#endif /* WHERETRACE_ENABLED */
return 0;
}
@@ -164732,7 +167920,7 @@ SQLITE_PRIVATE void sqlite3WhereEnd(WhereInfo *pWInfo){
**
** These are the same built-in window functions supported by Postgres.
** Although the behaviour of aggregate window functions (functions that
-** can be used as either aggregates or window funtions) allows them to
+** can be used as either aggregates or window functions) allows them to
** be implemented using an API, built-in window functions are much more
** esoteric. Additionally, some window functions (e.g. nth_value())
** may only be implemented by caching the entire partition in memory.
@@ -165262,7 +168450,7 @@ static Window *windowFind(Parse *pParse, Window *pList, const char *zName){
** is the Window object representing the associated OVER clause. This
** function updates the contents of pWin as follows:
**
-** * If the OVER clause refered to a named window (as in "max(x) OVER win"),
+** * If the OVER clause referred to a named window (as in "max(x) OVER win"),
** search list pList for a matching WINDOW definition, and update pWin
** accordingly. If no such WINDOW clause can be found, leave an error
** in pParse.
@@ -165653,7 +168841,7 @@ SQLITE_PRIVATE int sqlite3WindowRewrite(Parse *pParse, Select *p){
assert( ExprUseXList(pWin->pOwner) );
assert( pWin->pWFunc!=0 );
pArgs = pWin->pOwner->x.pList;
- if( pWin->pWFunc->funcFlags & SQLITE_FUNC_SUBTYPE ){
+ if( pWin->pWFunc->funcFlags & SQLITE_SUBTYPE ){
selectWindowRewriteEList(pParse, pMWin, pSrc, pArgs, pTab, &pSublist);
pWin->iArgCol = (pSublist ? pSublist->nExpr : 0);
pWin->bExprArgs = 1;
@@ -165883,7 +169071,7 @@ SQLITE_PRIVATE Window *sqlite3WindowAssemble(
}
/*
-** Window *pWin has just been created from a WINDOW clause. Tokne pBase
+** Window *pWin has just been created from a WINDOW clause. Token pBase
** is the base window. Earlier windows from the same WINDOW clause are
** stored in the linked list starting at pWin->pNextWin. This function
** either updates *pWin according to the base specification, or else
@@ -165927,8 +169115,9 @@ SQLITE_PRIVATE void sqlite3WindowAttach(Parse *pParse, Expr *p, Window *pWin){
if( p ){
assert( p->op==TK_FUNCTION );
assert( pWin );
+ assert( ExprIsFullSize(p) );
p->y.pWin = pWin;
- ExprSetProperty(p, EP_WinFunc);
+ ExprSetProperty(p, EP_WinFunc|EP_FullSize);
pWin->pOwner = p;
if( (p->flags & EP_Distinct) && pWin->eFrmType!=TK_FILTER ){
sqlite3ErrorMsg(pParse,
@@ -166189,7 +169378,7 @@ struct WindowCsrAndReg {
**
** (ORDER BY a, b GROUPS BETWEEN 2 PRECEDING AND 2 FOLLOWING)
**
-** The windows functions implmentation caches the input rows in a temp
+** The windows functions implementation caches the input rows in a temp
** table, sorted by "a, b" (it actually populates the cache lazily, and
** aggressively removes rows once they are no longer required, but that's
** a mere detail). It keeps three cursors open on the temp table. One
@@ -167198,7 +170387,7 @@ static int windowExprGtZero(Parse *pParse, Expr *pExpr){
**
** For the most part, the patterns above are adapted to support UNBOUNDED by
** assuming that it is equivalent to "infinity PRECEDING/FOLLOWING" and
-** CURRENT ROW by assuming that it is equivilent to "0 PRECEDING/FOLLOWING".
+** CURRENT ROW by assuming that it is equivalent to "0 PRECEDING/FOLLOWING".
** This is optimized of course - branches that will never be taken and
** conditions that are always true are omitted from the VM code. The only
** exceptional case is:
@@ -167477,7 +170666,7 @@ SQLITE_PRIVATE void sqlite3WindowCodeStep(
}
/* Allocate registers for the array of values from the sub-query, the
- ** samve values in record form, and the rowid used to insert said record
+ ** same values in record form, and the rowid used to insert said record
** into the ephemeral table. */
regNew = pParse->nMem+1;
pParse->nMem += nInput;
@@ -167718,7 +170907,8 @@ SQLITE_PRIVATE void sqlite3WindowCodeStep(
/************** End of window.c **********************************************/
/************** Begin file parse.c *******************************************/
/* This file is automatically generated by Lemon from input grammar
-** source file "parse.y". */
+** source file "parse.y".
+*/
/*
** 2001-09-15
**
@@ -167735,7 +170925,7 @@ SQLITE_PRIVATE void sqlite3WindowCodeStep(
** The canonical source code to this file ("parse.y") is a Lemon grammar
** file that specifies the input grammar and actions to take while parsing.
** That input file is processed by Lemon to generate a C-language
-** implementation of a parser for the given grammer. You might be reading
+** implementation of a parser for the given grammar. You might be reading
** this comment as part of the translated C-code. Edits should be made
** to the original parse.y sources.
*/
@@ -168229,18 +171419,18 @@ typedef union {
#define sqlite3ParserCTX_FETCH Parse *pParse=yypParser->pParse;
#define sqlite3ParserCTX_STORE yypParser->pParse=pParse;
#define YYFALLBACK 1
-#define YYNSTATE 575
-#define YYNRULE 403
+#define YYNSTATE 579
+#define YYNRULE 405
#define YYNRULE_WITH_ACTION 340
#define YYNTOKEN 185
-#define YY_MAX_SHIFT 574
-#define YY_MIN_SHIFTREDUCE 833
-#define YY_MAX_SHIFTREDUCE 1235
-#define YY_ERROR_ACTION 1236
-#define YY_ACCEPT_ACTION 1237
-#define YY_NO_ACTION 1238
-#define YY_MIN_REDUCE 1239
-#define YY_MAX_REDUCE 1641
+#define YY_MAX_SHIFT 578
+#define YY_MIN_SHIFTREDUCE 838
+#define YY_MAX_SHIFTREDUCE 1242
+#define YY_ERROR_ACTION 1243
+#define YY_ACCEPT_ACTION 1244
+#define YY_NO_ACTION 1245
+#define YY_MIN_REDUCE 1246
+#define YY_MAX_REDUCE 1650
/************* End control #defines *******************************************/
#define YY_NLOOKAHEAD ((int)(sizeof(yy_lookahead)/sizeof(yy_lookahead[0])))
@@ -168307,218 +171497,218 @@ typedef union {
** yy_default[] Default action for each state.
**
*********** Begin parsing tables **********************************************/
-#define YY_ACTTAB_COUNT (2096)
+#define YY_ACTTAB_COUNT (2100)
static const YYACTIONTYPE yy_action[] = {
- /* 0 */ 568, 208, 568, 118, 115, 229, 568, 118, 115, 229,
- /* 10 */ 568, 1310, 377, 1289, 408, 562, 562, 562, 568, 409,
- /* 20 */ 378, 1310, 1272, 41, 41, 41, 41, 208, 1520, 71,
- /* 30 */ 71, 969, 419, 41, 41, 491, 303, 279, 303, 970,
- /* 40 */ 397, 71, 71, 125, 126, 80, 1212, 1212, 1047, 1050,
- /* 50 */ 1037, 1037, 123, 123, 124, 124, 124, 124, 476, 409,
- /* 60 */ 1237, 1, 1, 574, 2, 1241, 550, 118, 115, 229,
- /* 70 */ 317, 480, 146, 480, 524, 118, 115, 229, 529, 1323,
- /* 80 */ 417, 523, 142, 125, 126, 80, 1212, 1212, 1047, 1050,
- /* 90 */ 1037, 1037, 123, 123, 124, 124, 124, 124, 118, 115,
- /* 100 */ 229, 327, 122, 122, 122, 122, 121, 121, 120, 120,
- /* 110 */ 120, 119, 116, 444, 284, 284, 284, 284, 442, 442,
- /* 120 */ 442, 1561, 376, 1563, 1188, 375, 1159, 565, 1159, 565,
- /* 130 */ 409, 1561, 537, 259, 226, 444, 101, 145, 449, 316,
- /* 140 */ 559, 240, 122, 122, 122, 122, 121, 121, 120, 120,
- /* 150 */ 120, 119, 116, 444, 125, 126, 80, 1212, 1212, 1047,
- /* 160 */ 1050, 1037, 1037, 123, 123, 124, 124, 124, 124, 142,
- /* 170 */ 294, 1188, 339, 448, 120, 120, 120, 119, 116, 444,
- /* 180 */ 127, 1188, 1189, 1188, 148, 441, 440, 568, 119, 116,
- /* 190 */ 444, 124, 124, 124, 124, 117, 122, 122, 122, 122,
- /* 200 */ 121, 121, 120, 120, 120, 119, 116, 444, 454, 113,
- /* 210 */ 13, 13, 546, 122, 122, 122, 122, 121, 121, 120,
- /* 220 */ 120, 120, 119, 116, 444, 422, 316, 559, 1188, 1189,
- /* 230 */ 1188, 149, 1220, 409, 1220, 124, 124, 124, 124, 122,
- /* 240 */ 122, 122, 122, 121, 121, 120, 120, 120, 119, 116,
- /* 250 */ 444, 465, 342, 1034, 1034, 1048, 1051, 125, 126, 80,
- /* 260 */ 1212, 1212, 1047, 1050, 1037, 1037, 123, 123, 124, 124,
- /* 270 */ 124, 124, 1275, 522, 222, 1188, 568, 409, 224, 514,
- /* 280 */ 175, 82, 83, 122, 122, 122, 122, 121, 121, 120,
- /* 290 */ 120, 120, 119, 116, 444, 1005, 16, 16, 1188, 133,
- /* 300 */ 133, 125, 126, 80, 1212, 1212, 1047, 1050, 1037, 1037,
- /* 310 */ 123, 123, 124, 124, 124, 124, 122, 122, 122, 122,
- /* 320 */ 121, 121, 120, 120, 120, 119, 116, 444, 1038, 546,
- /* 330 */ 1188, 373, 1188, 1189, 1188, 252, 1429, 399, 504, 501,
- /* 340 */ 500, 111, 560, 566, 4, 924, 924, 433, 499, 340,
- /* 350 */ 460, 328, 360, 394, 1233, 1188, 1189, 1188, 563, 568,
- /* 360 */ 122, 122, 122, 122, 121, 121, 120, 120, 120, 119,
- /* 370 */ 116, 444, 284, 284, 369, 1574, 1600, 441, 440, 154,
- /* 380 */ 409, 445, 71, 71, 1282, 565, 1217, 1188, 1189, 1188,
- /* 390 */ 85, 1219, 271, 557, 543, 515, 1555, 568, 98, 1218,
- /* 400 */ 6, 1274, 472, 142, 125, 126, 80, 1212, 1212, 1047,
- /* 410 */ 1050, 1037, 1037, 123, 123, 124, 124, 124, 124, 550,
- /* 420 */ 13, 13, 1024, 507, 1220, 1188, 1220, 549, 109, 109,
- /* 430 */ 222, 568, 1234, 175, 568, 427, 110, 197, 445, 569,
- /* 440 */ 445, 430, 1546, 1014, 325, 551, 1188, 270, 287, 368,
- /* 450 */ 510, 363, 509, 257, 71, 71, 543, 71, 71, 359,
- /* 460 */ 316, 559, 1606, 122, 122, 122, 122, 121, 121, 120,
- /* 470 */ 120, 120, 119, 116, 444, 1014, 1014, 1016, 1017, 27,
- /* 480 */ 284, 284, 1188, 1189, 1188, 1154, 568, 1605, 409, 899,
- /* 490 */ 190, 550, 356, 565, 550, 935, 533, 517, 1154, 516,
- /* 500 */ 413, 1154, 552, 1188, 1189, 1188, 568, 544, 1548, 51,
- /* 510 */ 51, 214, 125, 126, 80, 1212, 1212, 1047, 1050, 1037,
- /* 520 */ 1037, 123, 123, 124, 124, 124, 124, 1188, 474, 135,
- /* 530 */ 135, 409, 284, 284, 1484, 505, 121, 121, 120, 120,
- /* 540 */ 120, 119, 116, 444, 1005, 565, 518, 217, 541, 1555,
- /* 550 */ 316, 559, 142, 6, 532, 125, 126, 80, 1212, 1212,
- /* 560 */ 1047, 1050, 1037, 1037, 123, 123, 124, 124, 124, 124,
- /* 570 */ 1549, 122, 122, 122, 122, 121, 121, 120, 120, 120,
- /* 580 */ 119, 116, 444, 485, 1188, 1189, 1188, 482, 281, 1263,
- /* 590 */ 955, 252, 1188, 373, 504, 501, 500, 1188, 340, 570,
- /* 600 */ 1188, 570, 409, 292, 499, 955, 874, 191, 480, 316,
- /* 610 */ 559, 384, 290, 380, 122, 122, 122, 122, 121, 121,
- /* 620 */ 120, 120, 120, 119, 116, 444, 125, 126, 80, 1212,
- /* 630 */ 1212, 1047, 1050, 1037, 1037, 123, 123, 124, 124, 124,
- /* 640 */ 124, 409, 394, 1132, 1188, 867, 100, 284, 284, 1188,
- /* 650 */ 1189, 1188, 373, 1089, 1188, 1189, 1188, 1188, 1189, 1188,
- /* 660 */ 565, 455, 32, 373, 233, 125, 126, 80, 1212, 1212,
- /* 670 */ 1047, 1050, 1037, 1037, 123, 123, 124, 124, 124, 124,
- /* 680 */ 1428, 957, 568, 228, 956, 122, 122, 122, 122, 121,
- /* 690 */ 121, 120, 120, 120, 119, 116, 444, 1154, 228, 1188,
- /* 700 */ 157, 1188, 1189, 1188, 1547, 13, 13, 301, 955, 1228,
- /* 710 */ 1154, 153, 409, 1154, 373, 1577, 1172, 5, 369, 1574,
- /* 720 */ 429, 1234, 3, 955, 122, 122, 122, 122, 121, 121,
- /* 730 */ 120, 120, 120, 119, 116, 444, 125, 126, 80, 1212,
- /* 740 */ 1212, 1047, 1050, 1037, 1037, 123, 123, 124, 124, 124,
- /* 750 */ 124, 409, 208, 567, 1188, 1025, 1188, 1189, 1188, 1188,
- /* 760 */ 388, 850, 155, 1546, 286, 402, 1094, 1094, 488, 568,
- /* 770 */ 465, 342, 1315, 1315, 1546, 125, 126, 80, 1212, 1212,
- /* 780 */ 1047, 1050, 1037, 1037, 123, 123, 124, 124, 124, 124,
- /* 790 */ 129, 568, 13, 13, 374, 122, 122, 122, 122, 121,
- /* 800 */ 121, 120, 120, 120, 119, 116, 444, 302, 568, 453,
- /* 810 */ 528, 1188, 1189, 1188, 13, 13, 1188, 1189, 1188, 1293,
- /* 820 */ 463, 1263, 409, 1313, 1313, 1546, 1010, 453, 452, 200,
- /* 830 */ 299, 71, 71, 1261, 122, 122, 122, 122, 121, 121,
- /* 840 */ 120, 120, 120, 119, 116, 444, 125, 126, 80, 1212,
- /* 850 */ 1212, 1047, 1050, 1037, 1037, 123, 123, 124, 124, 124,
- /* 860 */ 124, 409, 227, 1069, 1154, 284, 284, 419, 312, 278,
- /* 870 */ 278, 285, 285, 1415, 406, 405, 382, 1154, 565, 568,
- /* 880 */ 1154, 1191, 565, 1594, 565, 125, 126, 80, 1212, 1212,
- /* 890 */ 1047, 1050, 1037, 1037, 123, 123, 124, 124, 124, 124,
- /* 900 */ 453, 1476, 13, 13, 1530, 122, 122, 122, 122, 121,
- /* 910 */ 121, 120, 120, 120, 119, 116, 444, 201, 568, 354,
- /* 920 */ 1580, 574, 2, 1241, 838, 839, 840, 1556, 317, 1207,
- /* 930 */ 146, 6, 409, 255, 254, 253, 206, 1323, 9, 1191,
- /* 940 */ 262, 71, 71, 424, 122, 122, 122, 122, 121, 121,
- /* 950 */ 120, 120, 120, 119, 116, 444, 125, 126, 80, 1212,
- /* 960 */ 1212, 1047, 1050, 1037, 1037, 123, 123, 124, 124, 124,
- /* 970 */ 124, 568, 284, 284, 568, 1208, 409, 573, 313, 1241,
- /* 980 */ 349, 1292, 352, 419, 317, 565, 146, 491, 525, 1637,
- /* 990 */ 395, 371, 491, 1323, 70, 70, 1291, 71, 71, 240,
- /* 1000 */ 1321, 104, 80, 1212, 1212, 1047, 1050, 1037, 1037, 123,
- /* 1010 */ 123, 124, 124, 124, 124, 122, 122, 122, 122, 121,
- /* 1020 */ 121, 120, 120, 120, 119, 116, 444, 1110, 284, 284,
- /* 1030 */ 428, 448, 1519, 1208, 439, 284, 284, 1483, 1348, 311,
- /* 1040 */ 474, 565, 1111, 969, 491, 491, 217, 1259, 565, 1532,
- /* 1050 */ 568, 970, 207, 568, 1024, 240, 383, 1112, 519, 122,
- /* 1060 */ 122, 122, 122, 121, 121, 120, 120, 120, 119, 116,
- /* 1070 */ 444, 1015, 107, 71, 71, 1014, 13, 13, 910, 568,
- /* 1080 */ 1489, 568, 284, 284, 97, 526, 491, 448, 911, 1322,
- /* 1090 */ 1318, 545, 409, 284, 284, 565, 151, 209, 1489, 1491,
- /* 1100 */ 262, 450, 55, 55, 56, 56, 565, 1014, 1014, 1016,
- /* 1110 */ 443, 332, 409, 527, 12, 295, 125, 126, 80, 1212,
- /* 1120 */ 1212, 1047, 1050, 1037, 1037, 123, 123, 124, 124, 124,
- /* 1130 */ 124, 347, 409, 862, 1528, 1208, 125, 126, 80, 1212,
- /* 1140 */ 1212, 1047, 1050, 1037, 1037, 123, 123, 124, 124, 124,
- /* 1150 */ 124, 1133, 1635, 474, 1635, 371, 125, 114, 80, 1212,
- /* 1160 */ 1212, 1047, 1050, 1037, 1037, 123, 123, 124, 124, 124,
- /* 1170 */ 124, 1489, 329, 474, 331, 122, 122, 122, 122, 121,
- /* 1180 */ 121, 120, 120, 120, 119, 116, 444, 203, 1415, 568,
- /* 1190 */ 1290, 862, 464, 1208, 436, 122, 122, 122, 122, 121,
- /* 1200 */ 121, 120, 120, 120, 119, 116, 444, 553, 1133, 1636,
- /* 1210 */ 539, 1636, 15, 15, 890, 122, 122, 122, 122, 121,
- /* 1220 */ 121, 120, 120, 120, 119, 116, 444, 568, 298, 538,
- /* 1230 */ 1131, 1415, 1553, 1554, 1327, 409, 6, 6, 1165, 1264,
- /* 1240 */ 415, 320, 284, 284, 1415, 508, 565, 525, 300, 457,
- /* 1250 */ 43, 43, 568, 891, 12, 565, 330, 478, 425, 407,
- /* 1260 */ 126, 80, 1212, 1212, 1047, 1050, 1037, 1037, 123, 123,
- /* 1270 */ 124, 124, 124, 124, 568, 57, 57, 288, 1188, 1415,
- /* 1280 */ 496, 458, 392, 392, 391, 273, 389, 1131, 1552, 847,
- /* 1290 */ 1165, 407, 6, 568, 321, 1154, 470, 44, 44, 1551,
- /* 1300 */ 1110, 426, 234, 6, 323, 256, 540, 256, 1154, 431,
- /* 1310 */ 568, 1154, 322, 17, 487, 1111, 58, 58, 122, 122,
- /* 1320 */ 122, 122, 121, 121, 120, 120, 120, 119, 116, 444,
- /* 1330 */ 1112, 216, 481, 59, 59, 1188, 1189, 1188, 111, 560,
- /* 1340 */ 324, 4, 236, 456, 526, 568, 237, 456, 568, 437,
- /* 1350 */ 168, 556, 420, 141, 479, 563, 568, 293, 568, 1091,
- /* 1360 */ 568, 293, 568, 1091, 531, 568, 870, 8, 60, 60,
- /* 1370 */ 235, 61, 61, 568, 414, 568, 414, 568, 445, 62,
- /* 1380 */ 62, 45, 45, 46, 46, 47, 47, 199, 49, 49,
- /* 1390 */ 557, 568, 359, 568, 100, 486, 50, 50, 63, 63,
- /* 1400 */ 64, 64, 561, 415, 535, 410, 568, 1024, 568, 534,
- /* 1410 */ 316, 559, 316, 559, 65, 65, 14, 14, 568, 1024,
- /* 1420 */ 568, 512, 930, 870, 1015, 109, 109, 929, 1014, 66,
- /* 1430 */ 66, 131, 131, 110, 451, 445, 569, 445, 416, 177,
- /* 1440 */ 1014, 132, 132, 67, 67, 568, 467, 568, 930, 471,
- /* 1450 */ 1360, 283, 226, 929, 315, 1359, 407, 568, 459, 407,
- /* 1460 */ 1014, 1014, 1016, 239, 407, 86, 213, 1346, 52, 52,
- /* 1470 */ 68, 68, 1014, 1014, 1016, 1017, 27, 1579, 1176, 447,
- /* 1480 */ 69, 69, 288, 97, 108, 1535, 106, 392, 392, 391,
- /* 1490 */ 273, 389, 568, 877, 847, 881, 568, 111, 560, 466,
- /* 1500 */ 4, 568, 152, 30, 38, 568, 1128, 234, 396, 323,
- /* 1510 */ 111, 560, 527, 4, 563, 53, 53, 322, 568, 163,
- /* 1520 */ 163, 568, 337, 468, 164, 164, 333, 563, 76, 76,
- /* 1530 */ 568, 289, 1508, 568, 31, 1507, 568, 445, 338, 483,
- /* 1540 */ 100, 54, 54, 344, 72, 72, 296, 236, 1076, 557,
- /* 1550 */ 445, 877, 1356, 134, 134, 168, 73, 73, 141, 161,
- /* 1560 */ 161, 1568, 557, 535, 568, 319, 568, 348, 536, 1007,
- /* 1570 */ 473, 261, 261, 889, 888, 235, 535, 568, 1024, 568,
- /* 1580 */ 475, 534, 261, 367, 109, 109, 521, 136, 136, 130,
- /* 1590 */ 130, 1024, 110, 366, 445, 569, 445, 109, 109, 1014,
- /* 1600 */ 162, 162, 156, 156, 568, 110, 1076, 445, 569, 445,
- /* 1610 */ 410, 351, 1014, 568, 353, 316, 559, 568, 343, 568,
- /* 1620 */ 100, 497, 357, 258, 100, 896, 897, 140, 140, 355,
- /* 1630 */ 1306, 1014, 1014, 1016, 1017, 27, 139, 139, 362, 451,
- /* 1640 */ 137, 137, 138, 138, 1014, 1014, 1016, 1017, 27, 1176,
- /* 1650 */ 447, 568, 372, 288, 111, 560, 1018, 4, 392, 392,
- /* 1660 */ 391, 273, 389, 568, 1137, 847, 568, 1072, 568, 258,
- /* 1670 */ 492, 563, 568, 211, 75, 75, 555, 960, 234, 261,
- /* 1680 */ 323, 111, 560, 927, 4, 113, 77, 77, 322, 74,
- /* 1690 */ 74, 42, 42, 1369, 445, 48, 48, 1414, 563, 972,
- /* 1700 */ 973, 1088, 1087, 1088, 1087, 860, 557, 150, 928, 1342,
- /* 1710 */ 113, 1354, 554, 1419, 1018, 1271, 1262, 1250, 236, 1249,
- /* 1720 */ 1251, 445, 1587, 1339, 308, 276, 168, 309, 11, 141,
- /* 1730 */ 393, 310, 232, 557, 1401, 1024, 335, 291, 1396, 219,
- /* 1740 */ 336, 109, 109, 934, 297, 1406, 235, 341, 477, 110,
- /* 1750 */ 502, 445, 569, 445, 1389, 1405, 1014, 400, 1289, 365,
- /* 1760 */ 223, 1480, 1024, 1479, 1351, 1352, 1350, 1349, 109, 109,
- /* 1770 */ 204, 1590, 1228, 558, 265, 218, 110, 205, 445, 569,
- /* 1780 */ 445, 410, 387, 1014, 1527, 179, 316, 559, 1014, 1014,
- /* 1790 */ 1016, 1017, 27, 230, 1525, 1225, 79, 560, 85, 4,
- /* 1800 */ 418, 215, 548, 81, 84, 188, 1402, 173, 181, 461,
- /* 1810 */ 451, 35, 462, 563, 183, 1014, 1014, 1016, 1017, 27,
- /* 1820 */ 184, 1485, 185, 186, 495, 242, 98, 398, 1408, 36,
- /* 1830 */ 1407, 484, 91, 469, 401, 1410, 445, 192, 1474, 246,
- /* 1840 */ 1496, 490, 346, 277, 248, 196, 493, 511, 557, 350,
- /* 1850 */ 1252, 249, 250, 403, 1309, 1308, 111, 560, 432, 4,
- /* 1860 */ 1307, 1300, 93, 1604, 881, 1603, 224, 404, 434, 520,
- /* 1870 */ 263, 435, 1573, 563, 1279, 1278, 364, 1024, 306, 1277,
- /* 1880 */ 264, 1602, 1559, 109, 109, 370, 1299, 307, 1558, 438,
- /* 1890 */ 128, 110, 1374, 445, 569, 445, 445, 546, 1014, 10,
- /* 1900 */ 1461, 105, 381, 1373, 34, 571, 99, 1332, 557, 314,
- /* 1910 */ 1182, 530, 272, 274, 379, 210, 1331, 547, 385, 386,
- /* 1920 */ 275, 572, 1247, 1242, 411, 412, 1512, 165, 178, 1513,
- /* 1930 */ 1014, 1014, 1016, 1017, 27, 1511, 1510, 1024, 78, 147,
- /* 1940 */ 166, 220, 221, 109, 109, 834, 304, 167, 446, 212,
- /* 1950 */ 318, 110, 231, 445, 569, 445, 144, 1086, 1014, 1084,
- /* 1960 */ 326, 180, 169, 1207, 182, 334, 238, 913, 241, 1100,
- /* 1970 */ 187, 170, 171, 421, 87, 88, 423, 189, 89, 90,
- /* 1980 */ 172, 1103, 243, 1099, 244, 158, 18, 245, 345, 247,
- /* 1990 */ 1014, 1014, 1016, 1017, 27, 261, 1092, 193, 1222, 489,
- /* 2000 */ 194, 37, 366, 849, 494, 251, 195, 506, 92, 19,
- /* 2010 */ 498, 358, 20, 503, 879, 361, 94, 892, 305, 159,
- /* 2020 */ 513, 39, 95, 1170, 160, 1053, 964, 1139, 96, 174,
- /* 2030 */ 1138, 225, 280, 282, 198, 958, 113, 1160, 1156, 260,
- /* 2040 */ 21, 22, 23, 1158, 1164, 1163, 1144, 24, 33, 25,
- /* 2050 */ 202, 542, 26, 100, 1067, 102, 1054, 103, 7, 1052,
- /* 2060 */ 1056, 1109, 1057, 1108, 266, 267, 28, 40, 390, 1019,
- /* 2070 */ 861, 112, 29, 564, 1178, 1177, 268, 176, 143, 923,
- /* 2080 */ 1238, 1238, 1238, 1238, 1238, 1238, 1238, 1238, 1238, 1238,
- /* 2090 */ 1238, 1238, 1238, 1238, 269, 1595,
+ /* 0 */ 572, 210, 572, 119, 116, 231, 572, 119, 116, 231,
+ /* 10 */ 572, 1317, 379, 1296, 410, 566, 566, 566, 572, 411,
+ /* 20 */ 380, 1317, 1279, 42, 42, 42, 42, 210, 1529, 72,
+ /* 30 */ 72, 974, 421, 42, 42, 495, 305, 281, 305, 975,
+ /* 40 */ 399, 72, 72, 126, 127, 81, 1217, 1217, 1054, 1057,
+ /* 50 */ 1044, 1044, 124, 124, 125, 125, 125, 125, 480, 411,
+ /* 60 */ 1244, 1, 1, 578, 2, 1248, 554, 119, 116, 231,
+ /* 70 */ 319, 484, 147, 484, 528, 119, 116, 231, 533, 1330,
+ /* 80 */ 419, 527, 143, 126, 127, 81, 1217, 1217, 1054, 1057,
+ /* 90 */ 1044, 1044, 124, 124, 125, 125, 125, 125, 119, 116,
+ /* 100 */ 231, 329, 123, 123, 123, 123, 122, 122, 121, 121,
+ /* 110 */ 121, 120, 117, 448, 286, 286, 286, 286, 446, 446,
+ /* 120 */ 446, 1568, 378, 1570, 1193, 377, 1164, 569, 1164, 569,
+ /* 130 */ 411, 1568, 541, 261, 228, 448, 102, 146, 453, 318,
+ /* 140 */ 563, 242, 123, 123, 123, 123, 122, 122, 121, 121,
+ /* 150 */ 121, 120, 117, 448, 126, 127, 81, 1217, 1217, 1054,
+ /* 160 */ 1057, 1044, 1044, 124, 124, 125, 125, 125, 125, 143,
+ /* 170 */ 296, 1193, 341, 452, 121, 121, 121, 120, 117, 448,
+ /* 180 */ 128, 1193, 1194, 1193, 149, 445, 444, 572, 120, 117,
+ /* 190 */ 448, 125, 125, 125, 125, 118, 123, 123, 123, 123,
+ /* 200 */ 122, 122, 121, 121, 121, 120, 117, 448, 458, 114,
+ /* 210 */ 13, 13, 550, 123, 123, 123, 123, 122, 122, 121,
+ /* 220 */ 121, 121, 120, 117, 448, 424, 318, 563, 1193, 1194,
+ /* 230 */ 1193, 150, 1225, 411, 1225, 125, 125, 125, 125, 123,
+ /* 240 */ 123, 123, 123, 122, 122, 121, 121, 121, 120, 117,
+ /* 250 */ 448, 469, 344, 1041, 1041, 1055, 1058, 126, 127, 81,
+ /* 260 */ 1217, 1217, 1054, 1057, 1044, 1044, 124, 124, 125, 125,
+ /* 270 */ 125, 125, 1282, 526, 224, 1193, 572, 411, 226, 519,
+ /* 280 */ 177, 83, 84, 123, 123, 123, 123, 122, 122, 121,
+ /* 290 */ 121, 121, 120, 117, 448, 1010, 16, 16, 1193, 134,
+ /* 300 */ 134, 126, 127, 81, 1217, 1217, 1054, 1057, 1044, 1044,
+ /* 310 */ 124, 124, 125, 125, 125, 125, 123, 123, 123, 123,
+ /* 320 */ 122, 122, 121, 121, 121, 120, 117, 448, 1045, 550,
+ /* 330 */ 1193, 375, 1193, 1194, 1193, 254, 1438, 401, 508, 505,
+ /* 340 */ 504, 112, 564, 570, 4, 929, 929, 435, 503, 342,
+ /* 350 */ 464, 330, 362, 396, 1238, 1193, 1194, 1193, 567, 572,
+ /* 360 */ 123, 123, 123, 123, 122, 122, 121, 121, 121, 120,
+ /* 370 */ 117, 448, 286, 286, 371, 1581, 1607, 445, 444, 155,
+ /* 380 */ 411, 449, 72, 72, 1289, 569, 1222, 1193, 1194, 1193,
+ /* 390 */ 86, 1224, 273, 561, 547, 520, 520, 572, 99, 1223,
+ /* 400 */ 6, 1281, 476, 143, 126, 127, 81, 1217, 1217, 1054,
+ /* 410 */ 1057, 1044, 1044, 124, 124, 125, 125, 125, 125, 554,
+ /* 420 */ 13, 13, 1031, 511, 1225, 1193, 1225, 553, 110, 110,
+ /* 430 */ 224, 572, 1239, 177, 572, 429, 111, 199, 449, 573,
+ /* 440 */ 449, 432, 1555, 1019, 327, 555, 1193, 272, 289, 370,
+ /* 450 */ 514, 365, 513, 259, 72, 72, 547, 72, 72, 361,
+ /* 460 */ 318, 563, 1613, 123, 123, 123, 123, 122, 122, 121,
+ /* 470 */ 121, 121, 120, 117, 448, 1019, 1019, 1021, 1022, 28,
+ /* 480 */ 286, 286, 1193, 1194, 1193, 1159, 572, 1612, 411, 904,
+ /* 490 */ 192, 554, 358, 569, 554, 940, 537, 521, 1159, 437,
+ /* 500 */ 415, 1159, 556, 1193, 1194, 1193, 572, 548, 548, 52,
+ /* 510 */ 52, 216, 126, 127, 81, 1217, 1217, 1054, 1057, 1044,
+ /* 520 */ 1044, 124, 124, 125, 125, 125, 125, 1193, 478, 136,
+ /* 530 */ 136, 411, 286, 286, 1493, 509, 122, 122, 121, 121,
+ /* 540 */ 121, 120, 117, 448, 1010, 569, 522, 219, 545, 545,
+ /* 550 */ 318, 563, 143, 6, 536, 126, 127, 81, 1217, 1217,
+ /* 560 */ 1054, 1057, 1044, 1044, 124, 124, 125, 125, 125, 125,
+ /* 570 */ 1557, 123, 123, 123, 123, 122, 122, 121, 121, 121,
+ /* 580 */ 120, 117, 448, 489, 1193, 1194, 1193, 486, 283, 1270,
+ /* 590 */ 960, 254, 1193, 375, 508, 505, 504, 1193, 342, 574,
+ /* 600 */ 1193, 574, 411, 294, 503, 960, 879, 193, 484, 318,
+ /* 610 */ 563, 386, 292, 382, 123, 123, 123, 123, 122, 122,
+ /* 620 */ 121, 121, 121, 120, 117, 448, 126, 127, 81, 1217,
+ /* 630 */ 1217, 1054, 1057, 1044, 1044, 124, 124, 125, 125, 125,
+ /* 640 */ 125, 411, 396, 1139, 1193, 872, 101, 286, 286, 1193,
+ /* 650 */ 1194, 1193, 375, 1096, 1193, 1194, 1193, 1193, 1194, 1193,
+ /* 660 */ 569, 459, 33, 375, 235, 126, 127, 81, 1217, 1217,
+ /* 670 */ 1054, 1057, 1044, 1044, 124, 124, 125, 125, 125, 125,
+ /* 680 */ 1437, 962, 572, 230, 961, 123, 123, 123, 123, 122,
+ /* 690 */ 122, 121, 121, 121, 120, 117, 448, 1159, 230, 1193,
+ /* 700 */ 158, 1193, 1194, 1193, 1556, 13, 13, 303, 960, 1233,
+ /* 710 */ 1159, 154, 411, 1159, 375, 1584, 1177, 5, 371, 1581,
+ /* 720 */ 431, 1239, 3, 960, 123, 123, 123, 123, 122, 122,
+ /* 730 */ 121, 121, 121, 120, 117, 448, 126, 127, 81, 1217,
+ /* 740 */ 1217, 1054, 1057, 1044, 1044, 124, 124, 125, 125, 125,
+ /* 750 */ 125, 411, 210, 571, 1193, 1032, 1193, 1194, 1193, 1193,
+ /* 760 */ 390, 855, 156, 1555, 376, 404, 1101, 1101, 492, 572,
+ /* 770 */ 469, 344, 1322, 1322, 1555, 126, 127, 81, 1217, 1217,
+ /* 780 */ 1054, 1057, 1044, 1044, 124, 124, 125, 125, 125, 125,
+ /* 790 */ 130, 572, 13, 13, 532, 123, 123, 123, 123, 122,
+ /* 800 */ 122, 121, 121, 121, 120, 117, 448, 304, 572, 457,
+ /* 810 */ 229, 1193, 1194, 1193, 13, 13, 1193, 1194, 1193, 1300,
+ /* 820 */ 467, 1270, 411, 1320, 1320, 1555, 1015, 457, 456, 436,
+ /* 830 */ 301, 72, 72, 1268, 123, 123, 123, 123, 122, 122,
+ /* 840 */ 121, 121, 121, 120, 117, 448, 126, 127, 81, 1217,
+ /* 850 */ 1217, 1054, 1057, 1044, 1044, 124, 124, 125, 125, 125,
+ /* 860 */ 125, 411, 384, 1076, 1159, 286, 286, 421, 314, 280,
+ /* 870 */ 280, 287, 287, 461, 408, 407, 1539, 1159, 569, 572,
+ /* 880 */ 1159, 1196, 569, 409, 569, 126, 127, 81, 1217, 1217,
+ /* 890 */ 1054, 1057, 1044, 1044, 124, 124, 125, 125, 125, 125,
+ /* 900 */ 457, 1485, 13, 13, 1541, 123, 123, 123, 123, 122,
+ /* 910 */ 122, 121, 121, 121, 120, 117, 448, 202, 572, 462,
+ /* 920 */ 1587, 578, 2, 1248, 843, 844, 845, 1563, 319, 409,
+ /* 930 */ 147, 6, 411, 257, 256, 255, 208, 1330, 9, 1196,
+ /* 940 */ 264, 72, 72, 1436, 123, 123, 123, 123, 122, 122,
+ /* 950 */ 121, 121, 121, 120, 117, 448, 126, 127, 81, 1217,
+ /* 960 */ 1217, 1054, 1057, 1044, 1044, 124, 124, 125, 125, 125,
+ /* 970 */ 125, 572, 286, 286, 572, 1213, 411, 577, 315, 1248,
+ /* 980 */ 421, 371, 1581, 356, 319, 569, 147, 495, 529, 1644,
+ /* 990 */ 397, 935, 495, 1330, 71, 71, 934, 72, 72, 242,
+ /* 1000 */ 1328, 105, 81, 1217, 1217, 1054, 1057, 1044, 1044, 124,
+ /* 1010 */ 124, 125, 125, 125, 125, 123, 123, 123, 123, 122,
+ /* 1020 */ 122, 121, 121, 121, 120, 117, 448, 1117, 286, 286,
+ /* 1030 */ 1422, 452, 1528, 1213, 443, 286, 286, 1492, 1355, 313,
+ /* 1040 */ 478, 569, 1118, 454, 351, 495, 354, 1266, 569, 209,
+ /* 1050 */ 572, 418, 179, 572, 1031, 242, 385, 1119, 523, 123,
+ /* 1060 */ 123, 123, 123, 122, 122, 121, 121, 121, 120, 117,
+ /* 1070 */ 448, 1020, 108, 72, 72, 1019, 13, 13, 915, 572,
+ /* 1080 */ 1498, 572, 286, 286, 98, 530, 1537, 452, 916, 1334,
+ /* 1090 */ 1329, 203, 411, 286, 286, 569, 152, 211, 1498, 1500,
+ /* 1100 */ 426, 569, 56, 56, 57, 57, 569, 1019, 1019, 1021,
+ /* 1110 */ 447, 572, 411, 531, 12, 297, 126, 127, 81, 1217,
+ /* 1120 */ 1217, 1054, 1057, 1044, 1044, 124, 124, 125, 125, 125,
+ /* 1130 */ 125, 572, 411, 867, 15, 15, 126, 127, 81, 1217,
+ /* 1140 */ 1217, 1054, 1057, 1044, 1044, 124, 124, 125, 125, 125,
+ /* 1150 */ 125, 373, 529, 264, 44, 44, 126, 115, 81, 1217,
+ /* 1160 */ 1217, 1054, 1057, 1044, 1044, 124, 124, 125, 125, 125,
+ /* 1170 */ 125, 1498, 478, 1271, 417, 123, 123, 123, 123, 122,
+ /* 1180 */ 122, 121, 121, 121, 120, 117, 448, 205, 1213, 495,
+ /* 1190 */ 430, 867, 468, 322, 495, 123, 123, 123, 123, 122,
+ /* 1200 */ 122, 121, 121, 121, 120, 117, 448, 572, 557, 1140,
+ /* 1210 */ 1642, 1422, 1642, 543, 572, 123, 123, 123, 123, 122,
+ /* 1220 */ 122, 121, 121, 121, 120, 117, 448, 572, 1422, 572,
+ /* 1230 */ 13, 13, 542, 323, 1325, 411, 334, 58, 58, 349,
+ /* 1240 */ 1422, 1170, 326, 286, 286, 549, 1213, 300, 895, 530,
+ /* 1250 */ 45, 45, 59, 59, 1140, 1643, 569, 1643, 565, 417,
+ /* 1260 */ 127, 81, 1217, 1217, 1054, 1057, 1044, 1044, 124, 124,
+ /* 1270 */ 125, 125, 125, 125, 1367, 373, 500, 290, 1193, 512,
+ /* 1280 */ 1366, 427, 394, 394, 393, 275, 391, 896, 1138, 852,
+ /* 1290 */ 478, 258, 1422, 1170, 463, 1159, 12, 331, 428, 333,
+ /* 1300 */ 1117, 460, 236, 258, 325, 460, 544, 1544, 1159, 1098,
+ /* 1310 */ 491, 1159, 324, 1098, 440, 1118, 335, 516, 123, 123,
+ /* 1320 */ 123, 123, 122, 122, 121, 121, 121, 120, 117, 448,
+ /* 1330 */ 1119, 318, 563, 1138, 572, 1193, 1194, 1193, 112, 564,
+ /* 1340 */ 201, 4, 238, 433, 935, 490, 285, 228, 1517, 934,
+ /* 1350 */ 170, 560, 572, 142, 1516, 567, 572, 60, 60, 572,
+ /* 1360 */ 416, 572, 441, 572, 535, 302, 875, 8, 487, 572,
+ /* 1370 */ 237, 572, 416, 572, 485, 61, 61, 572, 449, 62,
+ /* 1380 */ 62, 332, 63, 63, 46, 46, 47, 47, 361, 572,
+ /* 1390 */ 561, 572, 48, 48, 50, 50, 51, 51, 572, 295,
+ /* 1400 */ 64, 64, 482, 295, 539, 412, 471, 1031, 572, 538,
+ /* 1410 */ 318, 563, 65, 65, 66, 66, 409, 475, 572, 1031,
+ /* 1420 */ 572, 14, 14, 875, 1020, 110, 110, 409, 1019, 572,
+ /* 1430 */ 474, 67, 67, 111, 455, 449, 573, 449, 98, 317,
+ /* 1440 */ 1019, 132, 132, 133, 133, 572, 1561, 572, 974, 409,
+ /* 1450 */ 6, 1562, 68, 68, 1560, 6, 975, 572, 6, 1559,
+ /* 1460 */ 1019, 1019, 1021, 6, 346, 218, 101, 531, 53, 53,
+ /* 1470 */ 69, 69, 1019, 1019, 1021, 1022, 28, 1586, 1181, 451,
+ /* 1480 */ 70, 70, 290, 87, 215, 31, 1363, 394, 394, 393,
+ /* 1490 */ 275, 391, 350, 109, 852, 107, 572, 112, 564, 483,
+ /* 1500 */ 4, 1212, 572, 239, 153, 572, 39, 236, 1299, 325,
+ /* 1510 */ 112, 564, 1298, 4, 567, 572, 32, 324, 572, 54,
+ /* 1520 */ 54, 572, 1135, 353, 398, 165, 165, 567, 166, 166,
+ /* 1530 */ 572, 291, 355, 572, 17, 357, 572, 449, 77, 77,
+ /* 1540 */ 1313, 55, 55, 1297, 73, 73, 572, 238, 470, 561,
+ /* 1550 */ 449, 472, 364, 135, 135, 170, 74, 74, 142, 163,
+ /* 1560 */ 163, 374, 561, 539, 572, 321, 572, 886, 540, 137,
+ /* 1570 */ 137, 339, 1353, 422, 298, 237, 539, 572, 1031, 572,
+ /* 1580 */ 340, 538, 101, 369, 110, 110, 162, 131, 131, 164,
+ /* 1590 */ 164, 1031, 111, 368, 449, 573, 449, 110, 110, 1019,
+ /* 1600 */ 157, 157, 141, 141, 572, 111, 572, 449, 573, 449,
+ /* 1610 */ 412, 288, 1019, 572, 882, 318, 563, 572, 219, 572,
+ /* 1620 */ 241, 1012, 477, 263, 263, 894, 893, 140, 140, 138,
+ /* 1630 */ 138, 1019, 1019, 1021, 1022, 28, 139, 139, 525, 455,
+ /* 1640 */ 76, 76, 78, 78, 1019, 1019, 1021, 1022, 28, 1181,
+ /* 1650 */ 451, 572, 1083, 290, 112, 564, 1575, 4, 394, 394,
+ /* 1660 */ 393, 275, 391, 572, 1023, 852, 572, 479, 345, 263,
+ /* 1670 */ 101, 567, 882, 1376, 75, 75, 1421, 501, 236, 260,
+ /* 1680 */ 325, 112, 564, 359, 4, 101, 43, 43, 324, 49,
+ /* 1690 */ 49, 901, 902, 161, 449, 101, 977, 978, 567, 1079,
+ /* 1700 */ 1349, 260, 965, 932, 263, 114, 561, 1095, 517, 1095,
+ /* 1710 */ 1083, 1094, 865, 1094, 151, 933, 1144, 114, 238, 1361,
+ /* 1720 */ 558, 449, 1023, 559, 1426, 1278, 170, 1269, 1257, 142,
+ /* 1730 */ 1601, 1256, 1258, 561, 1594, 1031, 496, 278, 213, 1346,
+ /* 1740 */ 310, 110, 110, 939, 311, 312, 237, 11, 234, 111,
+ /* 1750 */ 221, 449, 573, 449, 293, 395, 1019, 1408, 337, 1403,
+ /* 1760 */ 1396, 338, 1031, 299, 343, 1413, 1412, 481, 110, 110,
+ /* 1770 */ 506, 402, 225, 1296, 206, 367, 111, 1358, 449, 573,
+ /* 1780 */ 449, 412, 1359, 1019, 1489, 1488, 318, 563, 1019, 1019,
+ /* 1790 */ 1021, 1022, 28, 562, 207, 220, 80, 564, 389, 4,
+ /* 1800 */ 1597, 1357, 552, 1356, 1233, 181, 267, 232, 1536, 1534,
+ /* 1810 */ 455, 1230, 420, 567, 82, 1019, 1019, 1021, 1022, 28,
+ /* 1820 */ 86, 217, 85, 1494, 190, 175, 183, 465, 185, 466,
+ /* 1830 */ 36, 1409, 186, 187, 188, 499, 449, 244, 37, 99,
+ /* 1840 */ 400, 1415, 1414, 488, 1417, 194, 473, 403, 561, 1483,
+ /* 1850 */ 248, 92, 1505, 494, 198, 279, 112, 564, 250, 4,
+ /* 1860 */ 348, 497, 405, 352, 1259, 251, 252, 515, 1316, 434,
+ /* 1870 */ 1315, 1314, 94, 567, 1307, 886, 1306, 1031, 226, 406,
+ /* 1880 */ 1611, 1610, 438, 110, 110, 1580, 1286, 524, 439, 308,
+ /* 1890 */ 266, 111, 1285, 449, 573, 449, 449, 309, 1019, 366,
+ /* 1900 */ 1284, 1609, 265, 1566, 1565, 442, 372, 1381, 561, 129,
+ /* 1910 */ 550, 1380, 10, 1470, 383, 106, 316, 551, 100, 35,
+ /* 1920 */ 534, 575, 212, 1339, 381, 387, 1187, 1338, 274, 276,
+ /* 1930 */ 1019, 1019, 1021, 1022, 28, 277, 413, 1031, 576, 1254,
+ /* 1940 */ 388, 1521, 1249, 110, 110, 167, 1522, 168, 148, 1520,
+ /* 1950 */ 1519, 111, 306, 449, 573, 449, 222, 223, 1019, 839,
+ /* 1960 */ 169, 79, 450, 214, 414, 233, 320, 145, 1093, 1091,
+ /* 1970 */ 328, 182, 171, 1212, 918, 184, 240, 336, 243, 1107,
+ /* 1980 */ 189, 172, 173, 423, 425, 88, 180, 191, 89, 90,
+ /* 1990 */ 1019, 1019, 1021, 1022, 28, 91, 174, 1110, 245, 1106,
+ /* 2000 */ 246, 159, 18, 247, 347, 1099, 263, 195, 1227, 493,
+ /* 2010 */ 249, 196, 38, 854, 498, 368, 253, 360, 897, 197,
+ /* 2020 */ 502, 93, 19, 20, 507, 884, 363, 510, 95, 307,
+ /* 2030 */ 160, 96, 518, 97, 1175, 1060, 1146, 40, 21, 227,
+ /* 2040 */ 176, 1145, 282, 284, 969, 200, 963, 114, 262, 1165,
+ /* 2050 */ 22, 23, 24, 1161, 1169, 25, 1163, 1150, 34, 26,
+ /* 2060 */ 1168, 546, 27, 204, 101, 103, 104, 1074, 7, 1061,
+ /* 2070 */ 1059, 1063, 1116, 1064, 1115, 268, 269, 29, 41, 270,
+ /* 2080 */ 1024, 866, 113, 30, 568, 392, 1183, 144, 178, 1182,
+ /* 2090 */ 271, 928, 1245, 1245, 1245, 1245, 1245, 1245, 1245, 1602,
};
static const YYCODETYPE yy_lookahead[] = {
/* 0 */ 193, 193, 193, 274, 275, 276, 193, 274, 275, 276,
@@ -168597,7 +171787,7 @@ static const YYCODETYPE yy_lookahead[] = {
/* 730 */ 108, 109, 110, 111, 112, 113, 43, 44, 45, 46,
/* 740 */ 47, 48, 49, 50, 51, 52, 53, 54, 55, 56,
/* 750 */ 57, 19, 193, 193, 59, 23, 116, 117, 118, 59,
- /* 760 */ 201, 21, 241, 304, 22, 206, 127, 128, 129, 193,
+ /* 760 */ 201, 21, 241, 304, 193, 206, 127, 128, 129, 193,
/* 770 */ 128, 129, 235, 236, 304, 43, 44, 45, 46, 47,
/* 780 */ 48, 49, 50, 51, 52, 53, 54, 55, 56, 57,
/* 790 */ 22, 193, 216, 217, 193, 102, 103, 104, 105, 106,
@@ -168608,129 +171798,129 @@ static const YYCODETYPE yy_lookahead[] = {
/* 840 */ 108, 109, 110, 111, 112, 113, 43, 44, 45, 46,
/* 850 */ 47, 48, 49, 50, 51, 52, 53, 54, 55, 56,
/* 860 */ 57, 19, 193, 123, 76, 239, 240, 193, 253, 239,
- /* 870 */ 240, 239, 240, 193, 106, 107, 193, 89, 252, 193,
- /* 880 */ 92, 59, 252, 141, 252, 43, 44, 45, 46, 47,
+ /* 870 */ 240, 239, 240, 244, 106, 107, 193, 89, 252, 193,
+ /* 880 */ 92, 59, 252, 254, 252, 43, 44, 45, 46, 47,
/* 890 */ 48, 49, 50, 51, 52, 53, 54, 55, 56, 57,
/* 900 */ 284, 161, 216, 217, 193, 102, 103, 104, 105, 106,
- /* 910 */ 107, 108, 109, 110, 111, 112, 113, 231, 193, 16,
- /* 920 */ 187, 188, 189, 190, 7, 8, 9, 309, 195, 25,
+ /* 910 */ 107, 108, 109, 110, 111, 112, 113, 231, 193, 244,
+ /* 920 */ 187, 188, 189, 190, 7, 8, 9, 309, 195, 254,
/* 930 */ 197, 313, 19, 127, 128, 129, 262, 204, 22, 117,
- /* 940 */ 24, 216, 217, 263, 102, 103, 104, 105, 106, 107,
+ /* 940 */ 24, 216, 217, 273, 102, 103, 104, 105, 106, 107,
/* 950 */ 108, 109, 110, 111, 112, 113, 43, 44, 45, 46,
/* 960 */ 47, 48, 49, 50, 51, 52, 53, 54, 55, 56,
/* 970 */ 57, 193, 239, 240, 193, 59, 19, 188, 253, 190,
- /* 980 */ 77, 226, 79, 193, 195, 252, 197, 193, 19, 301,
- /* 990 */ 302, 193, 193, 204, 216, 217, 226, 216, 217, 266,
+ /* 980 */ 193, 311, 312, 16, 195, 252, 197, 193, 19, 301,
+ /* 990 */ 302, 135, 193, 204, 216, 217, 140, 216, 217, 266,
/* 1000 */ 204, 159, 45, 46, 47, 48, 49, 50, 51, 52,
/* 1010 */ 53, 54, 55, 56, 57, 102, 103, 104, 105, 106,
/* 1020 */ 107, 108, 109, 110, 111, 112, 113, 12, 239, 240,
- /* 1030 */ 232, 298, 238, 117, 253, 239, 240, 238, 259, 260,
- /* 1040 */ 193, 252, 27, 31, 193, 193, 142, 204, 252, 193,
- /* 1050 */ 193, 39, 262, 193, 100, 266, 278, 42, 204, 102,
+ /* 1030 */ 193, 298, 238, 117, 253, 239, 240, 238, 259, 260,
+ /* 1040 */ 193, 252, 27, 193, 77, 193, 79, 204, 252, 262,
+ /* 1050 */ 193, 299, 300, 193, 100, 266, 278, 42, 204, 102,
/* 1060 */ 103, 104, 105, 106, 107, 108, 109, 110, 111, 112,
/* 1070 */ 113, 117, 159, 216, 217, 121, 216, 217, 63, 193,
- /* 1080 */ 193, 193, 239, 240, 115, 116, 193, 298, 73, 238,
+ /* 1080 */ 193, 193, 239, 240, 115, 116, 193, 298, 73, 240,
/* 1090 */ 238, 231, 19, 239, 240, 252, 22, 24, 211, 212,
- /* 1100 */ 24, 193, 216, 217, 216, 217, 252, 153, 154, 155,
- /* 1110 */ 253, 16, 19, 144, 213, 268, 43, 44, 45, 46,
+ /* 1100 */ 263, 252, 216, 217, 216, 217, 252, 153, 154, 155,
+ /* 1110 */ 253, 193, 19, 144, 213, 268, 43, 44, 45, 46,
/* 1120 */ 47, 48, 49, 50, 51, 52, 53, 54, 55, 56,
- /* 1130 */ 57, 238, 19, 59, 193, 59, 43, 44, 45, 46,
+ /* 1130 */ 57, 193, 19, 59, 216, 217, 43, 44, 45, 46,
/* 1140 */ 47, 48, 49, 50, 51, 52, 53, 54, 55, 56,
- /* 1150 */ 57, 22, 23, 193, 25, 193, 43, 44, 45, 46,
+ /* 1150 */ 57, 193, 19, 24, 216, 217, 43, 44, 45, 46,
/* 1160 */ 47, 48, 49, 50, 51, 52, 53, 54, 55, 56,
- /* 1170 */ 57, 284, 77, 193, 79, 102, 103, 104, 105, 106,
- /* 1180 */ 107, 108, 109, 110, 111, 112, 113, 286, 193, 193,
- /* 1190 */ 193, 117, 291, 117, 232, 102, 103, 104, 105, 106,
- /* 1200 */ 107, 108, 109, 110, 111, 112, 113, 204, 22, 23,
- /* 1210 */ 66, 25, 216, 217, 35, 102, 103, 104, 105, 106,
- /* 1220 */ 107, 108, 109, 110, 111, 112, 113, 193, 268, 85,
- /* 1230 */ 101, 193, 309, 309, 240, 19, 313, 313, 94, 208,
- /* 1240 */ 209, 193, 239, 240, 193, 66, 252, 19, 268, 244,
- /* 1250 */ 216, 217, 193, 74, 213, 252, 161, 19, 263, 254,
+ /* 1170 */ 57, 284, 193, 208, 209, 102, 103, 104, 105, 106,
+ /* 1180 */ 107, 108, 109, 110, 111, 112, 113, 286, 59, 193,
+ /* 1190 */ 232, 117, 291, 193, 193, 102, 103, 104, 105, 106,
+ /* 1200 */ 107, 108, 109, 110, 111, 112, 113, 193, 204, 22,
+ /* 1210 */ 23, 193, 25, 66, 193, 102, 103, 104, 105, 106,
+ /* 1220 */ 107, 108, 109, 110, 111, 112, 113, 193, 193, 193,
+ /* 1230 */ 216, 217, 85, 193, 238, 19, 16, 216, 217, 238,
+ /* 1240 */ 193, 94, 193, 239, 240, 231, 117, 268, 35, 116,
+ /* 1250 */ 216, 217, 216, 217, 22, 23, 252, 25, 208, 209,
/* 1260 */ 44, 45, 46, 47, 48, 49, 50, 51, 52, 53,
- /* 1270 */ 54, 55, 56, 57, 193, 216, 217, 5, 59, 193,
- /* 1280 */ 19, 244, 10, 11, 12, 13, 14, 101, 309, 17,
- /* 1290 */ 146, 254, 313, 193, 193, 76, 115, 216, 217, 309,
- /* 1300 */ 12, 263, 30, 313, 32, 46, 87, 46, 89, 130,
- /* 1310 */ 193, 92, 40, 22, 263, 27, 216, 217, 102, 103,
+ /* 1270 */ 54, 55, 56, 57, 193, 193, 19, 5, 59, 66,
+ /* 1280 */ 193, 263, 10, 11, 12, 13, 14, 74, 101, 17,
+ /* 1290 */ 193, 46, 193, 146, 193, 76, 213, 77, 263, 79,
+ /* 1300 */ 12, 260, 30, 46, 32, 264, 87, 193, 89, 29,
+ /* 1310 */ 263, 92, 40, 33, 232, 27, 193, 108, 102, 103,
/* 1320 */ 104, 105, 106, 107, 108, 109, 110, 111, 112, 113,
- /* 1330 */ 42, 150, 291, 216, 217, 116, 117, 118, 19, 20,
- /* 1340 */ 193, 22, 70, 260, 116, 193, 24, 264, 193, 263,
- /* 1350 */ 78, 63, 61, 81, 116, 36, 193, 260, 193, 29,
- /* 1360 */ 193, 264, 193, 33, 145, 193, 59, 48, 216, 217,
- /* 1370 */ 98, 216, 217, 193, 115, 193, 115, 193, 59, 216,
- /* 1380 */ 217, 216, 217, 216, 217, 216, 217, 255, 216, 217,
- /* 1390 */ 71, 193, 131, 193, 25, 65, 216, 217, 216, 217,
- /* 1400 */ 216, 217, 208, 209, 85, 133, 193, 100, 193, 90,
- /* 1410 */ 138, 139, 138, 139, 216, 217, 216, 217, 193, 100,
- /* 1420 */ 193, 108, 135, 116, 117, 106, 107, 140, 121, 216,
- /* 1430 */ 217, 216, 217, 114, 162, 116, 117, 118, 299, 300,
- /* 1440 */ 121, 216, 217, 216, 217, 193, 244, 193, 135, 244,
- /* 1450 */ 193, 256, 257, 140, 244, 193, 254, 193, 193, 254,
- /* 1460 */ 153, 154, 155, 141, 254, 149, 150, 258, 216, 217,
+ /* 1330 */ 42, 138, 139, 101, 193, 116, 117, 118, 19, 20,
+ /* 1340 */ 255, 22, 70, 130, 135, 65, 256, 257, 193, 140,
+ /* 1350 */ 78, 63, 193, 81, 193, 36, 193, 216, 217, 193,
+ /* 1360 */ 115, 193, 263, 193, 145, 268, 59, 48, 193, 193,
+ /* 1370 */ 98, 193, 115, 193, 291, 216, 217, 193, 59, 216,
+ /* 1380 */ 217, 161, 216, 217, 216, 217, 216, 217, 131, 193,
+ /* 1390 */ 71, 193, 216, 217, 216, 217, 216, 217, 193, 260,
+ /* 1400 */ 216, 217, 19, 264, 85, 133, 244, 100, 193, 90,
+ /* 1410 */ 138, 139, 216, 217, 216, 217, 254, 244, 193, 100,
+ /* 1420 */ 193, 216, 217, 116, 117, 106, 107, 254, 121, 193,
+ /* 1430 */ 115, 216, 217, 114, 162, 116, 117, 118, 115, 244,
+ /* 1440 */ 121, 216, 217, 216, 217, 193, 309, 193, 31, 254,
+ /* 1450 */ 313, 309, 216, 217, 309, 313, 39, 193, 313, 309,
+ /* 1460 */ 153, 154, 155, 313, 193, 150, 25, 144, 216, 217,
/* 1470 */ 216, 217, 153, 154, 155, 156, 157, 0, 1, 2,
- /* 1480 */ 216, 217, 5, 115, 158, 193, 160, 10, 11, 12,
- /* 1490 */ 13, 14, 193, 59, 17, 126, 193, 19, 20, 129,
- /* 1500 */ 22, 193, 22, 22, 24, 193, 23, 30, 25, 32,
- /* 1510 */ 19, 20, 144, 22, 36, 216, 217, 40, 193, 216,
- /* 1520 */ 217, 193, 152, 129, 216, 217, 193, 36, 216, 217,
- /* 1530 */ 193, 99, 193, 193, 53, 193, 193, 59, 23, 193,
- /* 1540 */ 25, 216, 217, 193, 216, 217, 152, 70, 59, 71,
- /* 1550 */ 59, 117, 193, 216, 217, 78, 216, 217, 81, 216,
- /* 1560 */ 217, 318, 71, 85, 193, 133, 193, 193, 90, 23,
- /* 1570 */ 23, 25, 25, 120, 121, 98, 85, 193, 100, 193,
- /* 1580 */ 23, 90, 25, 121, 106, 107, 19, 216, 217, 216,
+ /* 1480 */ 216, 217, 5, 149, 150, 22, 193, 10, 11, 12,
+ /* 1490 */ 13, 14, 193, 158, 17, 160, 193, 19, 20, 116,
+ /* 1500 */ 22, 25, 193, 24, 22, 193, 24, 30, 226, 32,
+ /* 1510 */ 19, 20, 226, 22, 36, 193, 53, 40, 193, 216,
+ /* 1520 */ 217, 193, 23, 193, 25, 216, 217, 36, 216, 217,
+ /* 1530 */ 193, 99, 193, 193, 22, 193, 193, 59, 216, 217,
+ /* 1540 */ 193, 216, 217, 193, 216, 217, 193, 70, 129, 71,
+ /* 1550 */ 59, 129, 193, 216, 217, 78, 216, 217, 81, 216,
+ /* 1560 */ 217, 193, 71, 85, 193, 133, 193, 126, 90, 216,
+ /* 1570 */ 217, 152, 258, 61, 152, 98, 85, 193, 100, 193,
+ /* 1580 */ 23, 90, 25, 121, 106, 107, 23, 216, 217, 216,
/* 1590 */ 217, 100, 114, 131, 116, 117, 118, 106, 107, 121,
- /* 1600 */ 216, 217, 216, 217, 193, 114, 117, 116, 117, 118,
- /* 1610 */ 133, 193, 121, 193, 193, 138, 139, 193, 23, 193,
- /* 1620 */ 25, 23, 23, 25, 25, 7, 8, 216, 217, 193,
- /* 1630 */ 193, 153, 154, 155, 156, 157, 216, 217, 193, 162,
+ /* 1600 */ 216, 217, 216, 217, 193, 114, 193, 116, 117, 118,
+ /* 1610 */ 133, 22, 121, 193, 59, 138, 139, 193, 142, 193,
+ /* 1620 */ 141, 23, 23, 25, 25, 120, 121, 216, 217, 216,
+ /* 1630 */ 217, 153, 154, 155, 156, 157, 216, 217, 19, 162,
/* 1640 */ 216, 217, 216, 217, 153, 154, 155, 156, 157, 1,
- /* 1650 */ 2, 193, 193, 5, 19, 20, 59, 22, 10, 11,
- /* 1660 */ 12, 13, 14, 193, 97, 17, 193, 23, 193, 25,
- /* 1670 */ 288, 36, 193, 242, 216, 217, 236, 23, 30, 25,
+ /* 1650 */ 2, 193, 59, 5, 19, 20, 318, 22, 10, 11,
+ /* 1660 */ 12, 13, 14, 193, 59, 17, 193, 23, 23, 25,
+ /* 1670 */ 25, 36, 117, 193, 216, 217, 193, 23, 30, 25,
/* 1680 */ 32, 19, 20, 23, 22, 25, 216, 217, 40, 216,
- /* 1690 */ 217, 216, 217, 193, 59, 216, 217, 193, 36, 83,
- /* 1700 */ 84, 153, 153, 155, 155, 23, 71, 25, 23, 193,
- /* 1710 */ 25, 193, 193, 193, 117, 193, 193, 193, 70, 193,
- /* 1720 */ 193, 59, 193, 255, 255, 287, 78, 255, 243, 81,
- /* 1730 */ 191, 255, 297, 71, 271, 100, 293, 245, 267, 214,
- /* 1740 */ 246, 106, 107, 108, 246, 271, 98, 245, 293, 114,
- /* 1750 */ 220, 116, 117, 118, 267, 271, 121, 271, 225, 219,
- /* 1760 */ 229, 219, 100, 219, 259, 259, 259, 259, 106, 107,
- /* 1770 */ 249, 196, 60, 280, 141, 243, 114, 249, 116, 117,
- /* 1780 */ 118, 133, 245, 121, 200, 297, 138, 139, 153, 154,
- /* 1790 */ 155, 156, 157, 297, 200, 38, 19, 20, 151, 22,
- /* 1800 */ 200, 150, 140, 294, 294, 22, 272, 43, 234, 18,
- /* 1810 */ 162, 270, 200, 36, 237, 153, 154, 155, 156, 157,
- /* 1820 */ 237, 283, 237, 237, 18, 199, 149, 246, 272, 270,
- /* 1830 */ 272, 200, 158, 246, 246, 234, 59, 234, 246, 199,
- /* 1840 */ 290, 62, 289, 200, 199, 22, 221, 115, 71, 200,
- /* 1850 */ 200, 199, 199, 221, 218, 218, 19, 20, 64, 22,
- /* 1860 */ 218, 227, 22, 224, 126, 224, 165, 221, 24, 305,
- /* 1870 */ 200, 113, 312, 36, 218, 220, 218, 100, 282, 218,
- /* 1880 */ 91, 218, 317, 106, 107, 221, 227, 282, 317, 82,
- /* 1890 */ 148, 114, 265, 116, 117, 118, 59, 145, 121, 22,
- /* 1900 */ 277, 158, 200, 265, 25, 202, 147, 250, 71, 279,
- /* 1910 */ 13, 146, 194, 194, 249, 248, 250, 140, 247, 246,
- /* 1920 */ 6, 192, 192, 192, 303, 303, 213, 207, 300, 213,
- /* 1930 */ 153, 154, 155, 156, 157, 213, 213, 100, 213, 222,
- /* 1940 */ 207, 214, 214, 106, 107, 4, 222, 207, 3, 22,
- /* 1950 */ 163, 114, 15, 116, 117, 118, 16, 23, 121, 23,
- /* 1960 */ 139, 151, 130, 25, 142, 16, 24, 20, 144, 1,
- /* 1970 */ 142, 130, 130, 61, 53, 53, 37, 151, 53, 53,
- /* 1980 */ 130, 116, 34, 1, 141, 5, 22, 115, 161, 141,
- /* 1990 */ 153, 154, 155, 156, 157, 25, 68, 68, 75, 41,
- /* 2000 */ 115, 24, 131, 20, 19, 125, 22, 96, 22, 22,
- /* 2010 */ 67, 23, 22, 67, 59, 24, 22, 28, 67, 23,
- /* 2020 */ 22, 22, 149, 23, 23, 23, 116, 23, 25, 37,
- /* 2030 */ 97, 141, 23, 23, 22, 143, 25, 75, 88, 34,
- /* 2040 */ 34, 34, 34, 86, 75, 93, 23, 34, 22, 34,
- /* 2050 */ 25, 24, 34, 25, 23, 142, 23, 142, 44, 23,
- /* 2060 */ 23, 23, 11, 23, 25, 22, 22, 22, 15, 23,
- /* 2070 */ 23, 22, 22, 25, 1, 1, 141, 25, 23, 135,
- /* 2080 */ 319, 319, 319, 319, 319, 319, 319, 319, 319, 319,
- /* 2090 */ 319, 319, 319, 319, 141, 141, 319, 319, 319, 319,
+ /* 1690 */ 217, 7, 8, 23, 59, 25, 83, 84, 36, 23,
+ /* 1700 */ 193, 25, 23, 23, 25, 25, 71, 153, 145, 155,
+ /* 1710 */ 117, 153, 23, 155, 25, 23, 97, 25, 70, 193,
+ /* 1720 */ 193, 59, 117, 236, 193, 193, 78, 193, 193, 81,
+ /* 1730 */ 141, 193, 193, 71, 193, 100, 288, 287, 242, 255,
+ /* 1740 */ 255, 106, 107, 108, 255, 255, 98, 243, 297, 114,
+ /* 1750 */ 214, 116, 117, 118, 245, 191, 121, 271, 293, 267,
+ /* 1760 */ 267, 246, 100, 246, 245, 271, 271, 293, 106, 107,
+ /* 1770 */ 220, 271, 229, 225, 249, 219, 114, 259, 116, 117,
+ /* 1780 */ 118, 133, 259, 121, 219, 219, 138, 139, 153, 154,
+ /* 1790 */ 155, 156, 157, 280, 249, 243, 19, 20, 245, 22,
+ /* 1800 */ 196, 259, 140, 259, 60, 297, 141, 297, 200, 200,
+ /* 1810 */ 162, 38, 200, 36, 294, 153, 154, 155, 156, 157,
+ /* 1820 */ 151, 150, 294, 283, 22, 43, 234, 18, 237, 200,
+ /* 1830 */ 270, 272, 237, 237, 237, 18, 59, 199, 270, 149,
+ /* 1840 */ 246, 272, 272, 200, 234, 234, 246, 246, 71, 246,
+ /* 1850 */ 199, 158, 290, 62, 22, 200, 19, 20, 199, 22,
+ /* 1860 */ 289, 221, 221, 200, 200, 199, 199, 115, 218, 64,
+ /* 1870 */ 218, 218, 22, 36, 227, 126, 227, 100, 165, 221,
+ /* 1880 */ 224, 224, 24, 106, 107, 312, 218, 305, 113, 282,
+ /* 1890 */ 91, 114, 220, 116, 117, 118, 59, 282, 121, 218,
+ /* 1900 */ 218, 218, 200, 317, 317, 82, 221, 265, 71, 148,
+ /* 1910 */ 145, 265, 22, 277, 200, 158, 279, 140, 147, 25,
+ /* 1920 */ 146, 202, 248, 250, 249, 247, 13, 250, 194, 194,
+ /* 1930 */ 153, 154, 155, 156, 157, 6, 303, 100, 192, 192,
+ /* 1940 */ 246, 213, 192, 106, 107, 207, 213, 207, 222, 213,
+ /* 1950 */ 213, 114, 222, 116, 117, 118, 214, 214, 121, 4,
+ /* 1960 */ 207, 213, 3, 22, 303, 15, 163, 16, 23, 23,
+ /* 1970 */ 139, 151, 130, 25, 20, 142, 24, 16, 144, 1,
+ /* 1980 */ 142, 130, 130, 61, 37, 53, 300, 151, 53, 53,
+ /* 1990 */ 153, 154, 155, 156, 157, 53, 130, 116, 34, 1,
+ /* 2000 */ 141, 5, 22, 115, 161, 68, 25, 68, 75, 41,
+ /* 2010 */ 141, 115, 24, 20, 19, 131, 125, 23, 28, 22,
+ /* 2020 */ 67, 22, 22, 22, 67, 59, 24, 96, 22, 67,
+ /* 2030 */ 23, 149, 22, 25, 23, 23, 23, 22, 34, 141,
+ /* 2040 */ 37, 97, 23, 23, 116, 22, 143, 25, 34, 75,
+ /* 2050 */ 34, 34, 34, 88, 75, 34, 86, 23, 22, 34,
+ /* 2060 */ 93, 24, 34, 25, 25, 142, 142, 23, 44, 23,
+ /* 2070 */ 23, 23, 23, 11, 23, 25, 22, 22, 22, 141,
+ /* 2080 */ 23, 23, 22, 22, 25, 15, 1, 23, 25, 1,
+ /* 2090 */ 141, 135, 319, 319, 319, 319, 319, 319, 319, 141,
/* 2100 */ 319, 319, 319, 319, 319, 319, 319, 319, 319, 319,
/* 2110 */ 319, 319, 319, 319, 319, 319, 319, 319, 319, 319,
/* 2120 */ 319, 319, 319, 319, 319, 319, 319, 319, 319, 319,
@@ -168749,176 +171939,177 @@ static const YYCODETYPE yy_lookahead[] = {
/* 2250 */ 319, 319, 319, 319, 319, 319, 319, 319, 319, 319,
/* 2260 */ 319, 319, 319, 319, 319, 319, 319, 319, 319, 319,
/* 2270 */ 319, 319, 319, 319, 319, 319, 319, 319, 319, 319,
- /* 2280 */ 319,
+ /* 2280 */ 319, 319, 319, 319, 319,
};
-#define YY_SHIFT_COUNT (574)
+#define YY_SHIFT_COUNT (578)
#define YY_SHIFT_MIN (0)
-#define YY_SHIFT_MAX (2074)
+#define YY_SHIFT_MAX (2088)
static const unsigned short int yy_shift_ofst[] = {
/* 0 */ 1648, 1477, 1272, 322, 322, 1, 1319, 1478, 1491, 1837,
/* 10 */ 1837, 1837, 471, 0, 0, 214, 1093, 1837, 1837, 1837,
/* 20 */ 1837, 1837, 1837, 1837, 1837, 1837, 1837, 1837, 1837, 1837,
- /* 30 */ 271, 271, 1219, 1219, 216, 88, 1, 1, 1, 1,
- /* 40 */ 1, 40, 111, 258, 361, 469, 512, 583, 622, 693,
- /* 50 */ 732, 803, 842, 913, 1073, 1093, 1093, 1093, 1093, 1093,
+ /* 30 */ 1837, 271, 271, 1219, 1219, 216, 88, 1, 1, 1,
+ /* 40 */ 1, 1, 40, 111, 258, 361, 469, 512, 583, 622,
+ /* 50 */ 693, 732, 803, 842, 913, 1073, 1093, 1093, 1093, 1093,
/* 60 */ 1093, 1093, 1093, 1093, 1093, 1093, 1093, 1093, 1093, 1093,
- /* 70 */ 1093, 1093, 1093, 1113, 1093, 1216, 957, 957, 1635, 1662,
- /* 80 */ 1777, 1837, 1837, 1837, 1837, 1837, 1837, 1837, 1837, 1837,
+ /* 70 */ 1093, 1093, 1093, 1093, 1113, 1093, 1216, 957, 957, 1635,
+ /* 80 */ 1662, 1777, 1837, 1837, 1837, 1837, 1837, 1837, 1837, 1837,
/* 90 */ 1837, 1837, 1837, 1837, 1837, 1837, 1837, 1837, 1837, 1837,
/* 100 */ 1837, 1837, 1837, 1837, 1837, 1837, 1837, 1837, 1837, 1837,
/* 110 */ 1837, 1837, 1837, 1837, 1837, 1837, 1837, 1837, 1837, 1837,
/* 120 */ 1837, 1837, 1837, 1837, 1837, 1837, 1837, 1837, 1837, 1837,
- /* 130 */ 137, 181, 181, 181, 181, 181, 181, 181, 94, 430,
- /* 140 */ 66, 65, 112, 366, 533, 533, 740, 1261, 533, 533,
- /* 150 */ 79, 79, 533, 412, 412, 412, 77, 412, 123, 113,
- /* 160 */ 113, 22, 22, 2096, 2096, 328, 328, 328, 239, 468,
- /* 170 */ 468, 468, 468, 1015, 1015, 409, 366, 1129, 1186, 533,
- /* 180 */ 533, 533, 533, 533, 533, 533, 533, 533, 533, 533,
- /* 190 */ 533, 533, 533, 533, 533, 533, 533, 533, 533, 969,
- /* 200 */ 621, 621, 533, 642, 788, 788, 1228, 1228, 822, 822,
- /* 210 */ 67, 1274, 2096, 2096, 2096, 2096, 2096, 2096, 2096, 1307,
- /* 220 */ 954, 954, 585, 472, 640, 387, 695, 538, 541, 700,
- /* 230 */ 533, 533, 533, 533, 533, 533, 533, 533, 533, 533,
- /* 240 */ 222, 533, 533, 533, 533, 533, 533, 533, 533, 533,
- /* 250 */ 533, 533, 533, 1179, 1179, 1179, 533, 533, 533, 565,
- /* 260 */ 533, 533, 533, 916, 1144, 533, 533, 1288, 533, 533,
- /* 270 */ 533, 533, 533, 533, 533, 533, 639, 1330, 209, 1076,
- /* 280 */ 1076, 1076, 1076, 580, 209, 209, 1313, 768, 917, 649,
- /* 290 */ 1181, 1316, 405, 1316, 1238, 249, 1181, 1181, 249, 1181,
- /* 300 */ 405, 1238, 1369, 464, 1259, 1012, 1012, 1012, 1368, 1368,
- /* 310 */ 1368, 1368, 184, 184, 1326, 904, 1287, 1480, 1712, 1712,
- /* 320 */ 1633, 1633, 1757, 1757, 1633, 1647, 1651, 1783, 1764, 1791,
- /* 330 */ 1791, 1791, 1791, 1633, 1806, 1677, 1651, 1651, 1677, 1783,
- /* 340 */ 1764, 1677, 1764, 1677, 1633, 1806, 1674, 1779, 1633, 1806,
- /* 350 */ 1823, 1633, 1806, 1633, 1806, 1823, 1732, 1732, 1732, 1794,
- /* 360 */ 1840, 1840, 1823, 1732, 1738, 1732, 1794, 1732, 1732, 1701,
- /* 370 */ 1844, 1758, 1758, 1823, 1633, 1789, 1789, 1807, 1807, 1742,
- /* 380 */ 1752, 1877, 1633, 1743, 1742, 1759, 1765, 1677, 1879, 1897,
- /* 390 */ 1897, 1914, 1914, 1914, 2096, 2096, 2096, 2096, 2096, 2096,
- /* 400 */ 2096, 2096, 2096, 2096, 2096, 2096, 2096, 2096, 2096, 207,
- /* 410 */ 1095, 331, 620, 903, 806, 1074, 1483, 1432, 1481, 1322,
- /* 420 */ 1370, 1394, 1515, 1291, 1546, 1547, 1557, 1595, 1598, 1599,
- /* 430 */ 1434, 1453, 1618, 1462, 1567, 1489, 1644, 1654, 1616, 1660,
- /* 440 */ 1548, 1549, 1682, 1685, 1597, 742, 1941, 1945, 1927, 1787,
- /* 450 */ 1937, 1940, 1934, 1936, 1821, 1810, 1832, 1938, 1938, 1942,
- /* 460 */ 1822, 1947, 1824, 1949, 1968, 1828, 1841, 1938, 1842, 1912,
- /* 470 */ 1939, 1938, 1826, 1921, 1922, 1925, 1926, 1850, 1865, 1948,
- /* 480 */ 1843, 1982, 1980, 1964, 1872, 1827, 1928, 1970, 1929, 1923,
- /* 490 */ 1958, 1848, 1885, 1977, 1983, 1985, 1871, 1880, 1984, 1943,
- /* 500 */ 1986, 1987, 1988, 1990, 1946, 1955, 1991, 1911, 1989, 1994,
- /* 510 */ 1951, 1992, 1996, 1873, 1998, 2000, 2001, 2002, 2003, 2004,
- /* 520 */ 1999, 1933, 1890, 2009, 2010, 1910, 2005, 2012, 1892, 2011,
- /* 530 */ 2006, 2007, 2008, 2013, 1950, 1962, 1957, 2014, 1969, 1952,
- /* 540 */ 2015, 2023, 2026, 2027, 2025, 2028, 2018, 1913, 1915, 2031,
- /* 550 */ 2011, 2033, 2036, 2037, 2038, 2039, 2040, 2043, 2051, 2044,
- /* 560 */ 2045, 2046, 2047, 2049, 2050, 2048, 1944, 1935, 1953, 1954,
- /* 570 */ 2052, 2055, 2053, 2073, 2074,
+ /* 130 */ 1837, 137, 181, 181, 181, 181, 181, 181, 181, 94,
+ /* 140 */ 430, 66, 65, 112, 366, 533, 533, 740, 1257, 533,
+ /* 150 */ 533, 79, 79, 533, 412, 412, 412, 77, 412, 123,
+ /* 160 */ 113, 113, 113, 22, 22, 2100, 2100, 328, 328, 328,
+ /* 170 */ 239, 468, 468, 468, 468, 1015, 1015, 409, 366, 1187,
+ /* 180 */ 1232, 533, 533, 533, 533, 533, 533, 533, 533, 533,
+ /* 190 */ 533, 533, 533, 533, 533, 533, 533, 533, 533, 533,
+ /* 200 */ 533, 969, 621, 621, 533, 642, 788, 788, 1133, 1133,
+ /* 210 */ 822, 822, 67, 1193, 2100, 2100, 2100, 2100, 2100, 2100,
+ /* 220 */ 2100, 1307, 954, 954, 585, 472, 640, 387, 695, 538,
+ /* 230 */ 541, 700, 533, 533, 533, 533, 533, 533, 533, 533,
+ /* 240 */ 533, 533, 222, 533, 533, 533, 533, 533, 533, 533,
+ /* 250 */ 533, 533, 533, 533, 533, 1213, 1213, 1213, 533, 533,
+ /* 260 */ 533, 565, 533, 533, 533, 916, 1147, 533, 533, 1288,
+ /* 270 */ 533, 533, 533, 533, 533, 533, 533, 533, 639, 1280,
+ /* 280 */ 209, 1129, 1129, 1129, 1129, 580, 209, 209, 1209, 768,
+ /* 290 */ 917, 649, 1315, 1334, 405, 1334, 1383, 249, 1315, 1315,
+ /* 300 */ 249, 1315, 405, 1383, 1441, 464, 1245, 1417, 1417, 1417,
+ /* 310 */ 1323, 1323, 1323, 1323, 184, 184, 1335, 1476, 856, 1482,
+ /* 320 */ 1744, 1744, 1665, 1665, 1773, 1773, 1665, 1669, 1671, 1802,
+ /* 330 */ 1782, 1809, 1809, 1809, 1809, 1665, 1817, 1690, 1671, 1671,
+ /* 340 */ 1690, 1802, 1782, 1690, 1782, 1690, 1665, 1817, 1693, 1791,
+ /* 350 */ 1665, 1817, 1832, 1665, 1817, 1665, 1817, 1832, 1752, 1752,
+ /* 360 */ 1752, 1805, 1850, 1850, 1832, 1752, 1749, 1752, 1805, 1752,
+ /* 370 */ 1752, 1713, 1858, 1775, 1775, 1832, 1665, 1799, 1799, 1823,
+ /* 380 */ 1823, 1761, 1765, 1890, 1665, 1757, 1761, 1771, 1774, 1690,
+ /* 390 */ 1894, 1913, 1913, 1929, 1929, 1929, 2100, 2100, 2100, 2100,
+ /* 400 */ 2100, 2100, 2100, 2100, 2100, 2100, 2100, 2100, 2100, 2100,
+ /* 410 */ 2100, 207, 1220, 331, 620, 967, 806, 1074, 1499, 1432,
+ /* 420 */ 1463, 1479, 1419, 1422, 1557, 1512, 1598, 1599, 1644, 1645,
+ /* 430 */ 1654, 1660, 1555, 1505, 1684, 1462, 1670, 1563, 1619, 1593,
+ /* 440 */ 1676, 1679, 1613, 1680, 1554, 1558, 1689, 1692, 1605, 1589,
+ /* 450 */ 1955, 1959, 1941, 1803, 1950, 1951, 1945, 1946, 1831, 1820,
+ /* 460 */ 1842, 1948, 1948, 1952, 1833, 1954, 1834, 1961, 1978, 1838,
+ /* 470 */ 1851, 1948, 1852, 1922, 1947, 1948, 1836, 1932, 1935, 1936,
+ /* 480 */ 1942, 1866, 1881, 1964, 1859, 1998, 1996, 1980, 1888, 1843,
+ /* 490 */ 1937, 1981, 1939, 1933, 1968, 1869, 1896, 1988, 1993, 1995,
+ /* 500 */ 1884, 1891, 1997, 1953, 1999, 2000, 1994, 2001, 1957, 1966,
+ /* 510 */ 2002, 1931, 1990, 2006, 1962, 2003, 2007, 2004, 1882, 2010,
+ /* 520 */ 2011, 2012, 2008, 2013, 2015, 1944, 1898, 2019, 2020, 1928,
+ /* 530 */ 2014, 2023, 1903, 2022, 2016, 2017, 2018, 2021, 1965, 1974,
+ /* 540 */ 1970, 2024, 1979, 1967, 2025, 2034, 2036, 2037, 2038, 2039,
+ /* 550 */ 2028, 1923, 1924, 2044, 2022, 2046, 2047, 2048, 2049, 2050,
+ /* 560 */ 2051, 2054, 2062, 2055, 2056, 2057, 2058, 2060, 2061, 2059,
+ /* 570 */ 1956, 1938, 1949, 1958, 2063, 2064, 2070, 2085, 2088,
};
-#define YY_REDUCE_COUNT (408)
+#define YY_REDUCE_COUNT (410)
#define YY_REDUCE_MIN (-271)
-#define YY_REDUCE_MAX (1740)
+#define YY_REDUCE_MAX (1753)
static const short yy_reduce_ofst[] = {
/* 0 */ -125, 733, 789, 241, 293, -123, -193, -191, -183, -187,
/* 10 */ 166, 238, 133, -207, -199, -267, -176, -6, 204, 489,
- /* 20 */ 576, -175, 598, 686, 615, 725, 860, 778, 781, 857,
- /* 30 */ 616, 887, 87, 240, -192, 408, 626, 796, 843, 854,
- /* 40 */ 1003, -271, -271, -271, -271, -271, -271, -271, -271, -271,
+ /* 20 */ 576, 598, -175, 686, 860, 615, 725, 1014, 778, 781,
+ /* 30 */ 857, 616, 887, 87, 240, -192, 408, 626, 796, 843,
+ /* 40 */ 854, 1004, -271, -271, -271, -271, -271, -271, -271, -271,
/* 50 */ -271, -271, -271, -271, -271, -271, -271, -271, -271, -271,
/* 60 */ -271, -271, -271, -271, -271, -271, -271, -271, -271, -271,
- /* 70 */ -271, -271, -271, -271, -271, -271, -271, -271, 80, 83,
- /* 80 */ 313, 886, 888, 996, 1034, 1059, 1081, 1100, 1117, 1152,
- /* 90 */ 1155, 1163, 1165, 1167, 1169, 1172, 1180, 1182, 1184, 1198,
- /* 100 */ 1200, 1213, 1215, 1225, 1227, 1252, 1254, 1264, 1299, 1303,
- /* 110 */ 1308, 1312, 1325, 1328, 1337, 1340, 1343, 1371, 1373, 1384,
- /* 120 */ 1386, 1411, 1420, 1424, 1426, 1458, 1470, 1473, 1475, 1479,
- /* 130 */ -271, -271, -271, -271, -271, -271, -271, -271, -271, -271,
- /* 140 */ -271, 138, 459, 396, -158, 470, 302, -212, 521, 201,
- /* 150 */ -195, -92, 559, 630, 632, 630, -271, 632, 901, 63,
- /* 160 */ 407, -271, -271, -271, -271, 161, 161, 161, 251, 335,
- /* 170 */ 847, 960, 980, 537, 588, 618, 628, 688, 688, -166,
- /* 180 */ -161, 674, 790, 794, 799, 851, 852, -122, 680, -120,
- /* 190 */ 995, 1038, 415, 1051, 893, 798, 962, 400, 1086, 779,
- /* 200 */ 923, 924, 263, 1041, 979, 990, 1083, 1097, 1031, 1194,
- /* 210 */ 362, 994, 1139, 1005, 1037, 1202, 1205, 1195, 1210, -194,
- /* 220 */ 56, 185, -135, 232, 522, 560, 601, 617, 669, 683,
- /* 230 */ 711, 856, 908, 941, 1048, 1101, 1147, 1257, 1262, 1265,
- /* 240 */ 392, 1292, 1333, 1339, 1342, 1346, 1350, 1359, 1374, 1418,
- /* 250 */ 1421, 1436, 1437, 593, 755, 770, 997, 1445, 1459, 1209,
- /* 260 */ 1500, 1504, 1516, 1132, 1243, 1518, 1519, 1440, 1520, 560,
- /* 270 */ 1522, 1523, 1524, 1526, 1527, 1529, 1382, 1438, 1431, 1468,
- /* 280 */ 1469, 1472, 1476, 1209, 1431, 1431, 1485, 1525, 1539, 1435,
- /* 290 */ 1463, 1471, 1492, 1487, 1443, 1494, 1474, 1484, 1498, 1486,
- /* 300 */ 1502, 1455, 1530, 1531, 1533, 1540, 1542, 1544, 1505, 1506,
- /* 310 */ 1507, 1508, 1521, 1528, 1493, 1537, 1532, 1575, 1488, 1496,
- /* 320 */ 1584, 1594, 1509, 1510, 1600, 1538, 1534, 1541, 1574, 1577,
- /* 330 */ 1583, 1585, 1586, 1612, 1626, 1581, 1556, 1558, 1587, 1559,
- /* 340 */ 1601, 1588, 1603, 1592, 1631, 1640, 1550, 1553, 1643, 1645,
- /* 350 */ 1625, 1649, 1652, 1650, 1653, 1632, 1636, 1637, 1642, 1634,
- /* 360 */ 1639, 1641, 1646, 1656, 1655, 1658, 1659, 1661, 1663, 1560,
- /* 370 */ 1564, 1596, 1605, 1664, 1670, 1565, 1571, 1627, 1638, 1657,
- /* 380 */ 1665, 1623, 1702, 1630, 1666, 1667, 1671, 1673, 1703, 1718,
- /* 390 */ 1719, 1729, 1730, 1731, 1621, 1622, 1628, 1720, 1713, 1716,
- /* 400 */ 1722, 1723, 1733, 1717, 1724, 1727, 1728, 1725, 1740,
+ /* 70 */ -271, -271, -271, -271, -271, -271, -271, -271, -271, 80,
+ /* 80 */ 83, 313, 886, 888, 918, 938, 1021, 1034, 1036, 1141,
+ /* 90 */ 1159, 1163, 1166, 1168, 1170, 1176, 1178, 1180, 1184, 1196,
+ /* 100 */ 1198, 1205, 1215, 1225, 1227, 1236, 1252, 1254, 1264, 1303,
+ /* 110 */ 1309, 1312, 1322, 1325, 1328, 1337, 1340, 1343, 1353, 1371,
+ /* 120 */ 1373, 1384, 1386, 1411, 1413, 1420, 1424, 1426, 1458, 1470,
+ /* 130 */ 1473, -271, -271, -271, -271, -271, -271, -271, -271, -271,
+ /* 140 */ -271, -271, 138, 459, 396, -158, 470, 302, -212, 521,
+ /* 150 */ 201, -195, -92, 559, 630, 632, 630, -271, 632, 901,
+ /* 160 */ 63, 407, 670, -271, -271, -271, -271, 161, 161, 161,
+ /* 170 */ 251, 335, 847, 979, 1097, 537, 588, 618, 628, 688,
+ /* 180 */ 688, -166, -161, 674, 787, 794, 799, 852, 996, -122,
+ /* 190 */ 837, -120, 1018, 1035, 415, 1047, 1001, 958, 1082, 400,
+ /* 200 */ 1099, 779, 1137, 1142, 263, 1083, 1145, 1150, 1041, 1139,
+ /* 210 */ 965, 1050, 362, 849, 752, 629, 675, 1162, 1173, 1090,
+ /* 220 */ 1195, -194, 56, 185, -135, 232, 522, 560, 571, 601,
+ /* 230 */ 617, 669, 683, 711, 850, 893, 1000, 1040, 1049, 1081,
+ /* 240 */ 1087, 1101, 392, 1114, 1123, 1155, 1161, 1175, 1271, 1293,
+ /* 250 */ 1299, 1330, 1339, 1342, 1347, 593, 1282, 1286, 1350, 1359,
+ /* 260 */ 1368, 1314, 1480, 1483, 1507, 1085, 1338, 1526, 1527, 1487,
+ /* 270 */ 1531, 560, 1532, 1534, 1535, 1538, 1539, 1541, 1448, 1450,
+ /* 280 */ 1496, 1484, 1485, 1489, 1490, 1314, 1496, 1496, 1504, 1536,
+ /* 290 */ 1564, 1451, 1486, 1492, 1509, 1493, 1465, 1515, 1494, 1495,
+ /* 300 */ 1517, 1500, 1519, 1474, 1550, 1543, 1548, 1556, 1565, 1566,
+ /* 310 */ 1518, 1523, 1542, 1544, 1525, 1545, 1513, 1553, 1552, 1604,
+ /* 320 */ 1508, 1510, 1608, 1609, 1520, 1528, 1612, 1540, 1559, 1560,
+ /* 330 */ 1592, 1591, 1595, 1596, 1597, 1629, 1638, 1594, 1569, 1570,
+ /* 340 */ 1600, 1568, 1610, 1601, 1611, 1603, 1643, 1651, 1562, 1571,
+ /* 350 */ 1655, 1659, 1640, 1663, 1666, 1664, 1667, 1641, 1650, 1652,
+ /* 360 */ 1653, 1647, 1656, 1657, 1658, 1668, 1672, 1681, 1649, 1682,
+ /* 370 */ 1683, 1573, 1582, 1607, 1615, 1685, 1702, 1586, 1587, 1642,
+ /* 380 */ 1646, 1673, 1675, 1636, 1714, 1637, 1677, 1674, 1678, 1694,
+ /* 390 */ 1719, 1734, 1735, 1746, 1747, 1750, 1633, 1661, 1686, 1738,
+ /* 400 */ 1728, 1733, 1736, 1737, 1740, 1726, 1730, 1742, 1743, 1748,
+ /* 410 */ 1753,
};
static const YYACTIONTYPE yy_default[] = {
- /* 0 */ 1641, 1641, 1641, 1469, 1236, 1347, 1236, 1236, 1236, 1469,
- /* 10 */ 1469, 1469, 1236, 1377, 1377, 1522, 1269, 1236, 1236, 1236,
- /* 20 */ 1236, 1236, 1236, 1236, 1236, 1236, 1236, 1468, 1236, 1236,
- /* 30 */ 1236, 1236, 1557, 1557, 1236, 1236, 1236, 1236, 1236, 1236,
- /* 40 */ 1236, 1236, 1386, 1236, 1393, 1236, 1236, 1236, 1236, 1236,
- /* 50 */ 1470, 1471, 1236, 1236, 1236, 1521, 1523, 1486, 1400, 1399,
- /* 60 */ 1398, 1397, 1504, 1365, 1391, 1384, 1388, 1465, 1466, 1464,
- /* 70 */ 1619, 1471, 1470, 1236, 1387, 1433, 1449, 1432, 1236, 1236,
- /* 80 */ 1236, 1236, 1236, 1236, 1236, 1236, 1236, 1236, 1236, 1236,
- /* 90 */ 1236, 1236, 1236, 1236, 1236, 1236, 1236, 1236, 1236, 1236,
- /* 100 */ 1236, 1236, 1236, 1236, 1236, 1236, 1236, 1236, 1236, 1236,
- /* 110 */ 1236, 1236, 1236, 1236, 1236, 1236, 1236, 1236, 1236, 1236,
- /* 120 */ 1236, 1236, 1236, 1236, 1236, 1236, 1236, 1236, 1236, 1236,
- /* 130 */ 1441, 1448, 1447, 1446, 1455, 1445, 1442, 1435, 1434, 1436,
- /* 140 */ 1437, 1236, 1236, 1260, 1236, 1236, 1257, 1311, 1236, 1236,
- /* 150 */ 1236, 1236, 1236, 1541, 1540, 1236, 1438, 1236, 1269, 1427,
- /* 160 */ 1426, 1452, 1439, 1451, 1450, 1529, 1593, 1592, 1487, 1236,
- /* 170 */ 1236, 1236, 1236, 1236, 1236, 1557, 1236, 1236, 1236, 1236,
- /* 180 */ 1236, 1236, 1236, 1236, 1236, 1236, 1236, 1236, 1236, 1236,
- /* 190 */ 1236, 1236, 1236, 1236, 1236, 1236, 1236, 1236, 1236, 1367,
- /* 200 */ 1557, 1557, 1236, 1269, 1557, 1557, 1368, 1368, 1265, 1265,
- /* 210 */ 1371, 1236, 1536, 1338, 1338, 1338, 1338, 1347, 1338, 1236,
- /* 220 */ 1236, 1236, 1236, 1236, 1236, 1236, 1236, 1236, 1236, 1236,
- /* 230 */ 1236, 1236, 1236, 1236, 1526, 1524, 1236, 1236, 1236, 1236,
- /* 240 */ 1236, 1236, 1236, 1236, 1236, 1236, 1236, 1236, 1236, 1236,
- /* 250 */ 1236, 1236, 1236, 1236, 1236, 1236, 1236, 1236, 1236, 1236,
- /* 260 */ 1236, 1236, 1236, 1343, 1236, 1236, 1236, 1236, 1236, 1236,
- /* 270 */ 1236, 1236, 1236, 1236, 1236, 1586, 1236, 1499, 1325, 1343,
- /* 280 */ 1343, 1343, 1343, 1345, 1326, 1324, 1337, 1270, 1243, 1633,
- /* 290 */ 1403, 1392, 1344, 1392, 1630, 1390, 1403, 1403, 1390, 1403,
- /* 300 */ 1344, 1630, 1286, 1608, 1281, 1377, 1377, 1377, 1367, 1367,
- /* 310 */ 1367, 1367, 1371, 1371, 1467, 1344, 1337, 1236, 1633, 1633,
- /* 320 */ 1353, 1353, 1632, 1632, 1353, 1487, 1616, 1412, 1314, 1320,
- /* 330 */ 1320, 1320, 1320, 1353, 1254, 1390, 1616, 1616, 1390, 1412,
- /* 340 */ 1314, 1390, 1314, 1390, 1353, 1254, 1503, 1627, 1353, 1254,
- /* 350 */ 1477, 1353, 1254, 1353, 1254, 1477, 1312, 1312, 1312, 1301,
- /* 360 */ 1236, 1236, 1477, 1312, 1286, 1312, 1301, 1312, 1312, 1575,
- /* 370 */ 1236, 1481, 1481, 1477, 1353, 1567, 1567, 1380, 1380, 1385,
- /* 380 */ 1371, 1472, 1353, 1236, 1385, 1383, 1381, 1390, 1304, 1589,
- /* 390 */ 1589, 1585, 1585, 1585, 1638, 1638, 1536, 1601, 1269, 1269,
- /* 400 */ 1269, 1269, 1601, 1288, 1288, 1270, 1270, 1269, 1601, 1236,
- /* 410 */ 1236, 1236, 1236, 1236, 1236, 1596, 1236, 1531, 1488, 1357,
- /* 420 */ 1236, 1236, 1236, 1236, 1236, 1236, 1236, 1236, 1236, 1236,
- /* 430 */ 1236, 1236, 1236, 1236, 1542, 1236, 1236, 1236, 1236, 1236,
- /* 440 */ 1236, 1236, 1236, 1236, 1236, 1417, 1236, 1239, 1533, 1236,
- /* 450 */ 1236, 1236, 1236, 1236, 1236, 1236, 1236, 1394, 1395, 1358,
- /* 460 */ 1236, 1236, 1236, 1236, 1236, 1236, 1236, 1409, 1236, 1236,
- /* 470 */ 1236, 1404, 1236, 1236, 1236, 1236, 1236, 1236, 1236, 1236,
- /* 480 */ 1629, 1236, 1236, 1236, 1236, 1236, 1236, 1502, 1501, 1236,
- /* 490 */ 1236, 1355, 1236, 1236, 1236, 1236, 1236, 1236, 1236, 1236,
- /* 500 */ 1236, 1236, 1236, 1236, 1236, 1284, 1236, 1236, 1236, 1236,
- /* 510 */ 1236, 1236, 1236, 1236, 1236, 1236, 1236, 1236, 1236, 1236,
- /* 520 */ 1236, 1236, 1236, 1236, 1236, 1236, 1236, 1236, 1236, 1382,
- /* 530 */ 1236, 1236, 1236, 1236, 1236, 1236, 1236, 1236, 1236, 1236,
- /* 540 */ 1236, 1236, 1236, 1236, 1572, 1372, 1236, 1236, 1236, 1236,
- /* 550 */ 1620, 1236, 1236, 1236, 1236, 1236, 1236, 1236, 1236, 1236,
- /* 560 */ 1236, 1236, 1236, 1236, 1236, 1612, 1328, 1418, 1236, 1421,
- /* 570 */ 1258, 1236, 1248, 1236, 1236,
+ /* 0 */ 1648, 1648, 1648, 1478, 1243, 1354, 1243, 1243, 1243, 1478,
+ /* 10 */ 1478, 1478, 1243, 1384, 1384, 1531, 1276, 1243, 1243, 1243,
+ /* 20 */ 1243, 1243, 1243, 1243, 1243, 1243, 1243, 1243, 1477, 1243,
+ /* 30 */ 1243, 1243, 1243, 1564, 1564, 1243, 1243, 1243, 1243, 1243,
+ /* 40 */ 1243, 1243, 1243, 1393, 1243, 1400, 1243, 1243, 1243, 1243,
+ /* 50 */ 1243, 1479, 1480, 1243, 1243, 1243, 1530, 1532, 1495, 1407,
+ /* 60 */ 1406, 1405, 1404, 1513, 1372, 1398, 1391, 1395, 1474, 1475,
+ /* 70 */ 1473, 1626, 1480, 1479, 1243, 1394, 1442, 1458, 1441, 1243,
+ /* 80 */ 1243, 1243, 1243, 1243, 1243, 1243, 1243, 1243, 1243, 1243,
+ /* 90 */ 1243, 1243, 1243, 1243, 1243, 1243, 1243, 1243, 1243, 1243,
+ /* 100 */ 1243, 1243, 1243, 1243, 1243, 1243, 1243, 1243, 1243, 1243,
+ /* 110 */ 1243, 1243, 1243, 1243, 1243, 1243, 1243, 1243, 1243, 1243,
+ /* 120 */ 1243, 1243, 1243, 1243, 1243, 1243, 1243, 1243, 1243, 1243,
+ /* 130 */ 1243, 1450, 1457, 1456, 1455, 1464, 1454, 1451, 1444, 1443,
+ /* 140 */ 1445, 1446, 1243, 1243, 1267, 1243, 1243, 1264, 1318, 1243,
+ /* 150 */ 1243, 1243, 1243, 1243, 1550, 1549, 1243, 1447, 1243, 1276,
+ /* 160 */ 1435, 1434, 1433, 1461, 1448, 1460, 1459, 1538, 1600, 1599,
+ /* 170 */ 1496, 1243, 1243, 1243, 1243, 1243, 1243, 1564, 1243, 1243,
+ /* 180 */ 1243, 1243, 1243, 1243, 1243, 1243, 1243, 1243, 1243, 1243,
+ /* 190 */ 1243, 1243, 1243, 1243, 1243, 1243, 1243, 1243, 1243, 1243,
+ /* 200 */ 1243, 1374, 1564, 1564, 1243, 1276, 1564, 1564, 1375, 1375,
+ /* 210 */ 1272, 1272, 1378, 1243, 1545, 1345, 1345, 1345, 1345, 1354,
+ /* 220 */ 1345, 1243, 1243, 1243, 1243, 1243, 1243, 1243, 1243, 1243,
+ /* 230 */ 1243, 1243, 1243, 1243, 1243, 1243, 1535, 1533, 1243, 1243,
+ /* 240 */ 1243, 1243, 1243, 1243, 1243, 1243, 1243, 1243, 1243, 1243,
+ /* 250 */ 1243, 1243, 1243, 1243, 1243, 1243, 1243, 1243, 1243, 1243,
+ /* 260 */ 1243, 1243, 1243, 1243, 1243, 1350, 1243, 1243, 1243, 1243,
+ /* 270 */ 1243, 1243, 1243, 1243, 1243, 1243, 1243, 1593, 1243, 1508,
+ /* 280 */ 1332, 1350, 1350, 1350, 1350, 1352, 1333, 1331, 1344, 1277,
+ /* 290 */ 1250, 1640, 1410, 1399, 1351, 1399, 1637, 1397, 1410, 1410,
+ /* 300 */ 1397, 1410, 1351, 1637, 1293, 1615, 1288, 1384, 1384, 1384,
+ /* 310 */ 1374, 1374, 1374, 1374, 1378, 1378, 1476, 1351, 1344, 1243,
+ /* 320 */ 1640, 1640, 1360, 1360, 1639, 1639, 1360, 1496, 1623, 1419,
+ /* 330 */ 1321, 1327, 1327, 1327, 1327, 1360, 1261, 1397, 1623, 1623,
+ /* 340 */ 1397, 1419, 1321, 1397, 1321, 1397, 1360, 1261, 1512, 1634,
+ /* 350 */ 1360, 1261, 1486, 1360, 1261, 1360, 1261, 1486, 1319, 1319,
+ /* 360 */ 1319, 1308, 1243, 1243, 1486, 1319, 1293, 1319, 1308, 1319,
+ /* 370 */ 1319, 1582, 1243, 1490, 1490, 1486, 1360, 1574, 1574, 1387,
+ /* 380 */ 1387, 1392, 1378, 1481, 1360, 1243, 1392, 1390, 1388, 1397,
+ /* 390 */ 1311, 1596, 1596, 1592, 1592, 1592, 1645, 1645, 1545, 1608,
+ /* 400 */ 1276, 1276, 1276, 1276, 1608, 1295, 1295, 1277, 1277, 1276,
+ /* 410 */ 1608, 1243, 1243, 1243, 1243, 1243, 1243, 1603, 1243, 1540,
+ /* 420 */ 1497, 1364, 1243, 1243, 1243, 1243, 1243, 1243, 1243, 1243,
+ /* 430 */ 1243, 1243, 1243, 1243, 1243, 1243, 1243, 1243, 1551, 1243,
+ /* 440 */ 1243, 1243, 1243, 1243, 1243, 1243, 1243, 1243, 1243, 1424,
+ /* 450 */ 1243, 1246, 1542, 1243, 1243, 1243, 1243, 1243, 1243, 1243,
+ /* 460 */ 1243, 1401, 1402, 1365, 1243, 1243, 1243, 1243, 1243, 1243,
+ /* 470 */ 1243, 1416, 1243, 1243, 1243, 1411, 1243, 1243, 1243, 1243,
+ /* 480 */ 1243, 1243, 1243, 1243, 1636, 1243, 1243, 1243, 1243, 1243,
+ /* 490 */ 1243, 1511, 1510, 1243, 1243, 1362, 1243, 1243, 1243, 1243,
+ /* 500 */ 1243, 1243, 1243, 1243, 1243, 1243, 1243, 1243, 1243, 1291,
+ /* 510 */ 1243, 1243, 1243, 1243, 1243, 1243, 1243, 1243, 1243, 1243,
+ /* 520 */ 1243, 1243, 1243, 1243, 1243, 1243, 1243, 1243, 1243, 1243,
+ /* 530 */ 1243, 1243, 1243, 1389, 1243, 1243, 1243, 1243, 1243, 1243,
+ /* 540 */ 1243, 1243, 1243, 1243, 1243, 1243, 1243, 1243, 1579, 1379,
+ /* 550 */ 1243, 1243, 1243, 1243, 1627, 1243, 1243, 1243, 1243, 1243,
+ /* 560 */ 1243, 1243, 1243, 1243, 1243, 1243, 1243, 1243, 1243, 1619,
+ /* 570 */ 1335, 1425, 1243, 1428, 1265, 1243, 1255, 1243, 1243,
};
/********** End of lemon-generated parsing tables *****************************/
@@ -169725,135 +172916,135 @@ static const char *const yyRuleName[] = {
/* 185 */ "expr ::= expr COLLATE ID|STRING",
/* 186 */ "expr ::= CAST LP expr AS typetoken RP",
/* 187 */ "expr ::= ID|INDEXED|JOIN_KW LP distinct exprlist RP",
- /* 188 */ "expr ::= ID|INDEXED|JOIN_KW LP STAR RP",
- /* 189 */ "expr ::= ID|INDEXED|JOIN_KW LP distinct exprlist RP filter_over",
- /* 190 */ "expr ::= ID|INDEXED|JOIN_KW LP STAR RP filter_over",
- /* 191 */ "term ::= CTIME_KW",
- /* 192 */ "expr ::= LP nexprlist COMMA expr RP",
- /* 193 */ "expr ::= expr AND expr",
- /* 194 */ "expr ::= expr OR expr",
- /* 195 */ "expr ::= expr LT|GT|GE|LE expr",
- /* 196 */ "expr ::= expr EQ|NE expr",
- /* 197 */ "expr ::= expr BITAND|BITOR|LSHIFT|RSHIFT expr",
- /* 198 */ "expr ::= expr PLUS|MINUS expr",
- /* 199 */ "expr ::= expr STAR|SLASH|REM expr",
- /* 200 */ "expr ::= expr CONCAT expr",
- /* 201 */ "likeop ::= NOT LIKE_KW|MATCH",
- /* 202 */ "expr ::= expr likeop expr",
- /* 203 */ "expr ::= expr likeop expr ESCAPE expr",
- /* 204 */ "expr ::= expr ISNULL|NOTNULL",
- /* 205 */ "expr ::= expr NOT NULL",
- /* 206 */ "expr ::= expr IS expr",
- /* 207 */ "expr ::= expr IS NOT expr",
- /* 208 */ "expr ::= expr IS NOT DISTINCT FROM expr",
- /* 209 */ "expr ::= expr IS DISTINCT FROM expr",
- /* 210 */ "expr ::= NOT expr",
- /* 211 */ "expr ::= BITNOT expr",
- /* 212 */ "expr ::= PLUS|MINUS expr",
- /* 213 */ "expr ::= expr PTR expr",
- /* 214 */ "between_op ::= BETWEEN",
- /* 215 */ "between_op ::= NOT BETWEEN",
- /* 216 */ "expr ::= expr between_op expr AND expr",
- /* 217 */ "in_op ::= IN",
- /* 218 */ "in_op ::= NOT IN",
- /* 219 */ "expr ::= expr in_op LP exprlist RP",
- /* 220 */ "expr ::= LP select RP",
- /* 221 */ "expr ::= expr in_op LP select RP",
- /* 222 */ "expr ::= expr in_op nm dbnm paren_exprlist",
- /* 223 */ "expr ::= EXISTS LP select RP",
- /* 224 */ "expr ::= CASE case_operand case_exprlist case_else END",
- /* 225 */ "case_exprlist ::= case_exprlist WHEN expr THEN expr",
- /* 226 */ "case_exprlist ::= WHEN expr THEN expr",
- /* 227 */ "case_else ::= ELSE expr",
- /* 228 */ "case_else ::=",
- /* 229 */ "case_operand ::=",
- /* 230 */ "exprlist ::=",
- /* 231 */ "nexprlist ::= nexprlist COMMA expr",
- /* 232 */ "nexprlist ::= expr",
- /* 233 */ "paren_exprlist ::=",
- /* 234 */ "paren_exprlist ::= LP exprlist RP",
- /* 235 */ "cmd ::= createkw uniqueflag INDEX ifnotexists nm dbnm ON nm LP sortlist RP where_opt",
- /* 236 */ "uniqueflag ::= UNIQUE",
- /* 237 */ "uniqueflag ::=",
- /* 238 */ "eidlist_opt ::=",
- /* 239 */ "eidlist_opt ::= LP eidlist RP",
- /* 240 */ "eidlist ::= eidlist COMMA nm collate sortorder",
- /* 241 */ "eidlist ::= nm collate sortorder",
- /* 242 */ "collate ::=",
- /* 243 */ "collate ::= COLLATE ID|STRING",
- /* 244 */ "cmd ::= DROP INDEX ifexists fullname",
- /* 245 */ "cmd ::= VACUUM vinto",
- /* 246 */ "cmd ::= VACUUM nm vinto",
- /* 247 */ "vinto ::= INTO expr",
- /* 248 */ "vinto ::=",
- /* 249 */ "cmd ::= PRAGMA nm dbnm",
- /* 250 */ "cmd ::= PRAGMA nm dbnm EQ nmnum",
- /* 251 */ "cmd ::= PRAGMA nm dbnm LP nmnum RP",
- /* 252 */ "cmd ::= PRAGMA nm dbnm EQ minus_num",
- /* 253 */ "cmd ::= PRAGMA nm dbnm LP minus_num RP",
- /* 254 */ "plus_num ::= PLUS INTEGER|FLOAT",
- /* 255 */ "minus_num ::= MINUS INTEGER|FLOAT",
- /* 256 */ "cmd ::= createkw trigger_decl BEGIN trigger_cmd_list END",
- /* 257 */ "trigger_decl ::= temp TRIGGER ifnotexists nm dbnm trigger_time trigger_event ON fullname foreach_clause when_clause",
- /* 258 */ "trigger_time ::= BEFORE|AFTER",
- /* 259 */ "trigger_time ::= INSTEAD OF",
- /* 260 */ "trigger_time ::=",
- /* 261 */ "trigger_event ::= DELETE|INSERT",
- /* 262 */ "trigger_event ::= UPDATE",
- /* 263 */ "trigger_event ::= UPDATE OF idlist",
- /* 264 */ "when_clause ::=",
- /* 265 */ "when_clause ::= WHEN expr",
- /* 266 */ "trigger_cmd_list ::= trigger_cmd_list trigger_cmd SEMI",
- /* 267 */ "trigger_cmd_list ::= trigger_cmd SEMI",
- /* 268 */ "trnm ::= nm DOT nm",
- /* 269 */ "tridxby ::= INDEXED BY nm",
- /* 270 */ "tridxby ::= NOT INDEXED",
- /* 271 */ "trigger_cmd ::= UPDATE orconf trnm tridxby SET setlist from where_opt scanpt",
- /* 272 */ "trigger_cmd ::= scanpt insert_cmd INTO trnm idlist_opt select upsert scanpt",
- /* 273 */ "trigger_cmd ::= DELETE FROM trnm tridxby where_opt scanpt",
- /* 274 */ "trigger_cmd ::= scanpt select scanpt",
- /* 275 */ "expr ::= RAISE LP IGNORE RP",
- /* 276 */ "expr ::= RAISE LP raisetype COMMA nm RP",
- /* 277 */ "raisetype ::= ROLLBACK",
- /* 278 */ "raisetype ::= ABORT",
- /* 279 */ "raisetype ::= FAIL",
- /* 280 */ "cmd ::= DROP TRIGGER ifexists fullname",
- /* 281 */ "cmd ::= ATTACH database_kw_opt expr AS expr key_opt",
- /* 282 */ "cmd ::= DETACH database_kw_opt expr",
- /* 283 */ "key_opt ::=",
- /* 284 */ "key_opt ::= KEY expr",
- /* 285 */ "cmd ::= REINDEX",
- /* 286 */ "cmd ::= REINDEX nm dbnm",
- /* 287 */ "cmd ::= ANALYZE",
- /* 288 */ "cmd ::= ANALYZE nm dbnm",
- /* 289 */ "cmd ::= ALTER TABLE fullname RENAME TO nm",
- /* 290 */ "cmd ::= ALTER TABLE add_column_fullname ADD kwcolumn_opt columnname carglist",
- /* 291 */ "cmd ::= ALTER TABLE fullname DROP kwcolumn_opt nm",
- /* 292 */ "add_column_fullname ::= fullname",
- /* 293 */ "cmd ::= ALTER TABLE fullname RENAME kwcolumn_opt nm TO nm",
- /* 294 */ "cmd ::= create_vtab",
- /* 295 */ "cmd ::= create_vtab LP vtabarglist RP",
- /* 296 */ "create_vtab ::= createkw VIRTUAL TABLE ifnotexists nm dbnm USING nm",
- /* 297 */ "vtabarg ::=",
- /* 298 */ "vtabargtoken ::= ANY",
- /* 299 */ "vtabargtoken ::= lp anylist RP",
- /* 300 */ "lp ::= LP",
- /* 301 */ "with ::= WITH wqlist",
- /* 302 */ "with ::= WITH RECURSIVE wqlist",
- /* 303 */ "wqas ::= AS",
- /* 304 */ "wqas ::= AS MATERIALIZED",
- /* 305 */ "wqas ::= AS NOT MATERIALIZED",
- /* 306 */ "wqitem ::= nm eidlist_opt wqas LP select RP",
- /* 307 */ "wqlist ::= wqitem",
- /* 308 */ "wqlist ::= wqlist COMMA wqitem",
- /* 309 */ "windowdefn_list ::= windowdefn",
- /* 310 */ "windowdefn_list ::= windowdefn_list COMMA windowdefn",
- /* 311 */ "windowdefn ::= nm AS LP window RP",
- /* 312 */ "window ::= PARTITION BY nexprlist orderby_opt frame_opt",
- /* 313 */ "window ::= nm PARTITION BY nexprlist orderby_opt frame_opt",
- /* 314 */ "window ::= ORDER BY sortlist frame_opt",
- /* 315 */ "window ::= nm ORDER BY sortlist frame_opt",
- /* 316 */ "window ::= frame_opt",
+ /* 188 */ "expr ::= ID|INDEXED|JOIN_KW LP distinct exprlist ORDER BY sortlist RP",
+ /* 189 */ "expr ::= ID|INDEXED|JOIN_KW LP STAR RP",
+ /* 190 */ "expr ::= ID|INDEXED|JOIN_KW LP distinct exprlist RP filter_over",
+ /* 191 */ "expr ::= ID|INDEXED|JOIN_KW LP distinct exprlist ORDER BY sortlist RP filter_over",
+ /* 192 */ "expr ::= ID|INDEXED|JOIN_KW LP STAR RP filter_over",
+ /* 193 */ "term ::= CTIME_KW",
+ /* 194 */ "expr ::= LP nexprlist COMMA expr RP",
+ /* 195 */ "expr ::= expr AND expr",
+ /* 196 */ "expr ::= expr OR expr",
+ /* 197 */ "expr ::= expr LT|GT|GE|LE expr",
+ /* 198 */ "expr ::= expr EQ|NE expr",
+ /* 199 */ "expr ::= expr BITAND|BITOR|LSHIFT|RSHIFT expr",
+ /* 200 */ "expr ::= expr PLUS|MINUS expr",
+ /* 201 */ "expr ::= expr STAR|SLASH|REM expr",
+ /* 202 */ "expr ::= expr CONCAT expr",
+ /* 203 */ "likeop ::= NOT LIKE_KW|MATCH",
+ /* 204 */ "expr ::= expr likeop expr",
+ /* 205 */ "expr ::= expr likeop expr ESCAPE expr",
+ /* 206 */ "expr ::= expr ISNULL|NOTNULL",
+ /* 207 */ "expr ::= expr NOT NULL",
+ /* 208 */ "expr ::= expr IS expr",
+ /* 209 */ "expr ::= expr IS NOT expr",
+ /* 210 */ "expr ::= expr IS NOT DISTINCT FROM expr",
+ /* 211 */ "expr ::= expr IS DISTINCT FROM expr",
+ /* 212 */ "expr ::= NOT expr",
+ /* 213 */ "expr ::= BITNOT expr",
+ /* 214 */ "expr ::= PLUS|MINUS expr",
+ /* 215 */ "expr ::= expr PTR expr",
+ /* 216 */ "between_op ::= BETWEEN",
+ /* 217 */ "between_op ::= NOT BETWEEN",
+ /* 218 */ "expr ::= expr between_op expr AND expr",
+ /* 219 */ "in_op ::= IN",
+ /* 220 */ "in_op ::= NOT IN",
+ /* 221 */ "expr ::= expr in_op LP exprlist RP",
+ /* 222 */ "expr ::= LP select RP",
+ /* 223 */ "expr ::= expr in_op LP select RP",
+ /* 224 */ "expr ::= expr in_op nm dbnm paren_exprlist",
+ /* 225 */ "expr ::= EXISTS LP select RP",
+ /* 226 */ "expr ::= CASE case_operand case_exprlist case_else END",
+ /* 227 */ "case_exprlist ::= case_exprlist WHEN expr THEN expr",
+ /* 228 */ "case_exprlist ::= WHEN expr THEN expr",
+ /* 229 */ "case_else ::= ELSE expr",
+ /* 230 */ "case_else ::=",
+ /* 231 */ "case_operand ::=",
+ /* 232 */ "exprlist ::=",
+ /* 233 */ "nexprlist ::= nexprlist COMMA expr",
+ /* 234 */ "nexprlist ::= expr",
+ /* 235 */ "paren_exprlist ::=",
+ /* 236 */ "paren_exprlist ::= LP exprlist RP",
+ /* 237 */ "cmd ::= createkw uniqueflag INDEX ifnotexists nm dbnm ON nm LP sortlist RP where_opt",
+ /* 238 */ "uniqueflag ::= UNIQUE",
+ /* 239 */ "uniqueflag ::=",
+ /* 240 */ "eidlist_opt ::=",
+ /* 241 */ "eidlist_opt ::= LP eidlist RP",
+ /* 242 */ "eidlist ::= eidlist COMMA nm collate sortorder",
+ /* 243 */ "eidlist ::= nm collate sortorder",
+ /* 244 */ "collate ::=",
+ /* 245 */ "collate ::= COLLATE ID|STRING",
+ /* 246 */ "cmd ::= DROP INDEX ifexists fullname",
+ /* 247 */ "cmd ::= VACUUM vinto",
+ /* 248 */ "cmd ::= VACUUM nm vinto",
+ /* 249 */ "vinto ::= INTO expr",
+ /* 250 */ "vinto ::=",
+ /* 251 */ "cmd ::= PRAGMA nm dbnm",
+ /* 252 */ "cmd ::= PRAGMA nm dbnm EQ nmnum",
+ /* 253 */ "cmd ::= PRAGMA nm dbnm LP nmnum RP",
+ /* 254 */ "cmd ::= PRAGMA nm dbnm EQ minus_num",
+ /* 255 */ "cmd ::= PRAGMA nm dbnm LP minus_num RP",
+ /* 256 */ "plus_num ::= PLUS INTEGER|FLOAT",
+ /* 257 */ "minus_num ::= MINUS INTEGER|FLOAT",
+ /* 258 */ "cmd ::= createkw trigger_decl BEGIN trigger_cmd_list END",
+ /* 259 */ "trigger_decl ::= temp TRIGGER ifnotexists nm dbnm trigger_time trigger_event ON fullname foreach_clause when_clause",
+ /* 260 */ "trigger_time ::= BEFORE|AFTER",
+ /* 261 */ "trigger_time ::= INSTEAD OF",
+ /* 262 */ "trigger_time ::=",
+ /* 263 */ "trigger_event ::= DELETE|INSERT",
+ /* 264 */ "trigger_event ::= UPDATE",
+ /* 265 */ "trigger_event ::= UPDATE OF idlist",
+ /* 266 */ "when_clause ::=",
+ /* 267 */ "when_clause ::= WHEN expr",
+ /* 268 */ "trigger_cmd_list ::= trigger_cmd_list trigger_cmd SEMI",
+ /* 269 */ "trigger_cmd_list ::= trigger_cmd SEMI",
+ /* 270 */ "trnm ::= nm DOT nm",
+ /* 271 */ "tridxby ::= INDEXED BY nm",
+ /* 272 */ "tridxby ::= NOT INDEXED",
+ /* 273 */ "trigger_cmd ::= UPDATE orconf trnm tridxby SET setlist from where_opt scanpt",
+ /* 274 */ "trigger_cmd ::= scanpt insert_cmd INTO trnm idlist_opt select upsert scanpt",
+ /* 275 */ "trigger_cmd ::= DELETE FROM trnm tridxby where_opt scanpt",
+ /* 276 */ "trigger_cmd ::= scanpt select scanpt",
+ /* 277 */ "expr ::= RAISE LP IGNORE RP",
+ /* 278 */ "expr ::= RAISE LP raisetype COMMA nm RP",
+ /* 279 */ "raisetype ::= ROLLBACK",
+ /* 280 */ "raisetype ::= ABORT",
+ /* 281 */ "raisetype ::= FAIL",
+ /* 282 */ "cmd ::= DROP TRIGGER ifexists fullname",
+ /* 283 */ "cmd ::= ATTACH database_kw_opt expr AS expr key_opt",
+ /* 284 */ "cmd ::= DETACH database_kw_opt expr",
+ /* 285 */ "key_opt ::=",
+ /* 286 */ "key_opt ::= KEY expr",
+ /* 287 */ "cmd ::= REINDEX",
+ /* 288 */ "cmd ::= REINDEX nm dbnm",
+ /* 289 */ "cmd ::= ANALYZE",
+ /* 290 */ "cmd ::= ANALYZE nm dbnm",
+ /* 291 */ "cmd ::= ALTER TABLE fullname RENAME TO nm",
+ /* 292 */ "cmd ::= ALTER TABLE add_column_fullname ADD kwcolumn_opt columnname carglist",
+ /* 293 */ "cmd ::= ALTER TABLE fullname DROP kwcolumn_opt nm",
+ /* 294 */ "add_column_fullname ::= fullname",
+ /* 295 */ "cmd ::= ALTER TABLE fullname RENAME kwcolumn_opt nm TO nm",
+ /* 296 */ "cmd ::= create_vtab",
+ /* 297 */ "cmd ::= create_vtab LP vtabarglist RP",
+ /* 298 */ "create_vtab ::= createkw VIRTUAL TABLE ifnotexists nm dbnm USING nm",
+ /* 299 */ "vtabarg ::=",
+ /* 300 */ "vtabargtoken ::= ANY",
+ /* 301 */ "vtabargtoken ::= lp anylist RP",
+ /* 302 */ "lp ::= LP",
+ /* 303 */ "with ::= WITH wqlist",
+ /* 304 */ "with ::= WITH RECURSIVE wqlist",
+ /* 305 */ "wqas ::= AS",
+ /* 306 */ "wqas ::= AS MATERIALIZED",
+ /* 307 */ "wqas ::= AS NOT MATERIALIZED",
+ /* 308 */ "wqitem ::= nm eidlist_opt wqas LP select RP",
+ /* 309 */ "wqlist ::= wqitem",
+ /* 310 */ "wqlist ::= wqlist COMMA wqitem",
+ /* 311 */ "windowdefn_list ::= windowdefn_list COMMA windowdefn",
+ /* 312 */ "windowdefn ::= nm AS LP window RP",
+ /* 313 */ "window ::= PARTITION BY nexprlist orderby_opt frame_opt",
+ /* 314 */ "window ::= nm PARTITION BY nexprlist orderby_opt frame_opt",
+ /* 315 */ "window ::= ORDER BY sortlist frame_opt",
+ /* 316 */ "window ::= nm ORDER BY sortlist frame_opt",
/* 317 */ "window ::= nm frame_opt",
/* 318 */ "frame_opt ::=",
/* 319 */ "frame_opt ::= range_or_rows frame_bound_s frame_exclude_opt",
@@ -169940,6 +173131,8 @@ static const char *const yyRuleName[] = {
/* 400 */ "anylist ::= anylist LP anylist RP",
/* 401 */ "anylist ::= anylist ANY",
/* 402 */ "with ::=",
+ /* 403 */ "windowdefn_list ::= windowdefn",
+ /* 404 */ "window ::= frame_opt",
};
#endif /* NDEBUG */
@@ -170634,135 +173827,135 @@ static const YYCODETYPE yyRuleInfoLhs[] = {
217, /* (185) expr ::= expr COLLATE ID|STRING */
217, /* (186) expr ::= CAST LP expr AS typetoken RP */
217, /* (187) expr ::= ID|INDEXED|JOIN_KW LP distinct exprlist RP */
- 217, /* (188) expr ::= ID|INDEXED|JOIN_KW LP STAR RP */
- 217, /* (189) expr ::= ID|INDEXED|JOIN_KW LP distinct exprlist RP filter_over */
- 217, /* (190) expr ::= ID|INDEXED|JOIN_KW LP STAR RP filter_over */
- 216, /* (191) term ::= CTIME_KW */
- 217, /* (192) expr ::= LP nexprlist COMMA expr RP */
- 217, /* (193) expr ::= expr AND expr */
- 217, /* (194) expr ::= expr OR expr */
- 217, /* (195) expr ::= expr LT|GT|GE|LE expr */
- 217, /* (196) expr ::= expr EQ|NE expr */
- 217, /* (197) expr ::= expr BITAND|BITOR|LSHIFT|RSHIFT expr */
- 217, /* (198) expr ::= expr PLUS|MINUS expr */
- 217, /* (199) expr ::= expr STAR|SLASH|REM expr */
- 217, /* (200) expr ::= expr CONCAT expr */
- 274, /* (201) likeop ::= NOT LIKE_KW|MATCH */
- 217, /* (202) expr ::= expr likeop expr */
- 217, /* (203) expr ::= expr likeop expr ESCAPE expr */
- 217, /* (204) expr ::= expr ISNULL|NOTNULL */
- 217, /* (205) expr ::= expr NOT NULL */
- 217, /* (206) expr ::= expr IS expr */
- 217, /* (207) expr ::= expr IS NOT expr */
- 217, /* (208) expr ::= expr IS NOT DISTINCT FROM expr */
- 217, /* (209) expr ::= expr IS DISTINCT FROM expr */
- 217, /* (210) expr ::= NOT expr */
- 217, /* (211) expr ::= BITNOT expr */
- 217, /* (212) expr ::= PLUS|MINUS expr */
- 217, /* (213) expr ::= expr PTR expr */
- 275, /* (214) between_op ::= BETWEEN */
- 275, /* (215) between_op ::= NOT BETWEEN */
- 217, /* (216) expr ::= expr between_op expr AND expr */
- 276, /* (217) in_op ::= IN */
- 276, /* (218) in_op ::= NOT IN */
- 217, /* (219) expr ::= expr in_op LP exprlist RP */
- 217, /* (220) expr ::= LP select RP */
- 217, /* (221) expr ::= expr in_op LP select RP */
- 217, /* (222) expr ::= expr in_op nm dbnm paren_exprlist */
- 217, /* (223) expr ::= EXISTS LP select RP */
- 217, /* (224) expr ::= CASE case_operand case_exprlist case_else END */
- 279, /* (225) case_exprlist ::= case_exprlist WHEN expr THEN expr */
- 279, /* (226) case_exprlist ::= WHEN expr THEN expr */
- 280, /* (227) case_else ::= ELSE expr */
- 280, /* (228) case_else ::= */
- 278, /* (229) case_operand ::= */
- 261, /* (230) exprlist ::= */
- 253, /* (231) nexprlist ::= nexprlist COMMA expr */
- 253, /* (232) nexprlist ::= expr */
- 277, /* (233) paren_exprlist ::= */
- 277, /* (234) paren_exprlist ::= LP exprlist RP */
- 190, /* (235) cmd ::= createkw uniqueflag INDEX ifnotexists nm dbnm ON nm LP sortlist RP where_opt */
- 281, /* (236) uniqueflag ::= UNIQUE */
- 281, /* (237) uniqueflag ::= */
- 221, /* (238) eidlist_opt ::= */
- 221, /* (239) eidlist_opt ::= LP eidlist RP */
- 232, /* (240) eidlist ::= eidlist COMMA nm collate sortorder */
- 232, /* (241) eidlist ::= nm collate sortorder */
- 282, /* (242) collate ::= */
- 282, /* (243) collate ::= COLLATE ID|STRING */
- 190, /* (244) cmd ::= DROP INDEX ifexists fullname */
- 190, /* (245) cmd ::= VACUUM vinto */
- 190, /* (246) cmd ::= VACUUM nm vinto */
- 283, /* (247) vinto ::= INTO expr */
- 283, /* (248) vinto ::= */
- 190, /* (249) cmd ::= PRAGMA nm dbnm */
- 190, /* (250) cmd ::= PRAGMA nm dbnm EQ nmnum */
- 190, /* (251) cmd ::= PRAGMA nm dbnm LP nmnum RP */
- 190, /* (252) cmd ::= PRAGMA nm dbnm EQ minus_num */
- 190, /* (253) cmd ::= PRAGMA nm dbnm LP minus_num RP */
- 211, /* (254) plus_num ::= PLUS INTEGER|FLOAT */
- 212, /* (255) minus_num ::= MINUS INTEGER|FLOAT */
- 190, /* (256) cmd ::= createkw trigger_decl BEGIN trigger_cmd_list END */
- 285, /* (257) trigger_decl ::= temp TRIGGER ifnotexists nm dbnm trigger_time trigger_event ON fullname foreach_clause when_clause */
- 287, /* (258) trigger_time ::= BEFORE|AFTER */
- 287, /* (259) trigger_time ::= INSTEAD OF */
- 287, /* (260) trigger_time ::= */
- 288, /* (261) trigger_event ::= DELETE|INSERT */
- 288, /* (262) trigger_event ::= UPDATE */
- 288, /* (263) trigger_event ::= UPDATE OF idlist */
- 290, /* (264) when_clause ::= */
- 290, /* (265) when_clause ::= WHEN expr */
- 286, /* (266) trigger_cmd_list ::= trigger_cmd_list trigger_cmd SEMI */
- 286, /* (267) trigger_cmd_list ::= trigger_cmd SEMI */
- 292, /* (268) trnm ::= nm DOT nm */
- 293, /* (269) tridxby ::= INDEXED BY nm */
- 293, /* (270) tridxby ::= NOT INDEXED */
- 291, /* (271) trigger_cmd ::= UPDATE orconf trnm tridxby SET setlist from where_opt scanpt */
- 291, /* (272) trigger_cmd ::= scanpt insert_cmd INTO trnm idlist_opt select upsert scanpt */
- 291, /* (273) trigger_cmd ::= DELETE FROM trnm tridxby where_opt scanpt */
- 291, /* (274) trigger_cmd ::= scanpt select scanpt */
- 217, /* (275) expr ::= RAISE LP IGNORE RP */
- 217, /* (276) expr ::= RAISE LP raisetype COMMA nm RP */
- 236, /* (277) raisetype ::= ROLLBACK */
- 236, /* (278) raisetype ::= ABORT */
- 236, /* (279) raisetype ::= FAIL */
- 190, /* (280) cmd ::= DROP TRIGGER ifexists fullname */
- 190, /* (281) cmd ::= ATTACH database_kw_opt expr AS expr key_opt */
- 190, /* (282) cmd ::= DETACH database_kw_opt expr */
- 295, /* (283) key_opt ::= */
- 295, /* (284) key_opt ::= KEY expr */
- 190, /* (285) cmd ::= REINDEX */
- 190, /* (286) cmd ::= REINDEX nm dbnm */
- 190, /* (287) cmd ::= ANALYZE */
- 190, /* (288) cmd ::= ANALYZE nm dbnm */
- 190, /* (289) cmd ::= ALTER TABLE fullname RENAME TO nm */
- 190, /* (290) cmd ::= ALTER TABLE add_column_fullname ADD kwcolumn_opt columnname carglist */
- 190, /* (291) cmd ::= ALTER TABLE fullname DROP kwcolumn_opt nm */
- 296, /* (292) add_column_fullname ::= fullname */
- 190, /* (293) cmd ::= ALTER TABLE fullname RENAME kwcolumn_opt nm TO nm */
- 190, /* (294) cmd ::= create_vtab */
- 190, /* (295) cmd ::= create_vtab LP vtabarglist RP */
- 298, /* (296) create_vtab ::= createkw VIRTUAL TABLE ifnotexists nm dbnm USING nm */
- 300, /* (297) vtabarg ::= */
- 301, /* (298) vtabargtoken ::= ANY */
- 301, /* (299) vtabargtoken ::= lp anylist RP */
- 302, /* (300) lp ::= LP */
- 266, /* (301) with ::= WITH wqlist */
- 266, /* (302) with ::= WITH RECURSIVE wqlist */
- 305, /* (303) wqas ::= AS */
- 305, /* (304) wqas ::= AS MATERIALIZED */
- 305, /* (305) wqas ::= AS NOT MATERIALIZED */
- 304, /* (306) wqitem ::= nm eidlist_opt wqas LP select RP */
- 241, /* (307) wqlist ::= wqitem */
- 241, /* (308) wqlist ::= wqlist COMMA wqitem */
- 306, /* (309) windowdefn_list ::= windowdefn */
- 306, /* (310) windowdefn_list ::= windowdefn_list COMMA windowdefn */
- 307, /* (311) windowdefn ::= nm AS LP window RP */
- 308, /* (312) window ::= PARTITION BY nexprlist orderby_opt frame_opt */
- 308, /* (313) window ::= nm PARTITION BY nexprlist orderby_opt frame_opt */
- 308, /* (314) window ::= ORDER BY sortlist frame_opt */
- 308, /* (315) window ::= nm ORDER BY sortlist frame_opt */
- 308, /* (316) window ::= frame_opt */
+ 217, /* (188) expr ::= ID|INDEXED|JOIN_KW LP distinct exprlist ORDER BY sortlist RP */
+ 217, /* (189) expr ::= ID|INDEXED|JOIN_KW LP STAR RP */
+ 217, /* (190) expr ::= ID|INDEXED|JOIN_KW LP distinct exprlist RP filter_over */
+ 217, /* (191) expr ::= ID|INDEXED|JOIN_KW LP distinct exprlist ORDER BY sortlist RP filter_over */
+ 217, /* (192) expr ::= ID|INDEXED|JOIN_KW LP STAR RP filter_over */
+ 216, /* (193) term ::= CTIME_KW */
+ 217, /* (194) expr ::= LP nexprlist COMMA expr RP */
+ 217, /* (195) expr ::= expr AND expr */
+ 217, /* (196) expr ::= expr OR expr */
+ 217, /* (197) expr ::= expr LT|GT|GE|LE expr */
+ 217, /* (198) expr ::= expr EQ|NE expr */
+ 217, /* (199) expr ::= expr BITAND|BITOR|LSHIFT|RSHIFT expr */
+ 217, /* (200) expr ::= expr PLUS|MINUS expr */
+ 217, /* (201) expr ::= expr STAR|SLASH|REM expr */
+ 217, /* (202) expr ::= expr CONCAT expr */
+ 274, /* (203) likeop ::= NOT LIKE_KW|MATCH */
+ 217, /* (204) expr ::= expr likeop expr */
+ 217, /* (205) expr ::= expr likeop expr ESCAPE expr */
+ 217, /* (206) expr ::= expr ISNULL|NOTNULL */
+ 217, /* (207) expr ::= expr NOT NULL */
+ 217, /* (208) expr ::= expr IS expr */
+ 217, /* (209) expr ::= expr IS NOT expr */
+ 217, /* (210) expr ::= expr IS NOT DISTINCT FROM expr */
+ 217, /* (211) expr ::= expr IS DISTINCT FROM expr */
+ 217, /* (212) expr ::= NOT expr */
+ 217, /* (213) expr ::= BITNOT expr */
+ 217, /* (214) expr ::= PLUS|MINUS expr */
+ 217, /* (215) expr ::= expr PTR expr */
+ 275, /* (216) between_op ::= BETWEEN */
+ 275, /* (217) between_op ::= NOT BETWEEN */
+ 217, /* (218) expr ::= expr between_op expr AND expr */
+ 276, /* (219) in_op ::= IN */
+ 276, /* (220) in_op ::= NOT IN */
+ 217, /* (221) expr ::= expr in_op LP exprlist RP */
+ 217, /* (222) expr ::= LP select RP */
+ 217, /* (223) expr ::= expr in_op LP select RP */
+ 217, /* (224) expr ::= expr in_op nm dbnm paren_exprlist */
+ 217, /* (225) expr ::= EXISTS LP select RP */
+ 217, /* (226) expr ::= CASE case_operand case_exprlist case_else END */
+ 279, /* (227) case_exprlist ::= case_exprlist WHEN expr THEN expr */
+ 279, /* (228) case_exprlist ::= WHEN expr THEN expr */
+ 280, /* (229) case_else ::= ELSE expr */
+ 280, /* (230) case_else ::= */
+ 278, /* (231) case_operand ::= */
+ 261, /* (232) exprlist ::= */
+ 253, /* (233) nexprlist ::= nexprlist COMMA expr */
+ 253, /* (234) nexprlist ::= expr */
+ 277, /* (235) paren_exprlist ::= */
+ 277, /* (236) paren_exprlist ::= LP exprlist RP */
+ 190, /* (237) cmd ::= createkw uniqueflag INDEX ifnotexists nm dbnm ON nm LP sortlist RP where_opt */
+ 281, /* (238) uniqueflag ::= UNIQUE */
+ 281, /* (239) uniqueflag ::= */
+ 221, /* (240) eidlist_opt ::= */
+ 221, /* (241) eidlist_opt ::= LP eidlist RP */
+ 232, /* (242) eidlist ::= eidlist COMMA nm collate sortorder */
+ 232, /* (243) eidlist ::= nm collate sortorder */
+ 282, /* (244) collate ::= */
+ 282, /* (245) collate ::= COLLATE ID|STRING */
+ 190, /* (246) cmd ::= DROP INDEX ifexists fullname */
+ 190, /* (247) cmd ::= VACUUM vinto */
+ 190, /* (248) cmd ::= VACUUM nm vinto */
+ 283, /* (249) vinto ::= INTO expr */
+ 283, /* (250) vinto ::= */
+ 190, /* (251) cmd ::= PRAGMA nm dbnm */
+ 190, /* (252) cmd ::= PRAGMA nm dbnm EQ nmnum */
+ 190, /* (253) cmd ::= PRAGMA nm dbnm LP nmnum RP */
+ 190, /* (254) cmd ::= PRAGMA nm dbnm EQ minus_num */
+ 190, /* (255) cmd ::= PRAGMA nm dbnm LP minus_num RP */
+ 211, /* (256) plus_num ::= PLUS INTEGER|FLOAT */
+ 212, /* (257) minus_num ::= MINUS INTEGER|FLOAT */
+ 190, /* (258) cmd ::= createkw trigger_decl BEGIN trigger_cmd_list END */
+ 285, /* (259) trigger_decl ::= temp TRIGGER ifnotexists nm dbnm trigger_time trigger_event ON fullname foreach_clause when_clause */
+ 287, /* (260) trigger_time ::= BEFORE|AFTER */
+ 287, /* (261) trigger_time ::= INSTEAD OF */
+ 287, /* (262) trigger_time ::= */
+ 288, /* (263) trigger_event ::= DELETE|INSERT */
+ 288, /* (264) trigger_event ::= UPDATE */
+ 288, /* (265) trigger_event ::= UPDATE OF idlist */
+ 290, /* (266) when_clause ::= */
+ 290, /* (267) when_clause ::= WHEN expr */
+ 286, /* (268) trigger_cmd_list ::= trigger_cmd_list trigger_cmd SEMI */
+ 286, /* (269) trigger_cmd_list ::= trigger_cmd SEMI */
+ 292, /* (270) trnm ::= nm DOT nm */
+ 293, /* (271) tridxby ::= INDEXED BY nm */
+ 293, /* (272) tridxby ::= NOT INDEXED */
+ 291, /* (273) trigger_cmd ::= UPDATE orconf trnm tridxby SET setlist from where_opt scanpt */
+ 291, /* (274) trigger_cmd ::= scanpt insert_cmd INTO trnm idlist_opt select upsert scanpt */
+ 291, /* (275) trigger_cmd ::= DELETE FROM trnm tridxby where_opt scanpt */
+ 291, /* (276) trigger_cmd ::= scanpt select scanpt */
+ 217, /* (277) expr ::= RAISE LP IGNORE RP */
+ 217, /* (278) expr ::= RAISE LP raisetype COMMA nm RP */
+ 236, /* (279) raisetype ::= ROLLBACK */
+ 236, /* (280) raisetype ::= ABORT */
+ 236, /* (281) raisetype ::= FAIL */
+ 190, /* (282) cmd ::= DROP TRIGGER ifexists fullname */
+ 190, /* (283) cmd ::= ATTACH database_kw_opt expr AS expr key_opt */
+ 190, /* (284) cmd ::= DETACH database_kw_opt expr */
+ 295, /* (285) key_opt ::= */
+ 295, /* (286) key_opt ::= KEY expr */
+ 190, /* (287) cmd ::= REINDEX */
+ 190, /* (288) cmd ::= REINDEX nm dbnm */
+ 190, /* (289) cmd ::= ANALYZE */
+ 190, /* (290) cmd ::= ANALYZE nm dbnm */
+ 190, /* (291) cmd ::= ALTER TABLE fullname RENAME TO nm */
+ 190, /* (292) cmd ::= ALTER TABLE add_column_fullname ADD kwcolumn_opt columnname carglist */
+ 190, /* (293) cmd ::= ALTER TABLE fullname DROP kwcolumn_opt nm */
+ 296, /* (294) add_column_fullname ::= fullname */
+ 190, /* (295) cmd ::= ALTER TABLE fullname RENAME kwcolumn_opt nm TO nm */
+ 190, /* (296) cmd ::= create_vtab */
+ 190, /* (297) cmd ::= create_vtab LP vtabarglist RP */
+ 298, /* (298) create_vtab ::= createkw VIRTUAL TABLE ifnotexists nm dbnm USING nm */
+ 300, /* (299) vtabarg ::= */
+ 301, /* (300) vtabargtoken ::= ANY */
+ 301, /* (301) vtabargtoken ::= lp anylist RP */
+ 302, /* (302) lp ::= LP */
+ 266, /* (303) with ::= WITH wqlist */
+ 266, /* (304) with ::= WITH RECURSIVE wqlist */
+ 305, /* (305) wqas ::= AS */
+ 305, /* (306) wqas ::= AS MATERIALIZED */
+ 305, /* (307) wqas ::= AS NOT MATERIALIZED */
+ 304, /* (308) wqitem ::= nm eidlist_opt wqas LP select RP */
+ 241, /* (309) wqlist ::= wqitem */
+ 241, /* (310) wqlist ::= wqlist COMMA wqitem */
+ 306, /* (311) windowdefn_list ::= windowdefn_list COMMA windowdefn */
+ 307, /* (312) windowdefn ::= nm AS LP window RP */
+ 308, /* (313) window ::= PARTITION BY nexprlist orderby_opt frame_opt */
+ 308, /* (314) window ::= nm PARTITION BY nexprlist orderby_opt frame_opt */
+ 308, /* (315) window ::= ORDER BY sortlist frame_opt */
+ 308, /* (316) window ::= nm ORDER BY sortlist frame_opt */
308, /* (317) window ::= nm frame_opt */
309, /* (318) frame_opt ::= */
309, /* (319) frame_opt ::= range_or_rows frame_bound_s frame_exclude_opt */
@@ -170849,6 +174042,8 @@ static const YYCODETYPE yyRuleInfoLhs[] = {
303, /* (400) anylist ::= anylist LP anylist RP */
303, /* (401) anylist ::= anylist ANY */
266, /* (402) with ::= */
+ 306, /* (403) windowdefn_list ::= windowdefn */
+ 308, /* (404) window ::= frame_opt */
};
/* For rule J, yyRuleInfoNRhs[J] contains the negative of the number
@@ -171042,135 +174237,135 @@ static const signed char yyRuleInfoNRhs[] = {
-3, /* (185) expr ::= expr COLLATE ID|STRING */
-6, /* (186) expr ::= CAST LP expr AS typetoken RP */
-5, /* (187) expr ::= ID|INDEXED|JOIN_KW LP distinct exprlist RP */
- -4, /* (188) expr ::= ID|INDEXED|JOIN_KW LP STAR RP */
- -6, /* (189) expr ::= ID|INDEXED|JOIN_KW LP distinct exprlist RP filter_over */
- -5, /* (190) expr ::= ID|INDEXED|JOIN_KW LP STAR RP filter_over */
- -1, /* (191) term ::= CTIME_KW */
- -5, /* (192) expr ::= LP nexprlist COMMA expr RP */
- -3, /* (193) expr ::= expr AND expr */
- -3, /* (194) expr ::= expr OR expr */
- -3, /* (195) expr ::= expr LT|GT|GE|LE expr */
- -3, /* (196) expr ::= expr EQ|NE expr */
- -3, /* (197) expr ::= expr BITAND|BITOR|LSHIFT|RSHIFT expr */
- -3, /* (198) expr ::= expr PLUS|MINUS expr */
- -3, /* (199) expr ::= expr STAR|SLASH|REM expr */
- -3, /* (200) expr ::= expr CONCAT expr */
- -2, /* (201) likeop ::= NOT LIKE_KW|MATCH */
- -3, /* (202) expr ::= expr likeop expr */
- -5, /* (203) expr ::= expr likeop expr ESCAPE expr */
- -2, /* (204) expr ::= expr ISNULL|NOTNULL */
- -3, /* (205) expr ::= expr NOT NULL */
- -3, /* (206) expr ::= expr IS expr */
- -4, /* (207) expr ::= expr IS NOT expr */
- -6, /* (208) expr ::= expr IS NOT DISTINCT FROM expr */
- -5, /* (209) expr ::= expr IS DISTINCT FROM expr */
- -2, /* (210) expr ::= NOT expr */
- -2, /* (211) expr ::= BITNOT expr */
- -2, /* (212) expr ::= PLUS|MINUS expr */
- -3, /* (213) expr ::= expr PTR expr */
- -1, /* (214) between_op ::= BETWEEN */
- -2, /* (215) between_op ::= NOT BETWEEN */
- -5, /* (216) expr ::= expr between_op expr AND expr */
- -1, /* (217) in_op ::= IN */
- -2, /* (218) in_op ::= NOT IN */
- -5, /* (219) expr ::= expr in_op LP exprlist RP */
- -3, /* (220) expr ::= LP select RP */
- -5, /* (221) expr ::= expr in_op LP select RP */
- -5, /* (222) expr ::= expr in_op nm dbnm paren_exprlist */
- -4, /* (223) expr ::= EXISTS LP select RP */
- -5, /* (224) expr ::= CASE case_operand case_exprlist case_else END */
- -5, /* (225) case_exprlist ::= case_exprlist WHEN expr THEN expr */
- -4, /* (226) case_exprlist ::= WHEN expr THEN expr */
- -2, /* (227) case_else ::= ELSE expr */
- 0, /* (228) case_else ::= */
- 0, /* (229) case_operand ::= */
- 0, /* (230) exprlist ::= */
- -3, /* (231) nexprlist ::= nexprlist COMMA expr */
- -1, /* (232) nexprlist ::= expr */
- 0, /* (233) paren_exprlist ::= */
- -3, /* (234) paren_exprlist ::= LP exprlist RP */
- -12, /* (235) cmd ::= createkw uniqueflag INDEX ifnotexists nm dbnm ON nm LP sortlist RP where_opt */
- -1, /* (236) uniqueflag ::= UNIQUE */
- 0, /* (237) uniqueflag ::= */
- 0, /* (238) eidlist_opt ::= */
- -3, /* (239) eidlist_opt ::= LP eidlist RP */
- -5, /* (240) eidlist ::= eidlist COMMA nm collate sortorder */
- -3, /* (241) eidlist ::= nm collate sortorder */
- 0, /* (242) collate ::= */
- -2, /* (243) collate ::= COLLATE ID|STRING */
- -4, /* (244) cmd ::= DROP INDEX ifexists fullname */
- -2, /* (245) cmd ::= VACUUM vinto */
- -3, /* (246) cmd ::= VACUUM nm vinto */
- -2, /* (247) vinto ::= INTO expr */
- 0, /* (248) vinto ::= */
- -3, /* (249) cmd ::= PRAGMA nm dbnm */
- -5, /* (250) cmd ::= PRAGMA nm dbnm EQ nmnum */
- -6, /* (251) cmd ::= PRAGMA nm dbnm LP nmnum RP */
- -5, /* (252) cmd ::= PRAGMA nm dbnm EQ minus_num */
- -6, /* (253) cmd ::= PRAGMA nm dbnm LP minus_num RP */
- -2, /* (254) plus_num ::= PLUS INTEGER|FLOAT */
- -2, /* (255) minus_num ::= MINUS INTEGER|FLOAT */
- -5, /* (256) cmd ::= createkw trigger_decl BEGIN trigger_cmd_list END */
- -11, /* (257) trigger_decl ::= temp TRIGGER ifnotexists nm dbnm trigger_time trigger_event ON fullname foreach_clause when_clause */
- -1, /* (258) trigger_time ::= BEFORE|AFTER */
- -2, /* (259) trigger_time ::= INSTEAD OF */
- 0, /* (260) trigger_time ::= */
- -1, /* (261) trigger_event ::= DELETE|INSERT */
- -1, /* (262) trigger_event ::= UPDATE */
- -3, /* (263) trigger_event ::= UPDATE OF idlist */
- 0, /* (264) when_clause ::= */
- -2, /* (265) when_clause ::= WHEN expr */
- -3, /* (266) trigger_cmd_list ::= trigger_cmd_list trigger_cmd SEMI */
- -2, /* (267) trigger_cmd_list ::= trigger_cmd SEMI */
- -3, /* (268) trnm ::= nm DOT nm */
- -3, /* (269) tridxby ::= INDEXED BY nm */
- -2, /* (270) tridxby ::= NOT INDEXED */
- -9, /* (271) trigger_cmd ::= UPDATE orconf trnm tridxby SET setlist from where_opt scanpt */
- -8, /* (272) trigger_cmd ::= scanpt insert_cmd INTO trnm idlist_opt select upsert scanpt */
- -6, /* (273) trigger_cmd ::= DELETE FROM trnm tridxby where_opt scanpt */
- -3, /* (274) trigger_cmd ::= scanpt select scanpt */
- -4, /* (275) expr ::= RAISE LP IGNORE RP */
- -6, /* (276) expr ::= RAISE LP raisetype COMMA nm RP */
- -1, /* (277) raisetype ::= ROLLBACK */
- -1, /* (278) raisetype ::= ABORT */
- -1, /* (279) raisetype ::= FAIL */
- -4, /* (280) cmd ::= DROP TRIGGER ifexists fullname */
- -6, /* (281) cmd ::= ATTACH database_kw_opt expr AS expr key_opt */
- -3, /* (282) cmd ::= DETACH database_kw_opt expr */
- 0, /* (283) key_opt ::= */
- -2, /* (284) key_opt ::= KEY expr */
- -1, /* (285) cmd ::= REINDEX */
- -3, /* (286) cmd ::= REINDEX nm dbnm */
- -1, /* (287) cmd ::= ANALYZE */
- -3, /* (288) cmd ::= ANALYZE nm dbnm */
- -6, /* (289) cmd ::= ALTER TABLE fullname RENAME TO nm */
- -7, /* (290) cmd ::= ALTER TABLE add_column_fullname ADD kwcolumn_opt columnname carglist */
- -6, /* (291) cmd ::= ALTER TABLE fullname DROP kwcolumn_opt nm */
- -1, /* (292) add_column_fullname ::= fullname */
- -8, /* (293) cmd ::= ALTER TABLE fullname RENAME kwcolumn_opt nm TO nm */
- -1, /* (294) cmd ::= create_vtab */
- -4, /* (295) cmd ::= create_vtab LP vtabarglist RP */
- -8, /* (296) create_vtab ::= createkw VIRTUAL TABLE ifnotexists nm dbnm USING nm */
- 0, /* (297) vtabarg ::= */
- -1, /* (298) vtabargtoken ::= ANY */
- -3, /* (299) vtabargtoken ::= lp anylist RP */
- -1, /* (300) lp ::= LP */
- -2, /* (301) with ::= WITH wqlist */
- -3, /* (302) with ::= WITH RECURSIVE wqlist */
- -1, /* (303) wqas ::= AS */
- -2, /* (304) wqas ::= AS MATERIALIZED */
- -3, /* (305) wqas ::= AS NOT MATERIALIZED */
- -6, /* (306) wqitem ::= nm eidlist_opt wqas LP select RP */
- -1, /* (307) wqlist ::= wqitem */
- -3, /* (308) wqlist ::= wqlist COMMA wqitem */
- -1, /* (309) windowdefn_list ::= windowdefn */
- -3, /* (310) windowdefn_list ::= windowdefn_list COMMA windowdefn */
- -5, /* (311) windowdefn ::= nm AS LP window RP */
- -5, /* (312) window ::= PARTITION BY nexprlist orderby_opt frame_opt */
- -6, /* (313) window ::= nm PARTITION BY nexprlist orderby_opt frame_opt */
- -4, /* (314) window ::= ORDER BY sortlist frame_opt */
- -5, /* (315) window ::= nm ORDER BY sortlist frame_opt */
- -1, /* (316) window ::= frame_opt */
+ -8, /* (188) expr ::= ID|INDEXED|JOIN_KW LP distinct exprlist ORDER BY sortlist RP */
+ -4, /* (189) expr ::= ID|INDEXED|JOIN_KW LP STAR RP */
+ -6, /* (190) expr ::= ID|INDEXED|JOIN_KW LP distinct exprlist RP filter_over */
+ -9, /* (191) expr ::= ID|INDEXED|JOIN_KW LP distinct exprlist ORDER BY sortlist RP filter_over */
+ -5, /* (192) expr ::= ID|INDEXED|JOIN_KW LP STAR RP filter_over */
+ -1, /* (193) term ::= CTIME_KW */
+ -5, /* (194) expr ::= LP nexprlist COMMA expr RP */
+ -3, /* (195) expr ::= expr AND expr */
+ -3, /* (196) expr ::= expr OR expr */
+ -3, /* (197) expr ::= expr LT|GT|GE|LE expr */
+ -3, /* (198) expr ::= expr EQ|NE expr */
+ -3, /* (199) expr ::= expr BITAND|BITOR|LSHIFT|RSHIFT expr */
+ -3, /* (200) expr ::= expr PLUS|MINUS expr */
+ -3, /* (201) expr ::= expr STAR|SLASH|REM expr */
+ -3, /* (202) expr ::= expr CONCAT expr */
+ -2, /* (203) likeop ::= NOT LIKE_KW|MATCH */
+ -3, /* (204) expr ::= expr likeop expr */
+ -5, /* (205) expr ::= expr likeop expr ESCAPE expr */
+ -2, /* (206) expr ::= expr ISNULL|NOTNULL */
+ -3, /* (207) expr ::= expr NOT NULL */
+ -3, /* (208) expr ::= expr IS expr */
+ -4, /* (209) expr ::= expr IS NOT expr */
+ -6, /* (210) expr ::= expr IS NOT DISTINCT FROM expr */
+ -5, /* (211) expr ::= expr IS DISTINCT FROM expr */
+ -2, /* (212) expr ::= NOT expr */
+ -2, /* (213) expr ::= BITNOT expr */
+ -2, /* (214) expr ::= PLUS|MINUS expr */
+ -3, /* (215) expr ::= expr PTR expr */
+ -1, /* (216) between_op ::= BETWEEN */
+ -2, /* (217) between_op ::= NOT BETWEEN */
+ -5, /* (218) expr ::= expr between_op expr AND expr */
+ -1, /* (219) in_op ::= IN */
+ -2, /* (220) in_op ::= NOT IN */
+ -5, /* (221) expr ::= expr in_op LP exprlist RP */
+ -3, /* (222) expr ::= LP select RP */
+ -5, /* (223) expr ::= expr in_op LP select RP */
+ -5, /* (224) expr ::= expr in_op nm dbnm paren_exprlist */
+ -4, /* (225) expr ::= EXISTS LP select RP */
+ -5, /* (226) expr ::= CASE case_operand case_exprlist case_else END */
+ -5, /* (227) case_exprlist ::= case_exprlist WHEN expr THEN expr */
+ -4, /* (228) case_exprlist ::= WHEN expr THEN expr */
+ -2, /* (229) case_else ::= ELSE expr */
+ 0, /* (230) case_else ::= */
+ 0, /* (231) case_operand ::= */
+ 0, /* (232) exprlist ::= */
+ -3, /* (233) nexprlist ::= nexprlist COMMA expr */
+ -1, /* (234) nexprlist ::= expr */
+ 0, /* (235) paren_exprlist ::= */
+ -3, /* (236) paren_exprlist ::= LP exprlist RP */
+ -12, /* (237) cmd ::= createkw uniqueflag INDEX ifnotexists nm dbnm ON nm LP sortlist RP where_opt */
+ -1, /* (238) uniqueflag ::= UNIQUE */
+ 0, /* (239) uniqueflag ::= */
+ 0, /* (240) eidlist_opt ::= */
+ -3, /* (241) eidlist_opt ::= LP eidlist RP */
+ -5, /* (242) eidlist ::= eidlist COMMA nm collate sortorder */
+ -3, /* (243) eidlist ::= nm collate sortorder */
+ 0, /* (244) collate ::= */
+ -2, /* (245) collate ::= COLLATE ID|STRING */
+ -4, /* (246) cmd ::= DROP INDEX ifexists fullname */
+ -2, /* (247) cmd ::= VACUUM vinto */
+ -3, /* (248) cmd ::= VACUUM nm vinto */
+ -2, /* (249) vinto ::= INTO expr */
+ 0, /* (250) vinto ::= */
+ -3, /* (251) cmd ::= PRAGMA nm dbnm */
+ -5, /* (252) cmd ::= PRAGMA nm dbnm EQ nmnum */
+ -6, /* (253) cmd ::= PRAGMA nm dbnm LP nmnum RP */
+ -5, /* (254) cmd ::= PRAGMA nm dbnm EQ minus_num */
+ -6, /* (255) cmd ::= PRAGMA nm dbnm LP minus_num RP */
+ -2, /* (256) plus_num ::= PLUS INTEGER|FLOAT */
+ -2, /* (257) minus_num ::= MINUS INTEGER|FLOAT */
+ -5, /* (258) cmd ::= createkw trigger_decl BEGIN trigger_cmd_list END */
+ -11, /* (259) trigger_decl ::= temp TRIGGER ifnotexists nm dbnm trigger_time trigger_event ON fullname foreach_clause when_clause */
+ -1, /* (260) trigger_time ::= BEFORE|AFTER */
+ -2, /* (261) trigger_time ::= INSTEAD OF */
+ 0, /* (262) trigger_time ::= */
+ -1, /* (263) trigger_event ::= DELETE|INSERT */
+ -1, /* (264) trigger_event ::= UPDATE */
+ -3, /* (265) trigger_event ::= UPDATE OF idlist */
+ 0, /* (266) when_clause ::= */
+ -2, /* (267) when_clause ::= WHEN expr */
+ -3, /* (268) trigger_cmd_list ::= trigger_cmd_list trigger_cmd SEMI */
+ -2, /* (269) trigger_cmd_list ::= trigger_cmd SEMI */
+ -3, /* (270) trnm ::= nm DOT nm */
+ -3, /* (271) tridxby ::= INDEXED BY nm */
+ -2, /* (272) tridxby ::= NOT INDEXED */
+ -9, /* (273) trigger_cmd ::= UPDATE orconf trnm tridxby SET setlist from where_opt scanpt */
+ -8, /* (274) trigger_cmd ::= scanpt insert_cmd INTO trnm idlist_opt select upsert scanpt */
+ -6, /* (275) trigger_cmd ::= DELETE FROM trnm tridxby where_opt scanpt */
+ -3, /* (276) trigger_cmd ::= scanpt select scanpt */
+ -4, /* (277) expr ::= RAISE LP IGNORE RP */
+ -6, /* (278) expr ::= RAISE LP raisetype COMMA nm RP */
+ -1, /* (279) raisetype ::= ROLLBACK */
+ -1, /* (280) raisetype ::= ABORT */
+ -1, /* (281) raisetype ::= FAIL */
+ -4, /* (282) cmd ::= DROP TRIGGER ifexists fullname */
+ -6, /* (283) cmd ::= ATTACH database_kw_opt expr AS expr key_opt */
+ -3, /* (284) cmd ::= DETACH database_kw_opt expr */
+ 0, /* (285) key_opt ::= */
+ -2, /* (286) key_opt ::= KEY expr */
+ -1, /* (287) cmd ::= REINDEX */
+ -3, /* (288) cmd ::= REINDEX nm dbnm */
+ -1, /* (289) cmd ::= ANALYZE */
+ -3, /* (290) cmd ::= ANALYZE nm dbnm */
+ -6, /* (291) cmd ::= ALTER TABLE fullname RENAME TO nm */
+ -7, /* (292) cmd ::= ALTER TABLE add_column_fullname ADD kwcolumn_opt columnname carglist */
+ -6, /* (293) cmd ::= ALTER TABLE fullname DROP kwcolumn_opt nm */
+ -1, /* (294) add_column_fullname ::= fullname */
+ -8, /* (295) cmd ::= ALTER TABLE fullname RENAME kwcolumn_opt nm TO nm */
+ -1, /* (296) cmd ::= create_vtab */
+ -4, /* (297) cmd ::= create_vtab LP vtabarglist RP */
+ -8, /* (298) create_vtab ::= createkw VIRTUAL TABLE ifnotexists nm dbnm USING nm */
+ 0, /* (299) vtabarg ::= */
+ -1, /* (300) vtabargtoken ::= ANY */
+ -3, /* (301) vtabargtoken ::= lp anylist RP */
+ -1, /* (302) lp ::= LP */
+ -2, /* (303) with ::= WITH wqlist */
+ -3, /* (304) with ::= WITH RECURSIVE wqlist */
+ -1, /* (305) wqas ::= AS */
+ -2, /* (306) wqas ::= AS MATERIALIZED */
+ -3, /* (307) wqas ::= AS NOT MATERIALIZED */
+ -6, /* (308) wqitem ::= nm eidlist_opt wqas LP select RP */
+ -1, /* (309) wqlist ::= wqitem */
+ -3, /* (310) wqlist ::= wqlist COMMA wqitem */
+ -3, /* (311) windowdefn_list ::= windowdefn_list COMMA windowdefn */
+ -5, /* (312) windowdefn ::= nm AS LP window RP */
+ -5, /* (313) window ::= PARTITION BY nexprlist orderby_opt frame_opt */
+ -6, /* (314) window ::= nm PARTITION BY nexprlist orderby_opt frame_opt */
+ -4, /* (315) window ::= ORDER BY sortlist frame_opt */
+ -5, /* (316) window ::= nm ORDER BY sortlist frame_opt */
-2, /* (317) window ::= nm frame_opt */
0, /* (318) frame_opt ::= */
-3, /* (319) frame_opt ::= range_or_rows frame_bound_s frame_exclude_opt */
@@ -171257,6 +174452,8 @@ static const signed char yyRuleInfoNRhs[] = {
-4, /* (400) anylist ::= anylist LP anylist RP */
-2, /* (401) anylist ::= anylist ANY */
0, /* (402) with ::= */
+ -1, /* (403) windowdefn_list ::= windowdefn */
+ -1, /* (404) window ::= frame_opt */
};
static void yy_accept(yyParser*); /* Forward Declaration */
@@ -171299,10 +174496,10 @@ static YYACTIONTYPE yy_reduce(
/********** Begin reduce actions **********************************************/
YYMINORTYPE yylhsminor;
case 0: /* explain ::= EXPLAIN */
-{ pParse->explain = 1; }
+{ if( pParse->pReprepare==0 ) pParse->explain = 1; }
break;
case 1: /* explain ::= EXPLAIN QUERY PLAN */
-{ pParse->explain = 2; }
+{ if( pParse->pReprepare==0 ) pParse->explain = 2; }
break;
case 2: /* cmdx ::= cmd */
{ sqlite3FinishCoding(pParse); }
@@ -171353,7 +174550,7 @@ static YYACTIONTYPE yy_reduce(
case 72: /* defer_subclause_opt ::= */ yytestcase(yyruleno==72);
case 81: /* ifexists ::= */ yytestcase(yyruleno==81);
case 98: /* distinct ::= */ yytestcase(yyruleno==98);
- case 242: /* collate ::= */ yytestcase(yyruleno==242);
+ case 244: /* collate ::= */ yytestcase(yyruleno==244);
{yymsp[1].minor.yy394 = 0;}
break;
case 16: /* ifnotexists ::= IF NOT EXISTS */
@@ -171537,9 +174734,9 @@ static YYACTIONTYPE yy_reduce(
break;
case 63: /* init_deferred_pred_opt ::= INITIALLY DEFERRED */
case 80: /* ifexists ::= IF EXISTS */ yytestcase(yyruleno==80);
- case 215: /* between_op ::= NOT BETWEEN */ yytestcase(yyruleno==215);
- case 218: /* in_op ::= NOT IN */ yytestcase(yyruleno==218);
- case 243: /* collate ::= COLLATE ID|STRING */ yytestcase(yyruleno==243);
+ case 217: /* between_op ::= NOT BETWEEN */ yytestcase(yyruleno==217);
+ case 220: /* in_op ::= NOT IN */ yytestcase(yyruleno==220);
+ case 245: /* collate ::= COLLATE ID|STRING */ yytestcase(yyruleno==245);
{yymsp[-1].minor.yy394 = 1;}
break;
case 64: /* init_deferred_pred_opt ::= INITIALLY IMMEDIATE */
@@ -171612,7 +174809,6 @@ static YYACTIONTYPE yy_reduce(
if( p ){
parserDoubleLinkSelect(pParse, p);
}
- yymsp[0].minor.yy47 = p; /*A-overwrites-X*/
}
break;
case 88: /* selectnowith ::= selectnowith multiselect_op oneselect */
@@ -171689,9 +174885,9 @@ static YYACTIONTYPE yy_reduce(
case 99: /* sclp ::= */
case 132: /* orderby_opt ::= */ yytestcase(yyruleno==132);
case 142: /* groupby_opt ::= */ yytestcase(yyruleno==142);
- case 230: /* exprlist ::= */ yytestcase(yyruleno==230);
- case 233: /* paren_exprlist ::= */ yytestcase(yyruleno==233);
- case 238: /* eidlist_opt ::= */ yytestcase(yyruleno==238);
+ case 232: /* exprlist ::= */ yytestcase(yyruleno==232);
+ case 235: /* paren_exprlist ::= */ yytestcase(yyruleno==235);
+ case 240: /* eidlist_opt ::= */ yytestcase(yyruleno==240);
{yymsp[1].minor.yy322 = 0;}
break;
case 100: /* selcollist ::= sclp scanpt expr scanpt as */
@@ -171704,21 +174900,24 @@ static YYACTIONTYPE yy_reduce(
case 101: /* selcollist ::= sclp scanpt STAR */
{
Expr *p = sqlite3Expr(pParse->db, TK_ASTERISK, 0);
+ sqlite3ExprSetErrorOffset(p, (int)(yymsp[0].minor.yy0.z - pParse->zTail));
yymsp[-2].minor.yy322 = sqlite3ExprListAppend(pParse, yymsp[-2].minor.yy322, p);
}
break;
case 102: /* selcollist ::= sclp scanpt nm DOT STAR */
{
- Expr *pRight = sqlite3PExpr(pParse, TK_ASTERISK, 0, 0);
- Expr *pLeft = tokenExpr(pParse, TK_ID, yymsp[-2].minor.yy0);
- Expr *pDot = sqlite3PExpr(pParse, TK_DOT, pLeft, pRight);
+ Expr *pRight, *pLeft, *pDot;
+ pRight = sqlite3PExpr(pParse, TK_ASTERISK, 0, 0);
+ sqlite3ExprSetErrorOffset(pRight, (int)(yymsp[0].minor.yy0.z - pParse->zTail));
+ pLeft = tokenExpr(pParse, TK_ID, yymsp[-2].minor.yy0);
+ pDot = sqlite3PExpr(pParse, TK_DOT, pLeft, pRight);
yymsp[-4].minor.yy322 = sqlite3ExprListAppend(pParse,yymsp[-4].minor.yy322, pDot);
}
break;
case 103: /* as ::= AS nm */
case 115: /* dbnm ::= DOT nm */ yytestcase(yyruleno==115);
- case 254: /* plus_num ::= PLUS INTEGER|FLOAT */ yytestcase(yyruleno==254);
- case 255: /* minus_num ::= MINUS INTEGER|FLOAT */ yytestcase(yyruleno==255);
+ case 256: /* plus_num ::= PLUS INTEGER|FLOAT */ yytestcase(yyruleno==256);
+ case 257: /* minus_num ::= MINUS INTEGER|FLOAT */ yytestcase(yyruleno==257);
{yymsp[-1].minor.yy0 = yymsp[0].minor.yy0;}
break;
case 105: /* from ::= */
@@ -171890,16 +175089,16 @@ static YYACTIONTYPE yy_reduce(
case 146: /* limit_opt ::= */ yytestcase(yyruleno==146);
case 151: /* where_opt ::= */ yytestcase(yyruleno==151);
case 153: /* where_opt_ret ::= */ yytestcase(yyruleno==153);
- case 228: /* case_else ::= */ yytestcase(yyruleno==228);
- case 229: /* case_operand ::= */ yytestcase(yyruleno==229);
- case 248: /* vinto ::= */ yytestcase(yyruleno==248);
+ case 230: /* case_else ::= */ yytestcase(yyruleno==230);
+ case 231: /* case_operand ::= */ yytestcase(yyruleno==231);
+ case 250: /* vinto ::= */ yytestcase(yyruleno==250);
{yymsp[1].minor.yy528 = 0;}
break;
case 145: /* having_opt ::= HAVING expr */
case 152: /* where_opt ::= WHERE expr */ yytestcase(yyruleno==152);
case 154: /* where_opt_ret ::= WHERE expr */ yytestcase(yyruleno==154);
- case 227: /* case_else ::= ELSE expr */ yytestcase(yyruleno==227);
- case 247: /* vinto ::= INTO expr */ yytestcase(yyruleno==247);
+ case 229: /* case_else ::= ELSE expr */ yytestcase(yyruleno==229);
+ case 249: /* vinto ::= INTO expr */ yytestcase(yyruleno==249);
{yymsp[-1].minor.yy528 = yymsp[0].minor.yy528;}
break;
case 147: /* limit_opt ::= LIMIT expr */
@@ -172085,33 +175284,48 @@ static YYACTIONTYPE yy_reduce(
}
yymsp[-4].minor.yy528 = yylhsminor.yy528;
break;
- case 188: /* expr ::= ID|INDEXED|JOIN_KW LP STAR RP */
+ case 188: /* expr ::= ID|INDEXED|JOIN_KW LP distinct exprlist ORDER BY sortlist RP */
+{
+ yylhsminor.yy528 = sqlite3ExprFunction(pParse, yymsp[-4].minor.yy322, &yymsp[-7].minor.yy0, yymsp[-5].minor.yy394);
+ sqlite3ExprAddFunctionOrderBy(pParse, yylhsminor.yy528, yymsp[-1].minor.yy322);
+}
+ yymsp[-7].minor.yy528 = yylhsminor.yy528;
+ break;
+ case 189: /* expr ::= ID|INDEXED|JOIN_KW LP STAR RP */
{
yylhsminor.yy528 = sqlite3ExprFunction(pParse, 0, &yymsp[-3].minor.yy0, 0);
}
yymsp[-3].minor.yy528 = yylhsminor.yy528;
break;
- case 189: /* expr ::= ID|INDEXED|JOIN_KW LP distinct exprlist RP filter_over */
+ case 190: /* expr ::= ID|INDEXED|JOIN_KW LP distinct exprlist RP filter_over */
{
yylhsminor.yy528 = sqlite3ExprFunction(pParse, yymsp[-2].minor.yy322, &yymsp[-5].minor.yy0, yymsp[-3].minor.yy394);
sqlite3WindowAttach(pParse, yylhsminor.yy528, yymsp[0].minor.yy41);
}
yymsp[-5].minor.yy528 = yylhsminor.yy528;
break;
- case 190: /* expr ::= ID|INDEXED|JOIN_KW LP STAR RP filter_over */
+ case 191: /* expr ::= ID|INDEXED|JOIN_KW LP distinct exprlist ORDER BY sortlist RP filter_over */
+{
+ yylhsminor.yy528 = sqlite3ExprFunction(pParse, yymsp[-5].minor.yy322, &yymsp[-8].minor.yy0, yymsp[-6].minor.yy394);
+ sqlite3WindowAttach(pParse, yylhsminor.yy528, yymsp[0].minor.yy41);
+ sqlite3ExprAddFunctionOrderBy(pParse, yylhsminor.yy528, yymsp[-2].minor.yy322);
+}
+ yymsp[-8].minor.yy528 = yylhsminor.yy528;
+ break;
+ case 192: /* expr ::= ID|INDEXED|JOIN_KW LP STAR RP filter_over */
{
yylhsminor.yy528 = sqlite3ExprFunction(pParse, 0, &yymsp[-4].minor.yy0, 0);
sqlite3WindowAttach(pParse, yylhsminor.yy528, yymsp[0].minor.yy41);
}
yymsp[-4].minor.yy528 = yylhsminor.yy528;
break;
- case 191: /* term ::= CTIME_KW */
+ case 193: /* term ::= CTIME_KW */
{
yylhsminor.yy528 = sqlite3ExprFunction(pParse, 0, &yymsp[0].minor.yy0, 0);
}
yymsp[0].minor.yy528 = yylhsminor.yy528;
break;
- case 192: /* expr ::= LP nexprlist COMMA expr RP */
+ case 194: /* expr ::= LP nexprlist COMMA expr RP */
{
ExprList *pList = sqlite3ExprListAppend(pParse, yymsp[-3].minor.yy322, yymsp[-1].minor.yy528);
yymsp[-4].minor.yy528 = sqlite3PExpr(pParse, TK_VECTOR, 0, 0);
@@ -172125,22 +175339,22 @@ static YYACTIONTYPE yy_reduce(
}
}
break;
- case 193: /* expr ::= expr AND expr */
+ case 195: /* expr ::= expr AND expr */
{yymsp[-2].minor.yy528=sqlite3ExprAnd(pParse,yymsp[-2].minor.yy528,yymsp[0].minor.yy528);}
break;
- case 194: /* expr ::= expr OR expr */
- case 195: /* expr ::= expr LT|GT|GE|LE expr */ yytestcase(yyruleno==195);
- case 196: /* expr ::= expr EQ|NE expr */ yytestcase(yyruleno==196);
- case 197: /* expr ::= expr BITAND|BITOR|LSHIFT|RSHIFT expr */ yytestcase(yyruleno==197);
- case 198: /* expr ::= expr PLUS|MINUS expr */ yytestcase(yyruleno==198);
- case 199: /* expr ::= expr STAR|SLASH|REM expr */ yytestcase(yyruleno==199);
- case 200: /* expr ::= expr CONCAT expr */ yytestcase(yyruleno==200);
+ case 196: /* expr ::= expr OR expr */
+ case 197: /* expr ::= expr LT|GT|GE|LE expr */ yytestcase(yyruleno==197);
+ case 198: /* expr ::= expr EQ|NE expr */ yytestcase(yyruleno==198);
+ case 199: /* expr ::= expr BITAND|BITOR|LSHIFT|RSHIFT expr */ yytestcase(yyruleno==199);
+ case 200: /* expr ::= expr PLUS|MINUS expr */ yytestcase(yyruleno==200);
+ case 201: /* expr ::= expr STAR|SLASH|REM expr */ yytestcase(yyruleno==201);
+ case 202: /* expr ::= expr CONCAT expr */ yytestcase(yyruleno==202);
{yymsp[-2].minor.yy528=sqlite3PExpr(pParse,yymsp[-1].major,yymsp[-2].minor.yy528,yymsp[0].minor.yy528);}
break;
- case 201: /* likeop ::= NOT LIKE_KW|MATCH */
+ case 203: /* likeop ::= NOT LIKE_KW|MATCH */
{yymsp[-1].minor.yy0=yymsp[0].minor.yy0; yymsp[-1].minor.yy0.n|=0x80000000; /*yymsp[-1].minor.yy0-overwrite-yymsp[0].minor.yy0*/}
break;
- case 202: /* expr ::= expr likeop expr */
+ case 204: /* expr ::= expr likeop expr */
{
ExprList *pList;
int bNot = yymsp[-1].minor.yy0.n & 0x80000000;
@@ -172152,7 +175366,7 @@ static YYACTIONTYPE yy_reduce(
if( yymsp[-2].minor.yy528 ) yymsp[-2].minor.yy528->flags |= EP_InfixFunc;
}
break;
- case 203: /* expr ::= expr likeop expr ESCAPE expr */
+ case 205: /* expr ::= expr likeop expr ESCAPE expr */
{
ExprList *pList;
int bNot = yymsp[-3].minor.yy0.n & 0x80000000;
@@ -172165,47 +175379,47 @@ static YYACTIONTYPE yy_reduce(
if( yymsp[-4].minor.yy528 ) yymsp[-4].minor.yy528->flags |= EP_InfixFunc;
}
break;
- case 204: /* expr ::= expr ISNULL|NOTNULL */
+ case 206: /* expr ::= expr ISNULL|NOTNULL */
{yymsp[-1].minor.yy528 = sqlite3PExpr(pParse,yymsp[0].major,yymsp[-1].minor.yy528,0);}
break;
- case 205: /* expr ::= expr NOT NULL */
+ case 207: /* expr ::= expr NOT NULL */
{yymsp[-2].minor.yy528 = sqlite3PExpr(pParse,TK_NOTNULL,yymsp[-2].minor.yy528,0);}
break;
- case 206: /* expr ::= expr IS expr */
+ case 208: /* expr ::= expr IS expr */
{
yymsp[-2].minor.yy528 = sqlite3PExpr(pParse,TK_IS,yymsp[-2].minor.yy528,yymsp[0].minor.yy528);
binaryToUnaryIfNull(pParse, yymsp[0].minor.yy528, yymsp[-2].minor.yy528, TK_ISNULL);
}
break;
- case 207: /* expr ::= expr IS NOT expr */
+ case 209: /* expr ::= expr IS NOT expr */
{
yymsp[-3].minor.yy528 = sqlite3PExpr(pParse,TK_ISNOT,yymsp[-3].minor.yy528,yymsp[0].minor.yy528);
binaryToUnaryIfNull(pParse, yymsp[0].minor.yy528, yymsp[-3].minor.yy528, TK_NOTNULL);
}
break;
- case 208: /* expr ::= expr IS NOT DISTINCT FROM expr */
+ case 210: /* expr ::= expr IS NOT DISTINCT FROM expr */
{
yymsp[-5].minor.yy528 = sqlite3PExpr(pParse,TK_IS,yymsp[-5].minor.yy528,yymsp[0].minor.yy528);
binaryToUnaryIfNull(pParse, yymsp[0].minor.yy528, yymsp[-5].minor.yy528, TK_ISNULL);
}
break;
- case 209: /* expr ::= expr IS DISTINCT FROM expr */
+ case 211: /* expr ::= expr IS DISTINCT FROM expr */
{
yymsp[-4].minor.yy528 = sqlite3PExpr(pParse,TK_ISNOT,yymsp[-4].minor.yy528,yymsp[0].minor.yy528);
binaryToUnaryIfNull(pParse, yymsp[0].minor.yy528, yymsp[-4].minor.yy528, TK_NOTNULL);
}
break;
- case 210: /* expr ::= NOT expr */
- case 211: /* expr ::= BITNOT expr */ yytestcase(yyruleno==211);
+ case 212: /* expr ::= NOT expr */
+ case 213: /* expr ::= BITNOT expr */ yytestcase(yyruleno==213);
{yymsp[-1].minor.yy528 = sqlite3PExpr(pParse, yymsp[-1].major, yymsp[0].minor.yy528, 0);/*A-overwrites-B*/}
break;
- case 212: /* expr ::= PLUS|MINUS expr */
+ case 214: /* expr ::= PLUS|MINUS expr */
{
yymsp[-1].minor.yy528 = sqlite3PExpr(pParse, yymsp[-1].major==TK_PLUS ? TK_UPLUS : TK_UMINUS, yymsp[0].minor.yy528, 0);
/*A-overwrites-B*/
}
break;
- case 213: /* expr ::= expr PTR expr */
+ case 215: /* expr ::= expr PTR expr */
{
ExprList *pList = sqlite3ExprListAppend(pParse, 0, yymsp[-2].minor.yy528);
pList = sqlite3ExprListAppend(pParse, pList, yymsp[0].minor.yy528);
@@ -172213,11 +175427,11 @@ static YYACTIONTYPE yy_reduce(
}
yymsp[-2].minor.yy528 = yylhsminor.yy528;
break;
- case 214: /* between_op ::= BETWEEN */
- case 217: /* in_op ::= IN */ yytestcase(yyruleno==217);
+ case 216: /* between_op ::= BETWEEN */
+ case 219: /* in_op ::= IN */ yytestcase(yyruleno==219);
{yymsp[0].minor.yy394 = 0;}
break;
- case 216: /* expr ::= expr between_op expr AND expr */
+ case 218: /* expr ::= expr between_op expr AND expr */
{
ExprList *pList = sqlite3ExprListAppend(pParse,0, yymsp[-2].minor.yy528);
pList = sqlite3ExprListAppend(pParse,pList, yymsp[0].minor.yy528);
@@ -172230,7 +175444,7 @@ static YYACTIONTYPE yy_reduce(
if( yymsp[-3].minor.yy394 ) yymsp[-4].minor.yy528 = sqlite3PExpr(pParse, TK_NOT, yymsp[-4].minor.yy528, 0);
}
break;
- case 219: /* expr ::= expr in_op LP exprlist RP */
+ case 221: /* expr ::= expr in_op LP exprlist RP */
{
if( yymsp[-1].minor.yy322==0 ){
/* Expressions of the form
@@ -172276,20 +175490,20 @@ static YYACTIONTYPE yy_reduce(
}
}
break;
- case 220: /* expr ::= LP select RP */
+ case 222: /* expr ::= LP select RP */
{
yymsp[-2].minor.yy528 = sqlite3PExpr(pParse, TK_SELECT, 0, 0);
sqlite3PExprAddSelect(pParse, yymsp[-2].minor.yy528, yymsp[-1].minor.yy47);
}
break;
- case 221: /* expr ::= expr in_op LP select RP */
+ case 223: /* expr ::= expr in_op LP select RP */
{
yymsp[-4].minor.yy528 = sqlite3PExpr(pParse, TK_IN, yymsp[-4].minor.yy528, 0);
sqlite3PExprAddSelect(pParse, yymsp[-4].minor.yy528, yymsp[-1].minor.yy47);
if( yymsp[-3].minor.yy394 ) yymsp[-4].minor.yy528 = sqlite3PExpr(pParse, TK_NOT, yymsp[-4].minor.yy528, 0);
}
break;
- case 222: /* expr ::= expr in_op nm dbnm paren_exprlist */
+ case 224: /* expr ::= expr in_op nm dbnm paren_exprlist */
{
SrcList *pSrc = sqlite3SrcListAppend(pParse, 0,&yymsp[-2].minor.yy0,&yymsp[-1].minor.yy0);
Select *pSelect = sqlite3SelectNew(pParse, 0,pSrc,0,0,0,0,0,0);
@@ -172299,14 +175513,14 @@ static YYACTIONTYPE yy_reduce(
if( yymsp[-3].minor.yy394 ) yymsp[-4].minor.yy528 = sqlite3PExpr(pParse, TK_NOT, yymsp[-4].minor.yy528, 0);
}
break;
- case 223: /* expr ::= EXISTS LP select RP */
+ case 225: /* expr ::= EXISTS LP select RP */
{
Expr *p;
p = yymsp[-3].minor.yy528 = sqlite3PExpr(pParse, TK_EXISTS, 0, 0);
sqlite3PExprAddSelect(pParse, p, yymsp[-1].minor.yy47);
}
break;
- case 224: /* expr ::= CASE case_operand case_exprlist case_else END */
+ case 226: /* expr ::= CASE case_operand case_exprlist case_else END */
{
yymsp[-4].minor.yy528 = sqlite3PExpr(pParse, TK_CASE, yymsp[-3].minor.yy528, 0);
if( yymsp[-4].minor.yy528 ){
@@ -172318,29 +175532,29 @@ static YYACTIONTYPE yy_reduce(
}
}
break;
- case 225: /* case_exprlist ::= case_exprlist WHEN expr THEN expr */
+ case 227: /* case_exprlist ::= case_exprlist WHEN expr THEN expr */
{
yymsp[-4].minor.yy322 = sqlite3ExprListAppend(pParse,yymsp[-4].minor.yy322, yymsp[-2].minor.yy528);
yymsp[-4].minor.yy322 = sqlite3ExprListAppend(pParse,yymsp[-4].minor.yy322, yymsp[0].minor.yy528);
}
break;
- case 226: /* case_exprlist ::= WHEN expr THEN expr */
+ case 228: /* case_exprlist ::= WHEN expr THEN expr */
{
yymsp[-3].minor.yy322 = sqlite3ExprListAppend(pParse,0, yymsp[-2].minor.yy528);
yymsp[-3].minor.yy322 = sqlite3ExprListAppend(pParse,yymsp[-3].minor.yy322, yymsp[0].minor.yy528);
}
break;
- case 231: /* nexprlist ::= nexprlist COMMA expr */
+ case 233: /* nexprlist ::= nexprlist COMMA expr */
{yymsp[-2].minor.yy322 = sqlite3ExprListAppend(pParse,yymsp[-2].minor.yy322,yymsp[0].minor.yy528);}
break;
- case 232: /* nexprlist ::= expr */
+ case 234: /* nexprlist ::= expr */
{yymsp[0].minor.yy322 = sqlite3ExprListAppend(pParse,0,yymsp[0].minor.yy528); /*A-overwrites-Y*/}
break;
- case 234: /* paren_exprlist ::= LP exprlist RP */
- case 239: /* eidlist_opt ::= LP eidlist RP */ yytestcase(yyruleno==239);
+ case 236: /* paren_exprlist ::= LP exprlist RP */
+ case 241: /* eidlist_opt ::= LP eidlist RP */ yytestcase(yyruleno==241);
{yymsp[-2].minor.yy322 = yymsp[-1].minor.yy322;}
break;
- case 235: /* cmd ::= createkw uniqueflag INDEX ifnotexists nm dbnm ON nm LP sortlist RP where_opt */
+ case 237: /* cmd ::= createkw uniqueflag INDEX ifnotexists nm dbnm ON nm LP sortlist RP where_opt */
{
sqlite3CreateIndex(pParse, &yymsp[-7].minor.yy0, &yymsp[-6].minor.yy0,
sqlite3SrcListAppend(pParse,0,&yymsp[-4].minor.yy0,0), yymsp[-2].minor.yy322, yymsp[-10].minor.yy394,
@@ -172350,48 +175564,48 @@ static YYACTIONTYPE yy_reduce(
}
}
break;
- case 236: /* uniqueflag ::= UNIQUE */
- case 278: /* raisetype ::= ABORT */ yytestcase(yyruleno==278);
+ case 238: /* uniqueflag ::= UNIQUE */
+ case 280: /* raisetype ::= ABORT */ yytestcase(yyruleno==280);
{yymsp[0].minor.yy394 = OE_Abort;}
break;
- case 237: /* uniqueflag ::= */
+ case 239: /* uniqueflag ::= */
{yymsp[1].minor.yy394 = OE_None;}
break;
- case 240: /* eidlist ::= eidlist COMMA nm collate sortorder */
+ case 242: /* eidlist ::= eidlist COMMA nm collate sortorder */
{
yymsp[-4].minor.yy322 = parserAddExprIdListTerm(pParse, yymsp[-4].minor.yy322, &yymsp[-2].minor.yy0, yymsp[-1].minor.yy394, yymsp[0].minor.yy394);
}
break;
- case 241: /* eidlist ::= nm collate sortorder */
+ case 243: /* eidlist ::= nm collate sortorder */
{
yymsp[-2].minor.yy322 = parserAddExprIdListTerm(pParse, 0, &yymsp[-2].minor.yy0, yymsp[-1].minor.yy394, yymsp[0].minor.yy394); /*A-overwrites-Y*/
}
break;
- case 244: /* cmd ::= DROP INDEX ifexists fullname */
+ case 246: /* cmd ::= DROP INDEX ifexists fullname */
{sqlite3DropIndex(pParse, yymsp[0].minor.yy131, yymsp[-1].minor.yy394);}
break;
- case 245: /* cmd ::= VACUUM vinto */
+ case 247: /* cmd ::= VACUUM vinto */
{sqlite3Vacuum(pParse,0,yymsp[0].minor.yy528);}
break;
- case 246: /* cmd ::= VACUUM nm vinto */
+ case 248: /* cmd ::= VACUUM nm vinto */
{sqlite3Vacuum(pParse,&yymsp[-1].minor.yy0,yymsp[0].minor.yy528);}
break;
- case 249: /* cmd ::= PRAGMA nm dbnm */
+ case 251: /* cmd ::= PRAGMA nm dbnm */
{sqlite3Pragma(pParse,&yymsp[-1].minor.yy0,&yymsp[0].minor.yy0,0,0);}
break;
- case 250: /* cmd ::= PRAGMA nm dbnm EQ nmnum */
+ case 252: /* cmd ::= PRAGMA nm dbnm EQ nmnum */
{sqlite3Pragma(pParse,&yymsp[-3].minor.yy0,&yymsp[-2].minor.yy0,&yymsp[0].minor.yy0,0);}
break;
- case 251: /* cmd ::= PRAGMA nm dbnm LP nmnum RP */
+ case 253: /* cmd ::= PRAGMA nm dbnm LP nmnum RP */
{sqlite3Pragma(pParse,&yymsp[-4].minor.yy0,&yymsp[-3].minor.yy0,&yymsp[-1].minor.yy0,0);}
break;
- case 252: /* cmd ::= PRAGMA nm dbnm EQ minus_num */
+ case 254: /* cmd ::= PRAGMA nm dbnm EQ minus_num */
{sqlite3Pragma(pParse,&yymsp[-3].minor.yy0,&yymsp[-2].minor.yy0,&yymsp[0].minor.yy0,1);}
break;
- case 253: /* cmd ::= PRAGMA nm dbnm LP minus_num RP */
+ case 255: /* cmd ::= PRAGMA nm dbnm LP minus_num RP */
{sqlite3Pragma(pParse,&yymsp[-4].minor.yy0,&yymsp[-3].minor.yy0,&yymsp[-1].minor.yy0,1);}
break;
- case 256: /* cmd ::= createkw trigger_decl BEGIN trigger_cmd_list END */
+ case 258: /* cmd ::= createkw trigger_decl BEGIN trigger_cmd_list END */
{
Token all;
all.z = yymsp[-3].minor.yy0.z;
@@ -172399,50 +175613,50 @@ static YYACTIONTYPE yy_reduce(
sqlite3FinishTrigger(pParse, yymsp[-1].minor.yy33, &all);
}
break;
- case 257: /* trigger_decl ::= temp TRIGGER ifnotexists nm dbnm trigger_time trigger_event ON fullname foreach_clause when_clause */
+ case 259: /* trigger_decl ::= temp TRIGGER ifnotexists nm dbnm trigger_time trigger_event ON fullname foreach_clause when_clause */
{
sqlite3BeginTrigger(pParse, &yymsp[-7].minor.yy0, &yymsp[-6].minor.yy0, yymsp[-5].minor.yy394, yymsp[-4].minor.yy180.a, yymsp[-4].minor.yy180.b, yymsp[-2].minor.yy131, yymsp[0].minor.yy528, yymsp[-10].minor.yy394, yymsp[-8].minor.yy394);
yymsp[-10].minor.yy0 = (yymsp[-6].minor.yy0.n==0?yymsp[-7].minor.yy0:yymsp[-6].minor.yy0); /*A-overwrites-T*/
}
break;
- case 258: /* trigger_time ::= BEFORE|AFTER */
+ case 260: /* trigger_time ::= BEFORE|AFTER */
{ yymsp[0].minor.yy394 = yymsp[0].major; /*A-overwrites-X*/ }
break;
- case 259: /* trigger_time ::= INSTEAD OF */
+ case 261: /* trigger_time ::= INSTEAD OF */
{ yymsp[-1].minor.yy394 = TK_INSTEAD;}
break;
- case 260: /* trigger_time ::= */
+ case 262: /* trigger_time ::= */
{ yymsp[1].minor.yy394 = TK_BEFORE; }
break;
- case 261: /* trigger_event ::= DELETE|INSERT */
- case 262: /* trigger_event ::= UPDATE */ yytestcase(yyruleno==262);
+ case 263: /* trigger_event ::= DELETE|INSERT */
+ case 264: /* trigger_event ::= UPDATE */ yytestcase(yyruleno==264);
{yymsp[0].minor.yy180.a = yymsp[0].major; /*A-overwrites-X*/ yymsp[0].minor.yy180.b = 0;}
break;
- case 263: /* trigger_event ::= UPDATE OF idlist */
+ case 265: /* trigger_event ::= UPDATE OF idlist */
{yymsp[-2].minor.yy180.a = TK_UPDATE; yymsp[-2].minor.yy180.b = yymsp[0].minor.yy254;}
break;
- case 264: /* when_clause ::= */
- case 283: /* key_opt ::= */ yytestcase(yyruleno==283);
+ case 266: /* when_clause ::= */
+ case 285: /* key_opt ::= */ yytestcase(yyruleno==285);
{ yymsp[1].minor.yy528 = 0; }
break;
- case 265: /* when_clause ::= WHEN expr */
- case 284: /* key_opt ::= KEY expr */ yytestcase(yyruleno==284);
+ case 267: /* when_clause ::= WHEN expr */
+ case 286: /* key_opt ::= KEY expr */ yytestcase(yyruleno==286);
{ yymsp[-1].minor.yy528 = yymsp[0].minor.yy528; }
break;
- case 266: /* trigger_cmd_list ::= trigger_cmd_list trigger_cmd SEMI */
+ case 268: /* trigger_cmd_list ::= trigger_cmd_list trigger_cmd SEMI */
{
assert( yymsp[-2].minor.yy33!=0 );
yymsp[-2].minor.yy33->pLast->pNext = yymsp[-1].minor.yy33;
yymsp[-2].minor.yy33->pLast = yymsp[-1].minor.yy33;
}
break;
- case 267: /* trigger_cmd_list ::= trigger_cmd SEMI */
+ case 269: /* trigger_cmd_list ::= trigger_cmd SEMI */
{
assert( yymsp[-1].minor.yy33!=0 );
yymsp[-1].minor.yy33->pLast = yymsp[-1].minor.yy33;
}
break;
- case 268: /* trnm ::= nm DOT nm */
+ case 270: /* trnm ::= nm DOT nm */
{
yymsp[-2].minor.yy0 = yymsp[0].minor.yy0;
sqlite3ErrorMsg(pParse,
@@ -172450,39 +175664,39 @@ static YYACTIONTYPE yy_reduce(
"statements within triggers");
}
break;
- case 269: /* tridxby ::= INDEXED BY nm */
+ case 271: /* tridxby ::= INDEXED BY nm */
{
sqlite3ErrorMsg(pParse,
"the INDEXED BY clause is not allowed on UPDATE or DELETE statements "
"within triggers");
}
break;
- case 270: /* tridxby ::= NOT INDEXED */
+ case 272: /* tridxby ::= NOT INDEXED */
{
sqlite3ErrorMsg(pParse,
"the NOT INDEXED clause is not allowed on UPDATE or DELETE statements "
"within triggers");
}
break;
- case 271: /* trigger_cmd ::= UPDATE orconf trnm tridxby SET setlist from where_opt scanpt */
+ case 273: /* trigger_cmd ::= UPDATE orconf trnm tridxby SET setlist from where_opt scanpt */
{yylhsminor.yy33 = sqlite3TriggerUpdateStep(pParse, &yymsp[-6].minor.yy0, yymsp[-2].minor.yy131, yymsp[-3].minor.yy322, yymsp[-1].minor.yy528, yymsp[-7].minor.yy394, yymsp[-8].minor.yy0.z, yymsp[0].minor.yy522);}
yymsp[-8].minor.yy33 = yylhsminor.yy33;
break;
- case 272: /* trigger_cmd ::= scanpt insert_cmd INTO trnm idlist_opt select upsert scanpt */
+ case 274: /* trigger_cmd ::= scanpt insert_cmd INTO trnm idlist_opt select upsert scanpt */
{
yylhsminor.yy33 = sqlite3TriggerInsertStep(pParse,&yymsp[-4].minor.yy0,yymsp[-3].minor.yy254,yymsp[-2].minor.yy47,yymsp[-6].minor.yy394,yymsp[-1].minor.yy444,yymsp[-7].minor.yy522,yymsp[0].minor.yy522);/*yylhsminor.yy33-overwrites-yymsp[-6].minor.yy394*/
}
yymsp[-7].minor.yy33 = yylhsminor.yy33;
break;
- case 273: /* trigger_cmd ::= DELETE FROM trnm tridxby where_opt scanpt */
+ case 275: /* trigger_cmd ::= DELETE FROM trnm tridxby where_opt scanpt */
{yylhsminor.yy33 = sqlite3TriggerDeleteStep(pParse, &yymsp[-3].minor.yy0, yymsp[-1].minor.yy528, yymsp[-5].minor.yy0.z, yymsp[0].minor.yy522);}
yymsp[-5].minor.yy33 = yylhsminor.yy33;
break;
- case 274: /* trigger_cmd ::= scanpt select scanpt */
+ case 276: /* trigger_cmd ::= scanpt select scanpt */
{yylhsminor.yy33 = sqlite3TriggerSelectStep(pParse->db, yymsp[-1].minor.yy47, yymsp[-2].minor.yy522, yymsp[0].minor.yy522); /*yylhsminor.yy33-overwrites-yymsp[-1].minor.yy47*/}
yymsp[-2].minor.yy33 = yylhsminor.yy33;
break;
- case 275: /* expr ::= RAISE LP IGNORE RP */
+ case 277: /* expr ::= RAISE LP IGNORE RP */
{
yymsp[-3].minor.yy528 = sqlite3PExpr(pParse, TK_RAISE, 0, 0);
if( yymsp[-3].minor.yy528 ){
@@ -172490,7 +175704,7 @@ static YYACTIONTYPE yy_reduce(
}
}
break;
- case 276: /* expr ::= RAISE LP raisetype COMMA nm RP */
+ case 278: /* expr ::= RAISE LP raisetype COMMA nm RP */
{
yymsp[-5].minor.yy528 = sqlite3ExprAlloc(pParse->db, TK_RAISE, &yymsp[-1].minor.yy0, 1);
if( yymsp[-5].minor.yy528 ) {
@@ -172498,118 +175712,114 @@ static YYACTIONTYPE yy_reduce(
}
}
break;
- case 277: /* raisetype ::= ROLLBACK */
+ case 279: /* raisetype ::= ROLLBACK */
{yymsp[0].minor.yy394 = OE_Rollback;}
break;
- case 279: /* raisetype ::= FAIL */
+ case 281: /* raisetype ::= FAIL */
{yymsp[0].minor.yy394 = OE_Fail;}
break;
- case 280: /* cmd ::= DROP TRIGGER ifexists fullname */
+ case 282: /* cmd ::= DROP TRIGGER ifexists fullname */
{
sqlite3DropTrigger(pParse,yymsp[0].minor.yy131,yymsp[-1].minor.yy394);
}
break;
- case 281: /* cmd ::= ATTACH database_kw_opt expr AS expr key_opt */
+ case 283: /* cmd ::= ATTACH database_kw_opt expr AS expr key_opt */
{
sqlite3Attach(pParse, yymsp[-3].minor.yy528, yymsp[-1].minor.yy528, yymsp[0].minor.yy528);
}
break;
- case 282: /* cmd ::= DETACH database_kw_opt expr */
+ case 284: /* cmd ::= DETACH database_kw_opt expr */
{
sqlite3Detach(pParse, yymsp[0].minor.yy528);
}
break;
- case 285: /* cmd ::= REINDEX */
+ case 287: /* cmd ::= REINDEX */
{sqlite3Reindex(pParse, 0, 0);}
break;
- case 286: /* cmd ::= REINDEX nm dbnm */
+ case 288: /* cmd ::= REINDEX nm dbnm */
{sqlite3Reindex(pParse, &yymsp[-1].minor.yy0, &yymsp[0].minor.yy0);}
break;
- case 287: /* cmd ::= ANALYZE */
+ case 289: /* cmd ::= ANALYZE */
{sqlite3Analyze(pParse, 0, 0);}
break;
- case 288: /* cmd ::= ANALYZE nm dbnm */
+ case 290: /* cmd ::= ANALYZE nm dbnm */
{sqlite3Analyze(pParse, &yymsp[-1].minor.yy0, &yymsp[0].minor.yy0);}
break;
- case 289: /* cmd ::= ALTER TABLE fullname RENAME TO nm */
+ case 291: /* cmd ::= ALTER TABLE fullname RENAME TO nm */
{
sqlite3AlterRenameTable(pParse,yymsp[-3].minor.yy131,&yymsp[0].minor.yy0);
}
break;
- case 290: /* cmd ::= ALTER TABLE add_column_fullname ADD kwcolumn_opt columnname carglist */
+ case 292: /* cmd ::= ALTER TABLE add_column_fullname ADD kwcolumn_opt columnname carglist */
{
yymsp[-1].minor.yy0.n = (int)(pParse->sLastToken.z-yymsp[-1].minor.yy0.z) + pParse->sLastToken.n;
sqlite3AlterFinishAddColumn(pParse, &yymsp[-1].minor.yy0);
}
break;
- case 291: /* cmd ::= ALTER TABLE fullname DROP kwcolumn_opt nm */
+ case 293: /* cmd ::= ALTER TABLE fullname DROP kwcolumn_opt nm */
{
sqlite3AlterDropColumn(pParse, yymsp[-3].minor.yy131, &yymsp[0].minor.yy0);
}
break;
- case 292: /* add_column_fullname ::= fullname */
+ case 294: /* add_column_fullname ::= fullname */
{
disableLookaside(pParse);
sqlite3AlterBeginAddColumn(pParse, yymsp[0].minor.yy131);
}
break;
- case 293: /* cmd ::= ALTER TABLE fullname RENAME kwcolumn_opt nm TO nm */
+ case 295: /* cmd ::= ALTER TABLE fullname RENAME kwcolumn_opt nm TO nm */
{
sqlite3AlterRenameColumn(pParse, yymsp[-5].minor.yy131, &yymsp[-2].minor.yy0, &yymsp[0].minor.yy0);
}
break;
- case 294: /* cmd ::= create_vtab */
+ case 296: /* cmd ::= create_vtab */
{sqlite3VtabFinishParse(pParse,0);}
break;
- case 295: /* cmd ::= create_vtab LP vtabarglist RP */
+ case 297: /* cmd ::= create_vtab LP vtabarglist RP */
{sqlite3VtabFinishParse(pParse,&yymsp[0].minor.yy0);}
break;
- case 296: /* create_vtab ::= createkw VIRTUAL TABLE ifnotexists nm dbnm USING nm */
+ case 298: /* create_vtab ::= createkw VIRTUAL TABLE ifnotexists nm dbnm USING nm */
{
sqlite3VtabBeginParse(pParse, &yymsp[-3].minor.yy0, &yymsp[-2].minor.yy0, &yymsp[0].minor.yy0, yymsp[-4].minor.yy394);
}
break;
- case 297: /* vtabarg ::= */
+ case 299: /* vtabarg ::= */
{sqlite3VtabArgInit(pParse);}
break;
- case 298: /* vtabargtoken ::= ANY */
- case 299: /* vtabargtoken ::= lp anylist RP */ yytestcase(yyruleno==299);
- case 300: /* lp ::= LP */ yytestcase(yyruleno==300);
+ case 300: /* vtabargtoken ::= ANY */
+ case 301: /* vtabargtoken ::= lp anylist RP */ yytestcase(yyruleno==301);
+ case 302: /* lp ::= LP */ yytestcase(yyruleno==302);
{sqlite3VtabArgExtend(pParse,&yymsp[0].minor.yy0);}
break;
- case 301: /* with ::= WITH wqlist */
- case 302: /* with ::= WITH RECURSIVE wqlist */ yytestcase(yyruleno==302);
+ case 303: /* with ::= WITH wqlist */
+ case 304: /* with ::= WITH RECURSIVE wqlist */ yytestcase(yyruleno==304);
{ sqlite3WithPush(pParse, yymsp[0].minor.yy521, 1); }
break;
- case 303: /* wqas ::= AS */
+ case 305: /* wqas ::= AS */
{yymsp[0].minor.yy516 = M10d_Any;}
break;
- case 304: /* wqas ::= AS MATERIALIZED */
+ case 306: /* wqas ::= AS MATERIALIZED */
{yymsp[-1].minor.yy516 = M10d_Yes;}
break;
- case 305: /* wqas ::= AS NOT MATERIALIZED */
+ case 307: /* wqas ::= AS NOT MATERIALIZED */
{yymsp[-2].minor.yy516 = M10d_No;}
break;
- case 306: /* wqitem ::= nm eidlist_opt wqas LP select RP */
+ case 308: /* wqitem ::= nm eidlist_opt wqas LP select RP */
{
yymsp[-5].minor.yy385 = sqlite3CteNew(pParse, &yymsp[-5].minor.yy0, yymsp[-4].minor.yy322, yymsp[-1].minor.yy47, yymsp[-3].minor.yy516); /*A-overwrites-X*/
}
break;
- case 307: /* wqlist ::= wqitem */
+ case 309: /* wqlist ::= wqitem */
{
yymsp[0].minor.yy521 = sqlite3WithAdd(pParse, 0, yymsp[0].minor.yy385); /*A-overwrites-X*/
}
break;
- case 308: /* wqlist ::= wqlist COMMA wqitem */
+ case 310: /* wqlist ::= wqlist COMMA wqitem */
{
yymsp[-2].minor.yy521 = sqlite3WithAdd(pParse, yymsp[-2].minor.yy521, yymsp[0].minor.yy385);
}
break;
- case 309: /* windowdefn_list ::= windowdefn */
-{ yylhsminor.yy41 = yymsp[0].minor.yy41; }
- yymsp[0].minor.yy41 = yylhsminor.yy41;
- break;
- case 310: /* windowdefn_list ::= windowdefn_list COMMA windowdefn */
+ case 311: /* windowdefn_list ::= windowdefn_list COMMA windowdefn */
{
assert( yymsp[0].minor.yy41!=0 );
sqlite3WindowChain(pParse, yymsp[0].minor.yy41, yymsp[-2].minor.yy41);
@@ -172618,7 +175828,7 @@ static YYACTIONTYPE yy_reduce(
}
yymsp[-2].minor.yy41 = yylhsminor.yy41;
break;
- case 311: /* windowdefn ::= nm AS LP window RP */
+ case 312: /* windowdefn ::= nm AS LP window RP */
{
if( ALWAYS(yymsp[-1].minor.yy41) ){
yymsp[-1].minor.yy41->zName = sqlite3DbStrNDup(pParse->db, yymsp[-4].minor.yy0.z, yymsp[-4].minor.yy0.n);
@@ -172627,35 +175837,28 @@ static YYACTIONTYPE yy_reduce(
}
yymsp[-4].minor.yy41 = yylhsminor.yy41;
break;
- case 312: /* window ::= PARTITION BY nexprlist orderby_opt frame_opt */
+ case 313: /* window ::= PARTITION BY nexprlist orderby_opt frame_opt */
{
yymsp[-4].minor.yy41 = sqlite3WindowAssemble(pParse, yymsp[0].minor.yy41, yymsp[-2].minor.yy322, yymsp[-1].minor.yy322, 0);
}
break;
- case 313: /* window ::= nm PARTITION BY nexprlist orderby_opt frame_opt */
+ case 314: /* window ::= nm PARTITION BY nexprlist orderby_opt frame_opt */
{
yylhsminor.yy41 = sqlite3WindowAssemble(pParse, yymsp[0].minor.yy41, yymsp[-2].minor.yy322, yymsp[-1].minor.yy322, &yymsp[-5].minor.yy0);
}
yymsp[-5].minor.yy41 = yylhsminor.yy41;
break;
- case 314: /* window ::= ORDER BY sortlist frame_opt */
+ case 315: /* window ::= ORDER BY sortlist frame_opt */
{
yymsp[-3].minor.yy41 = sqlite3WindowAssemble(pParse, yymsp[0].minor.yy41, 0, yymsp[-1].minor.yy322, 0);
}
break;
- case 315: /* window ::= nm ORDER BY sortlist frame_opt */
+ case 316: /* window ::= nm ORDER BY sortlist frame_opt */
{
yylhsminor.yy41 = sqlite3WindowAssemble(pParse, yymsp[0].minor.yy41, 0, yymsp[-1].minor.yy322, &yymsp[-4].minor.yy0);
}
yymsp[-4].minor.yy41 = yylhsminor.yy41;
break;
- case 316: /* window ::= frame_opt */
- case 335: /* filter_over ::= over_clause */ yytestcase(yyruleno==335);
-{
- yylhsminor.yy41 = yymsp[0].minor.yy41;
-}
- yymsp[0].minor.yy41 = yylhsminor.yy41;
- break;
case 317: /* window ::= nm frame_opt */
{
yylhsminor.yy41 = sqlite3WindowAssemble(pParse, yymsp[0].minor.yy41, 0, 0, &yymsp[-1].minor.yy0);
@@ -172721,6 +175924,12 @@ static YYACTIONTYPE yy_reduce(
}
yymsp[-1].minor.yy41 = yylhsminor.yy41;
break;
+ case 335: /* filter_over ::= over_clause */
+{
+ yylhsminor.yy41 = yymsp[0].minor.yy41;
+}
+ yymsp[0].minor.yy41 = yylhsminor.yy41;
+ break;
case 336: /* filter_over ::= filter_clause */
{
yylhsminor.yy41 = (Window*)sqlite3DbMallocZero(pParse->db, sizeof(Window));
@@ -172814,6 +176023,8 @@ static YYACTIONTYPE yy_reduce(
/* (400) anylist ::= anylist LP anylist RP */ yytestcase(yyruleno==400);
/* (401) anylist ::= anylist ANY */ yytestcase(yyruleno==401);
/* (402) with ::= */ yytestcase(yyruleno==402);
+ /* (403) windowdefn_list ::= windowdefn (OPTIMIZED OUT) */ assert(yyruleno!=403);
+ /* (404) window ::= frame_opt (OPTIMIZED OUT) */ assert(yyruleno!=404);
break;
/********** End reduce actions ************************************************/
};
@@ -173602,180 +176813,179 @@ static const unsigned char aKWCode[148] = {0,
static int keywordCode(const char *z, int n, int *pType){
int i, j;
const char *zKW;
- if( n>=2 ){
- i = ((charMap(z[0])*4) ^ (charMap(z[n-1])*3) ^ n*1) % 127;
- for(i=(int)aKWHash[i]; i>0; i=aKWNext[i]){
- if( aKWLen[i]!=n ) continue;
- zKW = &zKWText[aKWOffset[i]];
+ assert( n>=2 );
+ i = ((charMap(z[0])*4) ^ (charMap(z[n-1])*3) ^ n*1) % 127;
+ for(i=(int)aKWHash[i]; i>0; i=aKWNext[i]){
+ if( aKWLen[i]!=n ) continue;
+ zKW = &zKWText[aKWOffset[i]];
#ifdef SQLITE_ASCII
- if( (z[0]&~0x20)!=zKW[0] ) continue;
- if( (z[1]&~0x20)!=zKW[1] ) continue;
- j = 2;
- while( j<n && (z[j]&~0x20)==zKW[j] ){ j++; }
+ if( (z[0]&~0x20)!=zKW[0] ) continue;
+ if( (z[1]&~0x20)!=zKW[1] ) continue;
+ j = 2;
+ while( j<n && (z[j]&~0x20)==zKW[j] ){ j++; }
#endif
#ifdef SQLITE_EBCDIC
- if( toupper(z[0])!=zKW[0] ) continue;
- if( toupper(z[1])!=zKW[1] ) continue;
- j = 2;
- while( j<n && toupper(z[j])==zKW[j] ){ j++; }
-#endif
- if( j<n ) continue;
- testcase( i==1 ); /* REINDEX */
- testcase( i==2 ); /* INDEXED */
- testcase( i==3 ); /* INDEX */
- testcase( i==4 ); /* DESC */
- testcase( i==5 ); /* ESCAPE */
- testcase( i==6 ); /* EACH */
- testcase( i==7 ); /* CHECK */
- testcase( i==8 ); /* KEY */
- testcase( i==9 ); /* BEFORE */
- testcase( i==10 ); /* FOREIGN */
- testcase( i==11 ); /* FOR */
- testcase( i==12 ); /* IGNORE */
- testcase( i==13 ); /* REGEXP */
- testcase( i==14 ); /* EXPLAIN */
- testcase( i==15 ); /* INSTEAD */
- testcase( i==16 ); /* ADD */
- testcase( i==17 ); /* DATABASE */
- testcase( i==18 ); /* AS */
- testcase( i==19 ); /* SELECT */
- testcase( i==20 ); /* TABLE */
- testcase( i==21 ); /* LEFT */
- testcase( i==22 ); /* THEN */
- testcase( i==23 ); /* END */
- testcase( i==24 ); /* DEFERRABLE */
- testcase( i==25 ); /* ELSE */
- testcase( i==26 ); /* EXCLUDE */
- testcase( i==27 ); /* DELETE */
- testcase( i==28 ); /* TEMPORARY */
- testcase( i==29 ); /* TEMP */
- testcase( i==30 ); /* OR */
- testcase( i==31 ); /* ISNULL */
- testcase( i==32 ); /* NULLS */
- testcase( i==33 ); /* SAVEPOINT */
- testcase( i==34 ); /* INTERSECT */
- testcase( i==35 ); /* TIES */
- testcase( i==36 ); /* NOTNULL */
- testcase( i==37 ); /* NOT */
- testcase( i==38 ); /* NO */
- testcase( i==39 ); /* NULL */
- testcase( i==40 ); /* LIKE */
- testcase( i==41 ); /* EXCEPT */
- testcase( i==42 ); /* TRANSACTION */
- testcase( i==43 ); /* ACTION */
- testcase( i==44 ); /* ON */
- testcase( i==45 ); /* NATURAL */
- testcase( i==46 ); /* ALTER */
- testcase( i==47 ); /* RAISE */
- testcase( i==48 ); /* EXCLUSIVE */
- testcase( i==49 ); /* EXISTS */
- testcase( i==50 ); /* CONSTRAINT */
- testcase( i==51 ); /* INTO */
- testcase( i==52 ); /* OFFSET */
- testcase( i==53 ); /* OF */
- testcase( i==54 ); /* SET */
- testcase( i==55 ); /* TRIGGER */
- testcase( i==56 ); /* RANGE */
- testcase( i==57 ); /* GENERATED */
- testcase( i==58 ); /* DETACH */
- testcase( i==59 ); /* HAVING */
- testcase( i==60 ); /* GLOB */
- testcase( i==61 ); /* BEGIN */
- testcase( i==62 ); /* INNER */
- testcase( i==63 ); /* REFERENCES */
- testcase( i==64 ); /* UNIQUE */
- testcase( i==65 ); /* QUERY */
- testcase( i==66 ); /* WITHOUT */
- testcase( i==67 ); /* WITH */
- testcase( i==68 ); /* OUTER */
- testcase( i==69 ); /* RELEASE */
- testcase( i==70 ); /* ATTACH */
- testcase( i==71 ); /* BETWEEN */
- testcase( i==72 ); /* NOTHING */
- testcase( i==73 ); /* GROUPS */
- testcase( i==74 ); /* GROUP */
- testcase( i==75 ); /* CASCADE */
- testcase( i==76 ); /* ASC */
- testcase( i==77 ); /* DEFAULT */
- testcase( i==78 ); /* CASE */
- testcase( i==79 ); /* COLLATE */
- testcase( i==80 ); /* CREATE */
- testcase( i==81 ); /* CURRENT_DATE */
- testcase( i==82 ); /* IMMEDIATE */
- testcase( i==83 ); /* JOIN */
- testcase( i==84 ); /* INSERT */
- testcase( i==85 ); /* MATCH */
- testcase( i==86 ); /* PLAN */
- testcase( i==87 ); /* ANALYZE */
- testcase( i==88 ); /* PRAGMA */
- testcase( i==89 ); /* MATERIALIZED */
- testcase( i==90 ); /* DEFERRED */
- testcase( i==91 ); /* DISTINCT */
- testcase( i==92 ); /* IS */
- testcase( i==93 ); /* UPDATE */
- testcase( i==94 ); /* VALUES */
- testcase( i==95 ); /* VIRTUAL */
- testcase( i==96 ); /* ALWAYS */
- testcase( i==97 ); /* WHEN */
- testcase( i==98 ); /* WHERE */
- testcase( i==99 ); /* RECURSIVE */
- testcase( i==100 ); /* ABORT */
- testcase( i==101 ); /* AFTER */
- testcase( i==102 ); /* RENAME */
- testcase( i==103 ); /* AND */
- testcase( i==104 ); /* DROP */
- testcase( i==105 ); /* PARTITION */
- testcase( i==106 ); /* AUTOINCREMENT */
- testcase( i==107 ); /* TO */
- testcase( i==108 ); /* IN */
- testcase( i==109 ); /* CAST */
- testcase( i==110 ); /* COLUMN */
- testcase( i==111 ); /* COMMIT */
- testcase( i==112 ); /* CONFLICT */
- testcase( i==113 ); /* CROSS */
- testcase( i==114 ); /* CURRENT_TIMESTAMP */
- testcase( i==115 ); /* CURRENT_TIME */
- testcase( i==116 ); /* CURRENT */
- testcase( i==117 ); /* PRECEDING */
- testcase( i==118 ); /* FAIL */
- testcase( i==119 ); /* LAST */
- testcase( i==120 ); /* FILTER */
- testcase( i==121 ); /* REPLACE */
- testcase( i==122 ); /* FIRST */
- testcase( i==123 ); /* FOLLOWING */
- testcase( i==124 ); /* FROM */
- testcase( i==125 ); /* FULL */
- testcase( i==126 ); /* LIMIT */
- testcase( i==127 ); /* IF */
- testcase( i==128 ); /* ORDER */
- testcase( i==129 ); /* RESTRICT */
- testcase( i==130 ); /* OTHERS */
- testcase( i==131 ); /* OVER */
- testcase( i==132 ); /* RETURNING */
- testcase( i==133 ); /* RIGHT */
- testcase( i==134 ); /* ROLLBACK */
- testcase( i==135 ); /* ROWS */
- testcase( i==136 ); /* ROW */
- testcase( i==137 ); /* UNBOUNDED */
- testcase( i==138 ); /* UNION */
- testcase( i==139 ); /* USING */
- testcase( i==140 ); /* VACUUM */
- testcase( i==141 ); /* VIEW */
- testcase( i==142 ); /* WINDOW */
- testcase( i==143 ); /* DO */
- testcase( i==144 ); /* BY */
- testcase( i==145 ); /* INITIALLY */
- testcase( i==146 ); /* ALL */
- testcase( i==147 ); /* PRIMARY */
- *pType = aKWCode[i];
- break;
- }
+ if( toupper(z[0])!=zKW[0] ) continue;
+ if( toupper(z[1])!=zKW[1] ) continue;
+ j = 2;
+ while( j<n && toupper(z[j])==zKW[j] ){ j++; }
+#endif
+ if( j<n ) continue;
+ testcase( i==1 ); /* REINDEX */
+ testcase( i==2 ); /* INDEXED */
+ testcase( i==3 ); /* INDEX */
+ testcase( i==4 ); /* DESC */
+ testcase( i==5 ); /* ESCAPE */
+ testcase( i==6 ); /* EACH */
+ testcase( i==7 ); /* CHECK */
+ testcase( i==8 ); /* KEY */
+ testcase( i==9 ); /* BEFORE */
+ testcase( i==10 ); /* FOREIGN */
+ testcase( i==11 ); /* FOR */
+ testcase( i==12 ); /* IGNORE */
+ testcase( i==13 ); /* REGEXP */
+ testcase( i==14 ); /* EXPLAIN */
+ testcase( i==15 ); /* INSTEAD */
+ testcase( i==16 ); /* ADD */
+ testcase( i==17 ); /* DATABASE */
+ testcase( i==18 ); /* AS */
+ testcase( i==19 ); /* SELECT */
+ testcase( i==20 ); /* TABLE */
+ testcase( i==21 ); /* LEFT */
+ testcase( i==22 ); /* THEN */
+ testcase( i==23 ); /* END */
+ testcase( i==24 ); /* DEFERRABLE */
+ testcase( i==25 ); /* ELSE */
+ testcase( i==26 ); /* EXCLUDE */
+ testcase( i==27 ); /* DELETE */
+ testcase( i==28 ); /* TEMPORARY */
+ testcase( i==29 ); /* TEMP */
+ testcase( i==30 ); /* OR */
+ testcase( i==31 ); /* ISNULL */
+ testcase( i==32 ); /* NULLS */
+ testcase( i==33 ); /* SAVEPOINT */
+ testcase( i==34 ); /* INTERSECT */
+ testcase( i==35 ); /* TIES */
+ testcase( i==36 ); /* NOTNULL */
+ testcase( i==37 ); /* NOT */
+ testcase( i==38 ); /* NO */
+ testcase( i==39 ); /* NULL */
+ testcase( i==40 ); /* LIKE */
+ testcase( i==41 ); /* EXCEPT */
+ testcase( i==42 ); /* TRANSACTION */
+ testcase( i==43 ); /* ACTION */
+ testcase( i==44 ); /* ON */
+ testcase( i==45 ); /* NATURAL */
+ testcase( i==46 ); /* ALTER */
+ testcase( i==47 ); /* RAISE */
+ testcase( i==48 ); /* EXCLUSIVE */
+ testcase( i==49 ); /* EXISTS */
+ testcase( i==50 ); /* CONSTRAINT */
+ testcase( i==51 ); /* INTO */
+ testcase( i==52 ); /* OFFSET */
+ testcase( i==53 ); /* OF */
+ testcase( i==54 ); /* SET */
+ testcase( i==55 ); /* TRIGGER */
+ testcase( i==56 ); /* RANGE */
+ testcase( i==57 ); /* GENERATED */
+ testcase( i==58 ); /* DETACH */
+ testcase( i==59 ); /* HAVING */
+ testcase( i==60 ); /* GLOB */
+ testcase( i==61 ); /* BEGIN */
+ testcase( i==62 ); /* INNER */
+ testcase( i==63 ); /* REFERENCES */
+ testcase( i==64 ); /* UNIQUE */
+ testcase( i==65 ); /* QUERY */
+ testcase( i==66 ); /* WITHOUT */
+ testcase( i==67 ); /* WITH */
+ testcase( i==68 ); /* OUTER */
+ testcase( i==69 ); /* RELEASE */
+ testcase( i==70 ); /* ATTACH */
+ testcase( i==71 ); /* BETWEEN */
+ testcase( i==72 ); /* NOTHING */
+ testcase( i==73 ); /* GROUPS */
+ testcase( i==74 ); /* GROUP */
+ testcase( i==75 ); /* CASCADE */
+ testcase( i==76 ); /* ASC */
+ testcase( i==77 ); /* DEFAULT */
+ testcase( i==78 ); /* CASE */
+ testcase( i==79 ); /* COLLATE */
+ testcase( i==80 ); /* CREATE */
+ testcase( i==81 ); /* CURRENT_DATE */
+ testcase( i==82 ); /* IMMEDIATE */
+ testcase( i==83 ); /* JOIN */
+ testcase( i==84 ); /* INSERT */
+ testcase( i==85 ); /* MATCH */
+ testcase( i==86 ); /* PLAN */
+ testcase( i==87 ); /* ANALYZE */
+ testcase( i==88 ); /* PRAGMA */
+ testcase( i==89 ); /* MATERIALIZED */
+ testcase( i==90 ); /* DEFERRED */
+ testcase( i==91 ); /* DISTINCT */
+ testcase( i==92 ); /* IS */
+ testcase( i==93 ); /* UPDATE */
+ testcase( i==94 ); /* VALUES */
+ testcase( i==95 ); /* VIRTUAL */
+ testcase( i==96 ); /* ALWAYS */
+ testcase( i==97 ); /* WHEN */
+ testcase( i==98 ); /* WHERE */
+ testcase( i==99 ); /* RECURSIVE */
+ testcase( i==100 ); /* ABORT */
+ testcase( i==101 ); /* AFTER */
+ testcase( i==102 ); /* RENAME */
+ testcase( i==103 ); /* AND */
+ testcase( i==104 ); /* DROP */
+ testcase( i==105 ); /* PARTITION */
+ testcase( i==106 ); /* AUTOINCREMENT */
+ testcase( i==107 ); /* TO */
+ testcase( i==108 ); /* IN */
+ testcase( i==109 ); /* CAST */
+ testcase( i==110 ); /* COLUMN */
+ testcase( i==111 ); /* COMMIT */
+ testcase( i==112 ); /* CONFLICT */
+ testcase( i==113 ); /* CROSS */
+ testcase( i==114 ); /* CURRENT_TIMESTAMP */
+ testcase( i==115 ); /* CURRENT_TIME */
+ testcase( i==116 ); /* CURRENT */
+ testcase( i==117 ); /* PRECEDING */
+ testcase( i==118 ); /* FAIL */
+ testcase( i==119 ); /* LAST */
+ testcase( i==120 ); /* FILTER */
+ testcase( i==121 ); /* REPLACE */
+ testcase( i==122 ); /* FIRST */
+ testcase( i==123 ); /* FOLLOWING */
+ testcase( i==124 ); /* FROM */
+ testcase( i==125 ); /* FULL */
+ testcase( i==126 ); /* LIMIT */
+ testcase( i==127 ); /* IF */
+ testcase( i==128 ); /* ORDER */
+ testcase( i==129 ); /* RESTRICT */
+ testcase( i==130 ); /* OTHERS */
+ testcase( i==131 ); /* OVER */
+ testcase( i==132 ); /* RETURNING */
+ testcase( i==133 ); /* RIGHT */
+ testcase( i==134 ); /* ROLLBACK */
+ testcase( i==135 ); /* ROWS */
+ testcase( i==136 ); /* ROW */
+ testcase( i==137 ); /* UNBOUNDED */
+ testcase( i==138 ); /* UNION */
+ testcase( i==139 ); /* USING */
+ testcase( i==140 ); /* VACUUM */
+ testcase( i==141 ); /* VIEW */
+ testcase( i==142 ); /* WINDOW */
+ testcase( i==143 ); /* DO */
+ testcase( i==144 ); /* BY */
+ testcase( i==145 ); /* INITIALLY */
+ testcase( i==146 ); /* ALL */
+ testcase( i==147 ); /* PRIMARY */
+ *pType = aKWCode[i];
+ break;
}
return n;
}
SQLITE_PRIVATE int sqlite3KeywordCode(const unsigned char *z, int n){
int id = TK_ID;
- keywordCode((char*)z, n, &id);
+ if( n>=2 ) keywordCode((char*)z, n, &id);
return id;
}
#define SQLITE_N_KEYWORD 147
@@ -174080,7 +177290,7 @@ SQLITE_PRIVATE int sqlite3GetToken(const unsigned char *z, int *tokenType){
testcase( z[0]=='0' ); testcase( z[0]=='1' ); testcase( z[0]=='2' );
testcase( z[0]=='3' ); testcase( z[0]=='4' ); testcase( z[0]=='5' );
testcase( z[0]=='6' ); testcase( z[0]=='7' ); testcase( z[0]=='8' );
- testcase( z[0]=='9' );
+ testcase( z[0]=='9' ); testcase( z[0]=='.' );
*tokenType = TK_INTEGER;
#ifndef SQLITE_OMIT_HEX_INTEGER
if( z[0]=='0' && (z[1]=='x' || z[1]=='X') && sqlite3Isxdigit(z[2]) ){
@@ -174152,7 +177362,8 @@ SQLITE_PRIVATE int sqlite3GetToken(const unsigned char *z, int *tokenType){
return i;
}
case CC_KYWD0: {
- for(i=1; aiClass[z[i]]<=CC_KYWD; i++){}
+ if( aiClass[z[1]]>CC_KYWD ){ i = 1; break; }
+ for(i=2; aiClass[z[i]]<=CC_KYWD; i++){}
if( IdChar(z[i]) ){
/* This token started out using characters that can appear in keywords,
** but z[i] is a character not allowed within keywords, so this must
@@ -174931,30 +178142,20 @@ static int sqlite3TestExtInit(sqlite3 *db){
** Forward declarations of external module initializer functions
** for modules that need them.
*/
-#ifdef SQLITE_ENABLE_FTS1
-SQLITE_PRIVATE int sqlite3Fts1Init(sqlite3*);
-#endif
-#ifdef SQLITE_ENABLE_FTS2
-SQLITE_PRIVATE int sqlite3Fts2Init(sqlite3*);
-#endif
#ifdef SQLITE_ENABLE_FTS5
SQLITE_PRIVATE int sqlite3Fts5Init(sqlite3*);
#endif
#ifdef SQLITE_ENABLE_STMTVTAB
SQLITE_PRIVATE int sqlite3StmtVtabInit(sqlite3*);
#endif
-
+#ifdef SQLITE_EXTRA_AUTOEXT
+int SQLITE_EXTRA_AUTOEXT(sqlite3*);
+#endif
/*
** An array of pointers to extension initializer functions for
** built-in extensions.
*/
static int (*const sqlite3BuiltinExtensions[])(sqlite3*) = {
-#ifdef SQLITE_ENABLE_FTS1
- sqlite3Fts1Init,
-#endif
-#ifdef SQLITE_ENABLE_FTS2
- sqlite3Fts2Init,
-#endif
#ifdef SQLITE_ENABLE_FTS3
sqlite3Fts3Init,
#endif
@@ -174983,6 +178184,9 @@ static int (*const sqlite3BuiltinExtensions[])(sqlite3*) = {
#ifdef SQLITE_ENABLE_BYTECODE_VTAB
sqlite3VdbeBytecodeVtabInit,
#endif
+#ifdef SQLITE_EXTRA_AUTOEXT
+ SQLITE_EXTRA_AUTOEXT,
+#endif
};
#ifndef SQLITE_AMALGAMATION
@@ -175057,6 +178261,32 @@ SQLITE_API char *sqlite3_temp_directory = 0;
SQLITE_API char *sqlite3_data_directory = 0;
/*
+** Determine whether or not high-precision (long double) floating point
+** math works correctly on CPU currently running.
+*/
+static SQLITE_NOINLINE int hasHighPrecisionDouble(int rc){
+ if( sizeof(LONGDOUBLE_TYPE)<=8 ){
+ /* If the size of "long double" is not more than 8, then
+ ** high-precision math is not possible. */
+ return 0;
+ }else{
+ /* Just because sizeof(long double)>8 does not mean that the underlying
+ ** hardware actually supports high-precision floating point. For example,
+ ** clearing the 0x100 bit in the floating-point control word on Intel
+ ** processors will make long double work like double, even though long
+ ** double takes up more space. The only way to determine if long double
+ ** actually works is to run an experiment. */
+ LONGDOUBLE_TYPE a, b, c;
+ rc++;
+ a = 1.0+rc*0.1;
+ b = 1.0e+18+rc*25.0;
+ c = a+b;
+ return b!=c;
+ }
+}
+
+
+/*
** Initialize SQLite.
**
** This routine must be called to initialize the memory allocation,
@@ -175251,6 +178481,12 @@ SQLITE_API int sqlite3_initialize(void){
}
#endif
+ /* Experimentally determine if high-precision floating point is
+ ** available. */
+#ifndef SQLITE_OMIT_WSD
+ sqlite3Config.bUseLongDouble = hasHighPrecisionDouble(rc);
+#endif
+
return rc;
}
@@ -175821,6 +179057,10 @@ SQLITE_API int sqlite3_db_cacheflush(sqlite3 *db){
SQLITE_API int sqlite3_db_config(sqlite3 *db, int op, ...){
va_list ap;
int rc;
+
+#ifdef SQLITE_ENABLE_API_ARMOR
+ if( !sqlite3SafetyCheckOk(db) ) return SQLITE_MISUSE_BKPT;
+#endif
sqlite3_mutex_enter(db->mutex);
va_start(ap, op);
switch( op ){
@@ -176150,6 +179390,14 @@ static int sqlite3Close(sqlite3 *db, int forceZombie){
}
#endif
+ while( db->pDbData ){
+ DbClientData *p = db->pDbData;
+ db->pDbData = p->pNext;
+ assert( p->pData!=0 );
+ if( p->xDestructor ) p->xDestructor(p->pData);
+ sqlite3_free(p);
+ }
+
/* Convert the connection into a zombie and then close it.
*/
db->eOpenState = SQLITE_STATE_ZOMBIE;
@@ -176567,9 +179815,9 @@ static int sqliteDefaultBusyCallback(
void *ptr, /* Database connection */
int count /* Number of times table has been busy */
){
-#if SQLITE_OS_WIN || HAVE_USLEEP
+#if SQLITE_OS_WIN || !defined(HAVE_NANOSLEEP) || HAVE_NANOSLEEP
/* This case is for systems that have support for sleeping for fractions of
- ** a second. Examples: All windows systems, unix systems with usleep() */
+ ** a second. Examples: All windows systems, unix systems with nanosleep() */
static const u8 delays[] =
{ 1, 2, 5, 10, 15, 20, 25, 25, 25, 50, 50, 100 };
static const u8 totals[] =
@@ -176767,7 +180015,7 @@ SQLITE_PRIVATE int sqlite3CreateFunc(
assert( SQLITE_FUNC_CONSTANT==SQLITE_DETERMINISTIC );
assert( SQLITE_FUNC_DIRECT==SQLITE_DIRECTONLY );
extraFlags = enc & (SQLITE_DETERMINISTIC|SQLITE_DIRECTONLY|
- SQLITE_SUBTYPE|SQLITE_INNOCUOUS);
+ SQLITE_SUBTYPE|SQLITE_INNOCUOUS|SQLITE_RESULT_SUBTYPE);
enc &= (SQLITE_FUNC_ENCMASK|SQLITE_ANY);
/* The SQLITE_INNOCUOUS flag is the same bit as SQLITE_FUNC_UNSAFE. But
@@ -177224,6 +180472,12 @@ SQLITE_API void *sqlite3_preupdate_hook(
void *pArg /* First callback argument */
){
void *pRet;
+
+#ifdef SQLITE_ENABLE_API_ARMOR
+ if( db==0 ){
+ return 0;
+ }
+#endif
sqlite3_mutex_enter(db->mutex);
pRet = db->pPreUpdateArg;
db->xPreUpdateCallback = xCallback;
@@ -177370,7 +180624,7 @@ SQLITE_API int sqlite3_wal_checkpoint_v2(
if( eMode<SQLITE_CHECKPOINT_PASSIVE || eMode>SQLITE_CHECKPOINT_TRUNCATE ){
/* EVIDENCE-OF: R-03996-12088 The M parameter must be a valid checkpoint
** mode: */
- return SQLITE_MISUSE;
+ return SQLITE_MISUSE_BKPT;
}
sqlite3_mutex_enter(db->mutex);
@@ -178207,7 +181461,7 @@ static int openDatabase(
** 0 off off
**
** Legacy behavior is 3 (double-quoted string literals are allowed anywhere)
-** and so that is the default. But developers are encouranged to use
+** and so that is the default. But developers are encouraged to use
** -DSQLITE_DQS=0 (best) or -DSQLITE_DQS=1 (second choice) if possible.
*/
#if !defined(SQLITE_DQS)
@@ -178607,6 +181861,69 @@ SQLITE_API int sqlite3_collation_needed16(
}
#endif /* SQLITE_OMIT_UTF16 */
+/*
+** Find existing client data.
+*/
+SQLITE_API void *sqlite3_get_clientdata(sqlite3 *db, const char *zName){
+ DbClientData *p;
+ sqlite3_mutex_enter(db->mutex);
+ for(p=db->pDbData; p; p=p->pNext){
+ if( strcmp(p->zName, zName)==0 ){
+ void *pResult = p->pData;
+ sqlite3_mutex_leave(db->mutex);
+ return pResult;
+ }
+ }
+ sqlite3_mutex_leave(db->mutex);
+ return 0;
+}
+
+/*
+** Add new client data to a database connection.
+*/
+SQLITE_API int sqlite3_set_clientdata(
+ sqlite3 *db, /* Attach client data to this connection */
+ const char *zName, /* Name of the client data */
+ void *pData, /* The client data itself */
+ void (*xDestructor)(void*) /* Destructor */
+){
+ DbClientData *p, **pp;
+ sqlite3_mutex_enter(db->mutex);
+ pp = &db->pDbData;
+ for(p=db->pDbData; p && strcmp(p->zName,zName); p=p->pNext){
+ pp = &p->pNext;
+ }
+ if( p ){
+ assert( p->pData!=0 );
+ if( p->xDestructor ) p->xDestructor(p->pData);
+ if( pData==0 ){
+ *pp = p->pNext;
+ sqlite3_free(p);
+ sqlite3_mutex_leave(db->mutex);
+ return SQLITE_OK;
+ }
+ }else if( pData==0 ){
+ sqlite3_mutex_leave(db->mutex);
+ return SQLITE_OK;
+ }else{
+ size_t n = strlen(zName);
+ p = sqlite3_malloc64( sizeof(DbClientData)+n+1 );
+ if( p==0 ){
+ if( xDestructor ) xDestructor(pData);
+ sqlite3_mutex_leave(db->mutex);
+ return SQLITE_NOMEM;
+ }
+ memcpy(p->zName, zName, n+1);
+ p->pNext = db->pDbData;
+ db->pDbData = p;
+ }
+ p->pData = pData;
+ p->xDestructor = xDestructor;
+ sqlite3_mutex_leave(db->mutex);
+ return SQLITE_OK;
+}
+
+
#ifndef SQLITE_OMIT_DEPRECATED
/*
** This function is now an anachronism. It used to be used to recover from a
@@ -178742,7 +182059,7 @@ SQLITE_API int sqlite3_table_column_metadata(
/* Find the column for which info is requested */
if( zColumnName==0 ){
- /* Query for existance of table only */
+ /* Query for existence of table only */
}else{
for(iCol=0; iCol<pTab->nCol; iCol++){
pCol = &pTab->aCol[iCol];
@@ -178956,6 +182273,28 @@ SQLITE_API int sqlite3_test_control(int op, ...){
}
#endif
+ /* sqlite3_test_control(SQLITE_TESTCTRL_FK_NO_ACTION, sqlite3 *db, int b);
+ **
+ ** If b is true, then activate the SQLITE_FkNoAction setting. If b is
+ ** false then clearn that setting. If the SQLITE_FkNoAction setting is
+ ** abled, all foreign key ON DELETE and ON UPDATE actions behave as if
+ ** they were NO ACTION, regardless of how they are defined.
+ **
+ ** NB: One must usually run "PRAGMA writable_schema=RESET" after
+ ** using this test-control, before it will take full effect. failing
+ ** to reset the schema can result in some unexpected behavior.
+ */
+ case SQLITE_TESTCTRL_FK_NO_ACTION: {
+ sqlite3 *db = va_arg(ap, sqlite3*);
+ int b = va_arg(ap, int);
+ if( b ){
+ db->flags |= SQLITE_FkNoAction;
+ }else{
+ db->flags &= ~SQLITE_FkNoAction;
+ }
+ break;
+ }
+
/*
** sqlite3_test_control(BITVEC_TEST, size, program)
**
@@ -179062,10 +182401,12 @@ SQLITE_API int sqlite3_test_control(int op, ...){
sqlite3ShowSrcList(0);
sqlite3ShowWith(0);
sqlite3ShowUpsert(0);
+#ifndef SQLITE_OMIT_TRIGGER
sqlite3ShowTriggerStep(0);
sqlite3ShowTriggerStepList(0);
sqlite3ShowTrigger(0);
sqlite3ShowTriggerList(0);
+#endif
#ifndef SQLITE_OMIT_WINDOWFUNC
sqlite3ShowWindow(0);
sqlite3ShowWinFunc(0);
@@ -179182,7 +182523,7 @@ SQLITE_API int sqlite3_test_control(int op, ...){
** formed and never corrupt. This flag is clear by default, indicating that
** database files might have arbitrary corruption. Setting the flag during
** testing causes certain assert() statements in the code to be activated
- ** that demonstrat invariants on well-formed database files.
+ ** that demonstrate invariants on well-formed database files.
*/
case SQLITE_TESTCTRL_NEVER_CORRUPT: {
sqlite3GlobalConfig.neverCorrupt = va_arg(ap, int);
@@ -179336,7 +182677,7 @@ SQLITE_API int sqlite3_test_control(int op, ...){
**
** op==0 Store the current sqlite3TreeTrace in *ptr
** op==1 Set sqlite3TreeTrace to the value *ptr
- ** op==3 Store the current sqlite3WhereTrace in *ptr
+ ** op==2 Store the current sqlite3WhereTrace in *ptr
** op==3 Set sqlite3WhereTrace to the value *ptr
*/
case SQLITE_TESTCTRL_TRACEFLAGS: {
@@ -179372,6 +182713,23 @@ SQLITE_API int sqlite3_test_control(int op, ...){
break;
}
+#if !defined(SQLITE_OMIT_WSD)
+ /* sqlite3_test_control(SQLITE_TESTCTRL_USELONGDOUBLE, int X);
+ **
+ ** X<0 Make no changes to the bUseLongDouble. Just report value.
+ ** X==0 Disable bUseLongDouble
+ ** X==1 Enable bUseLongDouble
+ ** X>=2 Set bUseLongDouble to its default value for this platform
+ */
+ case SQLITE_TESTCTRL_USELONGDOUBLE: {
+ int b = va_arg(ap, int);
+ if( b>=2 ) b = hasHighPrecisionDouble(b);
+ if( b>=0 ) sqlite3Config.bUseLongDouble = b>0;
+ rc = sqlite3Config.bUseLongDouble!=0;
+ break;
+ }
+#endif
+
#if defined(SQLITE_DEBUG) && !defined(SQLITE_OMIT_WSD)
/* sqlite3_test_control(SQLITE_TESTCTRL_TUNE, id, *piValue)
@@ -179402,6 +182760,28 @@ SQLITE_API int sqlite3_test_control(int op, ...){
break;
}
#endif
+
+ /* sqlite3_test_control(SQLITE_TESTCTRL_JSON_SELFCHECK, &onOff);
+ **
+ ** Activate or deactivate validation of JSONB that is generated from
+ ** text. Off by default, as the validation is slow. Validation is
+ ** only available if compiled using SQLITE_DEBUG.
+ **
+ ** If onOff is initially 1, then turn it on. If onOff is initially
+ ** off, turn it off. If onOff is initially -1, then change onOff
+ ** to be the current setting.
+ */
+ case SQLITE_TESTCTRL_JSON_SELFCHECK: {
+#if defined(SQLITE_DEBUG) && !defined(SQLITE_OMIT_WSD)
+ int *pOnOff = va_arg(ap, int*);
+ if( *pOnOff<0 ){
+ *pOnOff = sqlite3Config.bJsonSelfcheck;
+ }else{
+ sqlite3Config.bJsonSelfcheck = (u8)((*pOnOff)&0xff);
+ }
+#endif
+ break;
+ }
}
va_end(ap);
#endif /* SQLITE_UNTESTABLE */
@@ -179672,7 +183052,7 @@ SQLITE_API int sqlite3_snapshot_get(
}
/*
-** Open a read-transaction on the snapshot idendified by pSnapshot.
+** Open a read-transaction on the snapshot identified by pSnapshot.
*/
SQLITE_API int sqlite3_snapshot_open(
sqlite3 *db,
@@ -179779,7 +183159,7 @@ SQLITE_API int sqlite3_compileoption_used(const char *zOptName){
int nOpt;
const char **azCompileOpt;
-#if SQLITE_ENABLE_API_ARMOR
+#ifdef SQLITE_ENABLE_API_ARMOR
if( zOptName==0 ){
(void)SQLITE_MISUSE_BKPT;
return 0;
@@ -179974,6 +183354,9 @@ SQLITE_API int sqlite3_unlock_notify(
){
int rc = SQLITE_OK;
+#ifdef SQLITE_ENABLE_API_ARMOR
+ if( !sqlite3SafetyCheckOk(db) ) return SQLITE_MISUSE_BKPT;
+#endif
sqlite3_mutex_enter(db->mutex);
enterMutex();
@@ -180995,6 +184378,7 @@ struct Fts3Table {
int nPgsz; /* Page size for host database */
char *zSegmentsTbl; /* Name of %_segments table */
sqlite3_blob *pSegments; /* Blob handle open on %_segments table */
+ int iSavepoint;
/*
** The following array of hash tables is used to buffer pending index
@@ -181382,6 +184766,8 @@ SQLITE_PRIVATE int sqlite3FtsUnicodeIsdiacritic(int);
SQLITE_PRIVATE int sqlite3Fts3ExprIterate(Fts3Expr*, int (*x)(Fts3Expr*,int,void*), void*);
+SQLITE_PRIVATE int sqlite3Fts3IntegrityCheck(Fts3Table *p, int *pbOk);
+
#endif /* !SQLITE_CORE || SQLITE_ENABLE_FTS3 */
#endif /* _FTSINT_H */
@@ -181738,6 +185124,7 @@ static void fts3DeclareVtab(int *pRc, Fts3Table *p){
zLanguageid = (p->zLanguageid ? p->zLanguageid : "__langid");
sqlite3_vtab_config(p->db, SQLITE_VTAB_CONSTRAINT_SUPPORT, 1);
+ sqlite3_vtab_config(p->db, SQLITE_VTAB_INNOCUOUS);
/* Create a list of user columns for the virtual table */
zCols = sqlite3_mprintf("%Q, ", p->azColumn[0]);
@@ -184987,6 +188374,8 @@ static int fts3RenameMethod(
rc = sqlite3Fts3PendingTermsFlush(p);
}
+ p->bIgnoreSavepoint = 1;
+
if( p->zContentTbl==0 ){
fts3DbExec(&rc, db,
"ALTER TABLE %Q.'%q_content' RENAME TO '%q_content';",
@@ -185014,6 +188403,8 @@ static int fts3RenameMethod(
"ALTER TABLE %Q.'%q_segdir' RENAME TO '%q_segdir';",
p->zDb, p->zName, zName
);
+
+ p->bIgnoreSavepoint = 0;
return rc;
}
@@ -185024,12 +188415,28 @@ static int fts3RenameMethod(
*/
static int fts3SavepointMethod(sqlite3_vtab *pVtab, int iSavepoint){
int rc = SQLITE_OK;
- UNUSED_PARAMETER(iSavepoint);
- assert( ((Fts3Table *)pVtab)->inTransaction );
- assert( ((Fts3Table *)pVtab)->mxSavepoint <= iSavepoint );
- TESTONLY( ((Fts3Table *)pVtab)->mxSavepoint = iSavepoint );
- if( ((Fts3Table *)pVtab)->bIgnoreSavepoint==0 ){
- rc = fts3SyncMethod(pVtab);
+ Fts3Table *pTab = (Fts3Table*)pVtab;
+ assert( pTab->inTransaction );
+ assert( pTab->mxSavepoint<=iSavepoint );
+ TESTONLY( pTab->mxSavepoint = iSavepoint );
+
+ if( pTab->bIgnoreSavepoint==0 ){
+ if( fts3HashCount(&pTab->aIndex[0].hPending)>0 ){
+ char *zSql = sqlite3_mprintf("INSERT INTO %Q.%Q(%Q) VALUES('flush')",
+ pTab->zDb, pTab->zName, pTab->zName
+ );
+ if( zSql ){
+ pTab->bIgnoreSavepoint = 1;
+ rc = sqlite3_exec(pTab->db, zSql, 0, 0, 0);
+ pTab->bIgnoreSavepoint = 0;
+ sqlite3_free(zSql);
+ }else{
+ rc = SQLITE_NOMEM;
+ }
+ }
+ if( rc==SQLITE_OK ){
+ pTab->iSavepoint = iSavepoint+1;
+ }
}
return rc;
}
@@ -185040,12 +188447,11 @@ static int fts3SavepointMethod(sqlite3_vtab *pVtab, int iSavepoint){
** This is a no-op.
*/
static int fts3ReleaseMethod(sqlite3_vtab *pVtab, int iSavepoint){
- TESTONLY( Fts3Table *p = (Fts3Table*)pVtab );
- UNUSED_PARAMETER(iSavepoint);
- UNUSED_PARAMETER(pVtab);
- assert( p->inTransaction );
- assert( p->mxSavepoint >= iSavepoint );
- TESTONLY( p->mxSavepoint = iSavepoint-1 );
+ Fts3Table *pTab = (Fts3Table*)pVtab;
+ assert( pTab->inTransaction );
+ assert( pTab->mxSavepoint >= iSavepoint );
+ TESTONLY( pTab->mxSavepoint = iSavepoint-1 );
+ pTab->iSavepoint = iSavepoint;
return SQLITE_OK;
}
@@ -185055,11 +188461,13 @@ static int fts3ReleaseMethod(sqlite3_vtab *pVtab, int iSavepoint){
** Discard the contents of the pending terms table.
*/
static int fts3RollbackToMethod(sqlite3_vtab *pVtab, int iSavepoint){
- Fts3Table *p = (Fts3Table*)pVtab;
+ Fts3Table *pTab = (Fts3Table*)pVtab;
UNUSED_PARAMETER(iSavepoint);
- assert( p->inTransaction );
- TESTONLY( p->mxSavepoint = iSavepoint );
- sqlite3Fts3PendingTermsClear(p);
+ assert( pTab->inTransaction );
+ TESTONLY( pTab->mxSavepoint = iSavepoint );
+ if( (iSavepoint+1)<=pTab->iSavepoint ){
+ sqlite3Fts3PendingTermsClear(pTab);
+ }
return SQLITE_OK;
}
@@ -185078,8 +188486,40 @@ static int fts3ShadowName(const char *zName){
return 0;
}
+/*
+** Implementation of the xIntegrity() method on the FTS3/FTS4 virtual
+** table.
+*/
+static int fts3IntegrityMethod(
+ sqlite3_vtab *pVtab, /* The virtual table to be checked */
+ const char *zSchema, /* Name of schema in which pVtab lives */
+ const char *zTabname, /* Name of the pVTab table */
+ int isQuick, /* True if this is a quick_check */
+ char **pzErr /* Write error message here */
+){
+ Fts3Table *p = (Fts3Table*)pVtab;
+ int rc;
+ int bOk = 0;
+
+ UNUSED_PARAMETER(isQuick);
+ rc = sqlite3Fts3IntegrityCheck(p, &bOk);
+ assert( rc!=SQLITE_CORRUPT_VTAB || bOk==0 );
+ if( rc!=SQLITE_OK && rc!=SQLITE_CORRUPT_VTAB ){
+ *pzErr = sqlite3_mprintf("unable to validate the inverted index for"
+ " FTS%d table %s.%s: %s",
+ p->bFts4 ? 4 : 3, zSchema, zTabname, sqlite3_errstr(rc));
+ }else if( bOk==0 ){
+ *pzErr = sqlite3_mprintf("malformed inverted index for FTS%d table %s.%s",
+ p->bFts4 ? 4 : 3, zSchema, zTabname);
+ }
+ sqlite3Fts3SegmentsClose(p);
+ return SQLITE_OK;
+}
+
+
+
static const sqlite3_module fts3Module = {
- /* iVersion */ 3,
+ /* iVersion */ 4,
/* xCreate */ fts3CreateMethod,
/* xConnect */ fts3ConnectMethod,
/* xBestIndex */ fts3BestIndexMethod,
@@ -185103,6 +188543,7 @@ static const sqlite3_module fts3Module = {
/* xRelease */ fts3ReleaseMethod,
/* xRollbackTo */ fts3RollbackToMethod,
/* xShadowName */ fts3ShadowName,
+ /* xIntegrity */ fts3IntegrityMethod,
};
/*
@@ -187778,7 +191219,8 @@ SQLITE_PRIVATE int sqlite3Fts3InitAux(sqlite3 *db){
0, /* xSavepoint */
0, /* xRelease */
0, /* xRollbackTo */
- 0 /* xShadowName */
+ 0, /* xShadowName */
+ 0 /* xIntegrity */
};
int rc; /* Return code */
@@ -191344,7 +194786,8 @@ SQLITE_PRIVATE int sqlite3Fts3InitTok(sqlite3 *db, Fts3Hash *pHash, void(*xDestr
0, /* xSavepoint */
0, /* xRelease */
0, /* xRollbackTo */
- 0 /* xShadowName */
+ 0, /* xShadowName */
+ 0 /* xIntegrity */
};
int rc; /* Return code */
@@ -194685,7 +198128,6 @@ SQLITE_PRIVATE int sqlite3Fts3PendingTermsFlush(Fts3Table *p){
rc = fts3SegmentMerge(p, p->iPrevLangid, i, FTS3_SEGCURSOR_PENDING);
if( rc==SQLITE_DONE ) rc = SQLITE_OK;
}
- sqlite3Fts3PendingTermsClear(p);
/* Determine the auto-incr-merge setting if unknown. If enabled,
** estimate the number of leaf blocks of content to be written
@@ -194707,6 +198149,10 @@ SQLITE_PRIVATE int sqlite3Fts3PendingTermsFlush(Fts3Table *p){
rc = sqlite3_reset(pStmt);
}
}
+
+ if( rc==SQLITE_OK ){
+ sqlite3Fts3PendingTermsClear(p);
+ }
return rc;
}
@@ -195338,6 +198784,8 @@ static int fts3AppendToNode(
blobGrowBuffer(pPrev, nTerm, &rc);
if( rc!=SQLITE_OK ) return rc;
+ assert( pPrev!=0 );
+ assert( pPrev->a!=0 );
nPrefix = fts3PrefixCompress(pPrev->a, pPrev->n, zTerm, nTerm);
nSuffix = nTerm - nPrefix;
@@ -195394,9 +198842,13 @@ static int fts3IncrmergeAppend(
nSpace += sqlite3Fts3VarintLen(nDoclist) + nDoclist;
/* If the current block is not empty, and if adding this term/doclist
- ** to the current block would make it larger than Fts3Table.nNodeSize
- ** bytes, write this block out to the database. */
- if( pLeaf->block.n>0 && (pLeaf->block.n + nSpace)>p->nNodeSize ){
+ ** to the current block would make it larger than Fts3Table.nNodeSize bytes,
+ ** and if there is still room for another leaf page, write this block out to
+ ** the database. */
+ if( pLeaf->block.n>0
+ && (pLeaf->block.n + nSpace)>p->nNodeSize
+ && pLeaf->iBlock < (pWriter->iStart + pWriter->nLeafEst)
+ ){
rc = fts3WriteSegment(p, pLeaf->iBlock, pLeaf->block.a, pLeaf->block.n);
pWriter->nWork++;
@@ -195707,6 +199159,7 @@ static int fts3IncrmergeLoad(
for(i=nHeight; i>=0 && rc==SQLITE_OK; i--){
NodeReader reader;
+ memset(&reader, 0, sizeof(reader));
pNode = &pWriter->aNodeWriter[i];
if( pNode->block.a){
@@ -195727,7 +199180,7 @@ static int fts3IncrmergeLoad(
rc = sqlite3Fts3ReadBlock(p, reader.iChild, &aBlock, &nBlock,0);
blobGrowBuffer(&pNode->block,
MAX(nBlock, p->nNodeSize)+FTS3_NODE_PADDING, &rc
- );
+ );
if( rc==SQLITE_OK ){
memcpy(pNode->block.a, aBlock, nBlock);
pNode->block.n = nBlock;
@@ -196577,7 +200030,7 @@ static u64 fts3ChecksumIndex(
int rc;
u64 cksum = 0;
- assert( *pRc==SQLITE_OK );
+ if( *pRc ) return 0;
memset(&filter, 0, sizeof(filter));
memset(&csr, 0, sizeof(csr));
@@ -196644,7 +200097,7 @@ static u64 fts3ChecksumIndex(
** If an error occurs (e.g. an OOM or IO error), return an SQLite error
** code. The final value of *pbOk is undefined in this case.
*/
-static int fts3IntegrityCheck(Fts3Table *p, int *pbOk){
+SQLITE_PRIVATE int sqlite3Fts3IntegrityCheck(Fts3Table *p, int *pbOk){
int rc = SQLITE_OK; /* Return code */
u64 cksum1 = 0; /* Checksum based on FTS index contents */
u64 cksum2 = 0; /* Checksum based on %_content contents */
@@ -196722,7 +200175,7 @@ static int fts3IntegrityCheck(Fts3Table *p, int *pbOk){
sqlite3_finalize(pStmt);
}
- *pbOk = (cksum1==cksum2);
+ *pbOk = (rc==SQLITE_OK && cksum1==cksum2);
return rc;
}
@@ -196762,7 +200215,7 @@ static int fts3DoIntegrityCheck(
){
int rc;
int bOk = 0;
- rc = fts3IntegrityCheck(p, &bOk);
+ rc = sqlite3Fts3IntegrityCheck(p, &bOk);
if( rc==SQLITE_OK && bOk==0 ) rc = FTS_CORRUPT_VTAB;
return rc;
}
@@ -196792,8 +200245,11 @@ static int fts3SpecialInsert(Fts3Table *p, sqlite3_value *pVal){
rc = fts3DoIncrmerge(p, &zVal[6]);
}else if( nVal>10 && 0==sqlite3_strnicmp(zVal, "automerge=", 10) ){
rc = fts3DoAutoincrmerge(p, &zVal[10]);
+ }else if( nVal==5 && 0==sqlite3_strnicmp(zVal, "flush", 5) ){
+ rc = sqlite3Fts3PendingTermsFlush(p);
+ }
#if defined(SQLITE_DEBUG) || defined(SQLITE_TEST)
- }else{
+ else{
int v;
if( nVal>9 && 0==sqlite3_strnicmp(zVal, "nodesize=", 9) ){
v = atoi(&zVal[9]);
@@ -196811,8 +200267,8 @@ static int fts3SpecialInsert(Fts3Table *p, sqlite3_value *pVal){
if( v>=4 && v<=FTS3_MERGE_COUNT && (v&1)==0 ) p->nMergeCount = v;
rc = SQLITE_OK;
}
-#endif
}
+#endif
return rc;
}
@@ -199733,59 +203189,242 @@ SQLITE_PRIVATE int sqlite3FtsUnicodeFold(int c, int eRemoveDiacritic){
**
******************************************************************************
**
-** This SQLite JSON functions.
+** SQLite JSON functions.
**
** This file began as an extension in ext/misc/json1.c in 2015. That
** extension proved so useful that it has now been moved into the core.
**
-** For the time being, all JSON is stored as pure text. (We might add
-** a JSONB type in the future which stores a binary encoding of JSON in
-** a BLOB, but there is no support for JSONB in the current implementation.
-** This implementation parses JSON text at 250 MB/s, so it is hard to see
-** how JSONB might improve on that.)
+** The original design stored all JSON as pure text, canonical RFC-8259.
+** Support for JSON-5 extensions was added with version 3.42.0 (2023-05-16).
+** All generated JSON text still conforms strictly to RFC-8259, but text
+** with JSON-5 extensions is accepted as input.
+**
+** Beginning with version 3.45.0 (circa 2024-01-01), these routines also
+** accept BLOB values that have JSON encoded using a binary representation
+** called "JSONB". The name JSONB comes from PostgreSQL, however the on-disk
+** format SQLite JSONB is completely different and incompatible with
+** PostgreSQL JSONB.
+**
+** Decoding and interpreting JSONB is still O(N) where N is the size of
+** the input, the same as text JSON. However, the constant of proportionality
+** for JSONB is much smaller due to faster parsing. The size of each
+** element in JSONB is encoded in its header, so there is no need to search
+** for delimiters using persnickety syntax rules. JSONB seems to be about
+** 3x faster than text JSON as a result. JSONB is also tends to be slightly
+** smaller than text JSON, by 5% or 10%, but there are corner cases where
+** JSONB can be slightly larger. So you are not far mistaken to say that
+** a JSONB blob is the same size as the equivalent RFC-8259 text.
+**
+**
+** THE JSONB ENCODING:
+**
+** Every JSON element is encoded in JSONB as a header and a payload.
+** The header is between 1 and 9 bytes in size. The payload is zero
+** or more bytes.
+**
+** The lower 4 bits of the first byte of the header determines the
+** element type:
+**
+** 0: NULL
+** 1: TRUE
+** 2: FALSE
+** 3: INT -- RFC-8259 integer literal
+** 4: INT5 -- JSON5 integer literal
+** 5: FLOAT -- RFC-8259 floating point literal
+** 6: FLOAT5 -- JSON5 floating point literal
+** 7: TEXT -- Text literal acceptable to both SQL and JSON
+** 8: TEXTJ -- Text containing RFC-8259 escapes
+** 9: TEXT5 -- Text containing JSON5 and/or RFC-8259 escapes
+** 10: TEXTRAW -- Text containing unescaped syntax characters
+** 11: ARRAY
+** 12: OBJECT
+**
+** The other three possible values (13-15) are reserved for future
+** enhancements.
+**
+** The upper 4 bits of the first byte determine the size of the header
+** and sometimes also the size of the payload. If X is the first byte
+** of the element and if X>>4 is between 0 and 11, then the payload
+** will be that many bytes in size and the header is exactly one byte
+** in size. Other four values for X>>4 (12-15) indicate that the header
+** is more than one byte in size and that the payload size is determined
+** by the remainder of the header, interpreted as a unsigned big-endian
+** integer.
+**
+** Value of X>>4 Size integer Total header size
+** ------------- -------------------- -----------------
+** 12 1 byte (0-255) 2
+** 13 2 byte (0-65535) 3
+** 14 4 byte (0-4294967295) 5
+** 15 8 byte (0-1.8e19) 9
+**
+** The payload size need not be expressed in its minimal form. For example,
+** if the payload size is 10, the size can be expressed in any of 5 different
+** ways: (1) (X>>4)==10, (2) (X>>4)==12 following by on 0x0a byte,
+** (3) (X>>4)==13 followed by 0x00 and 0x0a, (4) (X>>4)==14 followed by
+** 0x00 0x00 0x00 0x0a, or (5) (X>>4)==15 followed by 7 bytes of 0x00 and
+** a single byte of 0x0a. The shorter forms are preferred, of course, but
+** sometimes when generating JSONB, the payload size is not known in advance
+** and it is convenient to reserve sufficient header space to cover the
+** largest possible payload size and then come back later and patch up
+** the size when it becomes known, resulting in a non-minimal encoding.
+**
+** The value (X>>4)==15 is not actually used in the current implementation
+** (as SQLite is currently unable handle BLOBs larger than about 2GB)
+** but is included in the design to allow for future enhancements.
+**
+** The payload follows the header. NULL, TRUE, and FALSE have no payload and
+** their payload size must always be zero. The payload for INT, INT5,
+** FLOAT, FLOAT5, TEXT, TEXTJ, TEXT5, and TEXTROW is text. Note that the
+** "..." or '...' delimiters are omitted from the various text encodings.
+** The payload for ARRAY and OBJECT is a list of additional elements that
+** are the content for the array or object. The payload for an OBJECT
+** must be an even number of elements. The first element of each pair is
+** the label and must be of type TEXT, TEXTJ, TEXT5, or TEXTRAW.
+**
+** A valid JSONB blob consists of a single element, as described above.
+** Usually this will be an ARRAY or OBJECT element which has many more
+** elements as its content. But the overall blob is just a single element.
+**
+** Input validation for JSONB blobs simply checks that the element type
+** code is between 0 and 12 and that the total size of the element
+** (header plus payload) is the same as the size of the BLOB. If those
+** checks are true, the BLOB is assumed to be JSONB and processing continues.
+** Errors are only raised if some other miscoding is discovered during
+** processing.
+**
+** Additional information can be found in the doc/jsonb.md file of the
+** canonical SQLite source tree.
*/
#ifndef SQLITE_OMIT_JSON
/* #include "sqliteInt.h" */
+/* JSONB element types
+*/
+#define JSONB_NULL 0 /* "null" */
+#define JSONB_TRUE 1 /* "true" */
+#define JSONB_FALSE 2 /* "false" */
+#define JSONB_INT 3 /* integer acceptable to JSON and SQL */
+#define JSONB_INT5 4 /* integer in 0x000 notation */
+#define JSONB_FLOAT 5 /* float acceptable to JSON and SQL */
+#define JSONB_FLOAT5 6 /* float with JSON5 extensions */
+#define JSONB_TEXT 7 /* Text compatible with both JSON and SQL */
+#define JSONB_TEXTJ 8 /* Text with JSON escapes */
+#define JSONB_TEXT5 9 /* Text with JSON-5 escape */
+#define JSONB_TEXTRAW 10 /* SQL text that needs escaping for JSON */
+#define JSONB_ARRAY 11 /* An array */
+#define JSONB_OBJECT 12 /* An object */
+
+/* Human-readable names for the JSONB values. The index for each
+** string must correspond to the JSONB_* integer above.
+*/
+static const char * const jsonbType[] = {
+ "null", "true", "false", "integer", "integer",
+ "real", "real", "text", "text", "text",
+ "text", "array", "object", "", "", "", ""
+};
+
/*
** Growing our own isspace() routine this way is twice as fast as
** the library isspace() function, resulting in a 7% overall performance
-** increase for the parser. (Ubuntu14.10 gcc 4.8.4 x64 with -Os).
+** increase for the text-JSON parser. (Ubuntu14.10 gcc 4.8.4 x64 with -Os).
*/
static const char jsonIsSpace[] = {
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 1, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 1, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+};
+#define jsonIsspace(x) (jsonIsSpace[(unsigned char)x])
+
+/*
+** The set of all space characters recognized by jsonIsspace().
+** Useful as the second argument to strspn().
+*/
+static const char jsonSpaces[] = "\011\012\015\040";
+
+/*
+** Characters that are special to JSON. Control characters,
+** '"' and '\\' and '\''. Actually, '\'' is not special to
+** canonical JSON, but it is special in JSON-5, so we include
+** it in the set of special characters.
+*/
+static const char jsonIsOk[256] = {
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 1, 1, 0, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1
};
-#define fast_isspace(x) (jsonIsSpace[(unsigned char)x])
-
-#if !defined(SQLITE_DEBUG) && !defined(SQLITE_COVERAGE_TEST)
-# define VVA(X)
-#else
-# define VVA(X) X
-#endif
/* Objects */
+typedef struct JsonCache JsonCache;
typedef struct JsonString JsonString;
-typedef struct JsonNode JsonNode;
typedef struct JsonParse JsonParse;
+/*
+** Magic number used for the JSON parse cache in sqlite3_get_auxdata()
+*/
+#define JSON_CACHE_ID (-429938) /* Cache entry */
+#define JSON_CACHE_SIZE 4 /* Max number of cache entries */
+
+/*
+** jsonUnescapeOneChar() returns this invalid code point if it encounters
+** a syntax error.
+*/
+#define JSON_INVALID_CHAR 0x99999
+
+/* A cache mapping JSON text into JSONB blobs.
+**
+** Each cache entry is a JsonParse object with the following restrictions:
+**
+** * The bReadOnly flag must be set
+**
+** * The aBlob[] array must be owned by the JsonParse object. In other
+** words, nBlobAlloc must be non-zero.
+**
+** * eEdit and delta must be zero.
+**
+** * zJson must be an RCStr. In other words bJsonIsRCStr must be true.
+*/
+struct JsonCache {
+ sqlite3 *db; /* Database connection */
+ int nUsed; /* Number of active entries in the cache */
+ JsonParse *a[JSON_CACHE_SIZE]; /* One line for each cache entry */
+};
+
/* An instance of this object represents a JSON string
** under construction. Really, this is a generic string accumulator
** that can be and is used to create strings other than JSON.
+**
+** If the generated string is longer than will fit into the zSpace[] buffer,
+** then it will be an RCStr string. This aids with caching of large
+** JSON strings.
*/
struct JsonString {
sqlite3_context *pCtx; /* Function context - put error messages here */
@@ -199793,76 +203432,75 @@ struct JsonString {
u64 nAlloc; /* Bytes of storage available in zBuf[] */
u64 nUsed; /* Bytes of zBuf[] currently used */
u8 bStatic; /* True if zBuf is static space */
- u8 bErr; /* True if an error has been encountered */
+ u8 eErr; /* True if an error has been encountered */
char zSpace[100]; /* Initial static space */
};
-/* JSON type values
-*/
-#define JSON_NULL 0
-#define JSON_TRUE 1
-#define JSON_FALSE 2
-#define JSON_INT 3
-#define JSON_REAL 4
-#define JSON_STRING 5
-#define JSON_ARRAY 6
-#define JSON_OBJECT 7
+/* Allowed values for JsonString.eErr */
+#define JSTRING_OOM 0x01 /* Out of memory */
+#define JSTRING_MALFORMED 0x02 /* Malformed JSONB */
+#define JSTRING_ERR 0x04 /* Error already sent to sqlite3_result */
-/* The "subtype" set for JSON values */
+/* The "subtype" set for text JSON values passed through using
+** sqlite3_result_subtype() and sqlite3_value_subtype().
+*/
#define JSON_SUBTYPE 74 /* Ascii for "J" */
/*
-** Names of the various JSON types:
+** Bit values for the flags passed into various SQL function implementations
+** via the sqlite3_user_data() value.
*/
-static const char * const jsonType[] = {
- "null", "true", "false", "integer", "real", "text", "array", "object"
-};
-
-/* Bit values for the JsonNode.jnFlag field
-*/
-#define JNODE_RAW 0x01 /* Content is raw, not JSON encoded */
-#define JNODE_ESCAPE 0x02 /* Content is text with \ escapes */
-#define JNODE_REMOVE 0x04 /* Do not output */
-#define JNODE_REPLACE 0x08 /* Replace with JsonNode.u.iReplace */
-#define JNODE_PATCH 0x10 /* Patch with JsonNode.u.pPatch */
-#define JNODE_APPEND 0x20 /* More ARRAY/OBJECT entries at u.iAppend */
-#define JNODE_LABEL 0x40 /* Is a label of an object */
-#define JNODE_JSON5 0x80 /* Node contains JSON5 enhancements */
-
+#define JSON_JSON 0x01 /* Result is always JSON */
+#define JSON_SQL 0x02 /* Result is always SQL */
+#define JSON_ABPATH 0x03 /* Allow abbreviated JSON path specs */
+#define JSON_ISSET 0x04 /* json_set(), not json_insert() */
+#define JSON_BLOB 0x08 /* Use the BLOB output format */
-/* A single node of parsed JSON
-*/
-struct JsonNode {
- u8 eType; /* One of the JSON_ type values */
- u8 jnFlags; /* JNODE flags */
- u8 eU; /* Which union element to use */
- u32 n; /* Bytes of content, or number of sub-nodes */
- union {
- const char *zJContent; /* 1: Content for INT, REAL, and STRING */
- u32 iAppend; /* 2: More terms for ARRAY and OBJECT */
- u32 iKey; /* 3: Key for ARRAY objects in json_tree() */
- u32 iReplace; /* 4: Replacement content for JNODE_REPLACE */
- JsonNode *pPatch; /* 5: Node chain of patch for JNODE_PATCH */
- } u;
-};
-/* A completely parsed JSON string
+/* A parsed JSON value. Lifecycle:
+**
+** 1. JSON comes in and is parsed into a JSONB value in aBlob. The
+** original text is stored in zJson. This step is skipped if the
+** input is JSONB instead of text JSON.
+**
+** 2. The aBlob[] array is searched using the JSON path notation, if needed.
+**
+** 3. Zero or more changes are made to aBlob[] (via json_remove() or
+** json_replace() or json_patch() or similar).
+**
+** 4. New JSON text is generated from the aBlob[] for output. This step
+** is skipped if the function is one of the jsonb_* functions that
+** returns JSONB instead of text JSON.
*/
struct JsonParse {
- u32 nNode; /* Number of slots of aNode[] used */
- u32 nAlloc; /* Number of slots of aNode[] allocated */
- JsonNode *aNode; /* Array of nodes containing the parse */
- const char *zJson; /* Original JSON string */
- u32 *aUp; /* Index of parent of each node */
+ u8 *aBlob; /* JSONB representation of JSON value */
+ u32 nBlob; /* Bytes of aBlob[] actually used */
+ u32 nBlobAlloc; /* Bytes allocated to aBlob[]. 0 if aBlob is external */
+ char *zJson; /* Json text used for parsing */
+ sqlite3 *db; /* The database connection to which this object belongs */
+ int nJson; /* Length of the zJson string in bytes */
+ u32 nJPRef; /* Number of references to this object */
+ u32 iErr; /* Error location in zJson[] */
u16 iDepth; /* Nesting depth */
u8 nErr; /* Number of errors seen */
u8 oom; /* Set to true if out of memory */
+ u8 bJsonIsRCStr; /* True if zJson is an RCStr */
u8 hasNonstd; /* True if input uses non-standard features like JSON5 */
- int nJson; /* Length of the zJson string in bytes */
- u32 iErr; /* Error location in zJson[] */
- u32 iHold; /* Replace cache line with the lowest iHold value */
+ u8 bReadOnly; /* Do not modify. */
+ /* Search and edit information. See jsonLookupStep() */
+ u8 eEdit; /* Edit operation to apply */
+ int delta; /* Size change due to the edit */
+ u32 nIns; /* Number of bytes to insert */
+ u32 iLabel; /* Location of label if search landed on an object value */
+ u8 *aIns; /* Content to be inserted */
};
+/* Allowed values for JsonParse.eEdit */
+#define JEDIT_DEL 1 /* Delete if exists */
+#define JEDIT_REPL 2 /* Overwrite if exists */
+#define JEDIT_INS 3 /* Insert if not exists */
+#define JEDIT_SET 4 /* Insert or overwrite */
+
/*
** Maximum nesting depth of JSON for this implementation.
**
@@ -199870,15 +203508,151 @@ struct JsonParse {
** descent parser. A depth of 1000 is far deeper than any sane JSON
** should go. Historical note: This limit was 2000 prior to version 3.42.0
*/
-#define JSON_MAX_DEPTH 1000
+#ifndef SQLITE_JSON_MAX_DEPTH
+# define JSON_MAX_DEPTH 1000
+#else
+# define JSON_MAX_DEPTH SQLITE_JSON_MAX_DEPTH
+#endif
+
+/*
+** Allowed values for the flgs argument to jsonParseFuncArg();
+*/
+#define JSON_EDITABLE 0x01 /* Generate a writable JsonParse object */
+#define JSON_KEEPERROR 0x02 /* Return non-NULL even if there is an error */
+
+/**************************************************************************
+** Forward references
+**************************************************************************/
+static void jsonReturnStringAsBlob(JsonString*);
+static int jsonFuncArgMightBeBinary(sqlite3_value *pJson);
+static u32 jsonTranslateBlobToText(const JsonParse*,u32,JsonString*);
+static void jsonReturnParse(sqlite3_context*,JsonParse*);
+static JsonParse *jsonParseFuncArg(sqlite3_context*,sqlite3_value*,u32);
+static void jsonParseFree(JsonParse*);
+static u32 jsonbPayloadSize(const JsonParse*, u32, u32*);
+static u32 jsonUnescapeOneChar(const char*, u32, u32*);
+
+/**************************************************************************
+** Utility routines for dealing with JsonCache objects
+**************************************************************************/
+
+/*
+** Free a JsonCache object.
+*/
+static void jsonCacheDelete(JsonCache *p){
+ int i;
+ for(i=0; i<p->nUsed; i++){
+ jsonParseFree(p->a[i]);
+ }
+ sqlite3DbFree(p->db, p);
+}
+static void jsonCacheDeleteGeneric(void *p){
+ jsonCacheDelete((JsonCache*)p);
+}
+
+/*
+** Insert a new entry into the cache. If the cache is full, expel
+** the least recently used entry. Return SQLITE_OK on success or a
+** result code otherwise.
+**
+** Cache entries are stored in age order, oldest first.
+*/
+static int jsonCacheInsert(
+ sqlite3_context *ctx, /* The SQL statement context holding the cache */
+ JsonParse *pParse /* The parse object to be added to the cache */
+){
+ JsonCache *p;
+
+ assert( pParse->zJson!=0 );
+ assert( pParse->bJsonIsRCStr );
+ assert( pParse->delta==0 );
+ p = sqlite3_get_auxdata(ctx, JSON_CACHE_ID);
+ if( p==0 ){
+ sqlite3 *db = sqlite3_context_db_handle(ctx);
+ p = sqlite3DbMallocZero(db, sizeof(*p));
+ if( p==0 ) return SQLITE_NOMEM;
+ p->db = db;
+ sqlite3_set_auxdata(ctx, JSON_CACHE_ID, p, jsonCacheDeleteGeneric);
+ p = sqlite3_get_auxdata(ctx, JSON_CACHE_ID);
+ if( p==0 ) return SQLITE_NOMEM;
+ }
+ if( p->nUsed >= JSON_CACHE_SIZE ){
+ jsonParseFree(p->a[0]);
+ memmove(p->a, &p->a[1], (JSON_CACHE_SIZE-1)*sizeof(p->a[0]));
+ p->nUsed = JSON_CACHE_SIZE-1;
+ }
+ assert( pParse->nBlobAlloc>0 );
+ pParse->eEdit = 0;
+ pParse->nJPRef++;
+ pParse->bReadOnly = 1;
+ p->a[p->nUsed] = pParse;
+ p->nUsed++;
+ return SQLITE_OK;
+}
+
+/*
+** Search for a cached translation the json text supplied by pArg. Return
+** the JsonParse object if found. Return NULL if not found.
+**
+** When a match if found, the matching entry is moved to become the
+** most-recently used entry if it isn't so already.
+**
+** The JsonParse object returned still belongs to the Cache and might
+** be deleted at any moment. If the caller whants the JsonParse to
+** linger, it needs to increment the nPJRef reference counter.
+*/
+static JsonParse *jsonCacheSearch(
+ sqlite3_context *ctx, /* The SQL statement context holding the cache */
+ sqlite3_value *pArg /* Function argument containing SQL text */
+){
+ JsonCache *p;
+ int i;
+ const char *zJson;
+ int nJson;
+
+ if( sqlite3_value_type(pArg)!=SQLITE_TEXT ){
+ return 0;
+ }
+ zJson = (const char*)sqlite3_value_text(pArg);
+ if( zJson==0 ) return 0;
+ nJson = sqlite3_value_bytes(pArg);
+
+ p = sqlite3_get_auxdata(ctx, JSON_CACHE_ID);
+ if( p==0 ){
+ return 0;
+ }
+ for(i=0; i<p->nUsed; i++){
+ if( p->a[i]->zJson==zJson ) break;
+ }
+ if( i>=p->nUsed ){
+ for(i=0; i<p->nUsed; i++){
+ if( p->a[i]->nJson!=nJson ) continue;
+ if( memcmp(p->a[i]->zJson, zJson, nJson)==0 ) break;
+ }
+ }
+ if( i<p->nUsed ){
+ if( i<p->nUsed-1 ){
+ /* Make the matching entry the most recently used entry */
+ JsonParse *tmp = p->a[i];
+ memmove(&p->a[i], &p->a[i+1], (p->nUsed-i-1)*sizeof(tmp));
+ p->a[p->nUsed-1] = tmp;
+ i = p->nUsed - 1;
+ }
+ assert( p->a[i]->delta==0 );
+ return p->a[i];
+ }else{
+ return 0;
+ }
+}
/**************************************************************************
** Utility routines for dealing with JsonString objects
**************************************************************************/
-/* Set the JsonString object to an empty string
+/* Turn uninitialized bulk memory into a valid JsonString object
+** holding a zero-length string.
*/
-static void jsonZero(JsonString *p){
+static void jsonStringZero(JsonString *p){
p->zBuf = p->zSpace;
p->nAlloc = sizeof(p->zSpace);
p->nUsed = 0;
@@ -199887,53 +203661,51 @@ static void jsonZero(JsonString *p){
/* Initialize the JsonString object
*/
-static void jsonInit(JsonString *p, sqlite3_context *pCtx){
+static void jsonStringInit(JsonString *p, sqlite3_context *pCtx){
p->pCtx = pCtx;
- p->bErr = 0;
- jsonZero(p);
+ p->eErr = 0;
+ jsonStringZero(p);
}
-
/* Free all allocated memory and reset the JsonString object back to its
** initial state.
*/
-static void jsonReset(JsonString *p){
- if( !p->bStatic ) sqlite3_free(p->zBuf);
- jsonZero(p);
+static void jsonStringReset(JsonString *p){
+ if( !p->bStatic ) sqlite3RCStrUnref(p->zBuf);
+ jsonStringZero(p);
}
-
/* Report an out-of-memory (OOM) condition
*/
-static void jsonOom(JsonString *p){
- p->bErr = 1;
- sqlite3_result_error_nomem(p->pCtx);
- jsonReset(p);
+static void jsonStringOom(JsonString *p){
+ p->eErr |= JSTRING_OOM;
+ if( p->pCtx ) sqlite3_result_error_nomem(p->pCtx);
+ jsonStringReset(p);
}
/* Enlarge pJson->zBuf so that it can hold at least N more bytes.
** Return zero on success. Return non-zero on an OOM error
*/
-static int jsonGrow(JsonString *p, u32 N){
+static int jsonStringGrow(JsonString *p, u32 N){
u64 nTotal = N<p->nAlloc ? p->nAlloc*2 : p->nAlloc+N+10;
char *zNew;
if( p->bStatic ){
- if( p->bErr ) return 1;
- zNew = sqlite3_malloc64(nTotal);
+ if( p->eErr ) return 1;
+ zNew = sqlite3RCStrNew(nTotal);
if( zNew==0 ){
- jsonOom(p);
+ jsonStringOom(p);
return SQLITE_NOMEM;
}
memcpy(zNew, p->zBuf, (size_t)p->nUsed);
p->zBuf = zNew;
p->bStatic = 0;
}else{
- zNew = sqlite3_realloc64(p->zBuf, nTotal);
- if( zNew==0 ){
- jsonOom(p);
+ p->zBuf = sqlite3RCStrResize(p->zBuf, nTotal);
+ if( p->zBuf==0 ){
+ p->eErr |= JSTRING_OOM;
+ jsonStringZero(p);
return SQLITE_NOMEM;
}
- p->zBuf = zNew;
}
p->nAlloc = nTotal;
return SQLITE_OK;
@@ -199941,18 +203713,41 @@ static int jsonGrow(JsonString *p, u32 N){
/* Append N bytes from zIn onto the end of the JsonString string.
*/
-static void jsonAppendRaw(JsonString *p, const char *zIn, u32 N){
- if( N==0 ) return;
- if( (N+p->nUsed >= p->nAlloc) && jsonGrow(p,N)!=0 ) return;
+static SQLITE_NOINLINE void jsonStringExpandAndAppend(
+ JsonString *p,
+ const char *zIn,
+ u32 N
+){
+ assert( N>0 );
+ if( jsonStringGrow(p,N) ) return;
memcpy(p->zBuf+p->nUsed, zIn, N);
p->nUsed += N;
}
+static void jsonAppendRaw(JsonString *p, const char *zIn, u32 N){
+ if( N==0 ) return;
+ if( N+p->nUsed >= p->nAlloc ){
+ jsonStringExpandAndAppend(p,zIn,N);
+ }else{
+ memcpy(p->zBuf+p->nUsed, zIn, N);
+ p->nUsed += N;
+ }
+}
+static void jsonAppendRawNZ(JsonString *p, const char *zIn, u32 N){
+ assert( N>0 );
+ if( N+p->nUsed >= p->nAlloc ){
+ jsonStringExpandAndAppend(p,zIn,N);
+ }else{
+ memcpy(p->zBuf+p->nUsed, zIn, N);
+ p->nUsed += N;
+ }
+}
+
/* Append formatted text (not to exceed N bytes) to the JsonString.
*/
static void jsonPrintf(int N, JsonString *p, const char *zFormat, ...){
va_list ap;
- if( (p->nUsed + N >= p->nAlloc) && jsonGrow(p, N) ) return;
+ if( (p->nUsed + N >= p->nAlloc) && jsonStringGrow(p, N) ) return;
va_start(ap, zFormat);
sqlite3_vsnprintf(N, p->zBuf+p->nUsed, zFormat, ap);
va_end(ap);
@@ -199961,10 +203756,38 @@ static void jsonPrintf(int N, JsonString *p, const char *zFormat, ...){
/* Append a single character
*/
-static void jsonAppendChar(JsonString *p, char c){
- if( p->nUsed>=p->nAlloc && jsonGrow(p,1)!=0 ) return;
+static SQLITE_NOINLINE void jsonAppendCharExpand(JsonString *p, char c){
+ if( jsonStringGrow(p,1) ) return;
p->zBuf[p->nUsed++] = c;
}
+static void jsonAppendChar(JsonString *p, char c){
+ if( p->nUsed>=p->nAlloc ){
+ jsonAppendCharExpand(p,c);
+ }else{
+ p->zBuf[p->nUsed++] = c;
+ }
+}
+
+/* Remove a single character from the end of the string
+*/
+static void jsonStringTrimOneChar(JsonString *p){
+ if( p->eErr==0 ){
+ assert( p->nUsed>0 );
+ p->nUsed--;
+ }
+}
+
+
+/* Make sure there is a zero terminator on p->zBuf[]
+**
+** Return true on success. Return false if an OOM prevents this
+** from happening.
+*/
+static int jsonStringTerminate(JsonString *p){
+ jsonAppendChar(p, 0);
+ jsonStringTrimOneChar(p);
+ return p->eErr==0;
+}
/* Append a comma separator to the output buffer, if the previous
** character is not '[' or '{'.
@@ -199973,25 +203796,76 @@ static void jsonAppendSeparator(JsonString *p){
char c;
if( p->nUsed==0 ) return;
c = p->zBuf[p->nUsed-1];
- if( c!='[' && c!='{' ) jsonAppendChar(p, ',');
+ if( c=='[' || c=='{' ) return;
+ jsonAppendChar(p, ',');
}
/* Append the N-byte string in zIn to the end of the JsonString string
-** under construction. Enclose the string in "..." and escape
-** any double-quotes or backslash characters contained within the
+** under construction. Enclose the string in double-quotes ("...") and
+** escape any double-quotes or backslash characters contained within the
** string.
+**
+** This routine is a high-runner. There is a measurable performance
+** increase associated with unwinding the jsonIsOk[] loop.
*/
static void jsonAppendString(JsonString *p, const char *zIn, u32 N){
- u32 i;
- if( zIn==0 || ((N+p->nUsed+2 >= p->nAlloc) && jsonGrow(p,N+2)!=0) ) return;
+ u32 k;
+ u8 c;
+ const u8 *z = (const u8*)zIn;
+ if( z==0 ) return;
+ if( (N+p->nUsed+2 >= p->nAlloc) && jsonStringGrow(p,N+2)!=0 ) return;
p->zBuf[p->nUsed++] = '"';
- for(i=0; i<N; i++){
- unsigned char c = ((unsigned const char*)zIn)[i];
+ while( 1 /*exit-by-break*/ ){
+ k = 0;
+ /* The following while() is the 4-way unwound equivalent of
+ **
+ ** while( k<N && jsonIsOk[z[k]] ){ k++; }
+ */
+ while( 1 /* Exit by break */ ){
+ if( k+3>=N ){
+ while( k<N && jsonIsOk[z[k]] ){ k++; }
+ break;
+ }
+ if( !jsonIsOk[z[k]] ){
+ break;
+ }
+ if( !jsonIsOk[z[k+1]] ){
+ k += 1;
+ break;
+ }
+ if( !jsonIsOk[z[k+2]] ){
+ k += 2;
+ break;
+ }
+ if( !jsonIsOk[z[k+3]] ){
+ k += 3;
+ break;
+ }else{
+ k += 4;
+ }
+ }
+ if( k>=N ){
+ if( k>0 ){
+ memcpy(&p->zBuf[p->nUsed], z, k);
+ p->nUsed += k;
+ }
+ break;
+ }
+ if( k>0 ){
+ memcpy(&p->zBuf[p->nUsed], z, k);
+ p->nUsed += k;
+ z += k;
+ N -= k;
+ }
+ c = z[0];
if( c=='"' || c=='\\' ){
json_simple_escape:
- if( (p->nUsed+N+3-i > p->nAlloc) && jsonGrow(p,N+3-i)!=0 ) return;
+ if( (p->nUsed+N+3 > p->nAlloc) && jsonStringGrow(p,N+3)!=0 ) return;
p->zBuf[p->nUsed++] = '\\';
- }else if( c<=0x1f ){
+ p->zBuf[p->nUsed++] = c;
+ }else if( c=='\'' ){
+ p->zBuf[p->nUsed++] = c;
+ }else{
static const char aSpecial[] = {
0, 0, 0, 0, 0, 0, 0, 0, 'b', 't', 'n', 0, 'f', 'r', 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
@@ -200002,158 +203876,37 @@ static void jsonAppendString(JsonString *p, const char *zIn, u32 N){
assert( aSpecial['\n']=='n' );
assert( aSpecial['\r']=='r' );
assert( aSpecial['\t']=='t' );
+ assert( c>=0 && c<sizeof(aSpecial) );
if( aSpecial[c] ){
c = aSpecial[c];
goto json_simple_escape;
}
- if( (p->nUsed+N+7+i > p->nAlloc) && jsonGrow(p,N+7-i)!=0 ) return;
+ if( (p->nUsed+N+7 > p->nAlloc) && jsonStringGrow(p,N+7)!=0 ) return;
p->zBuf[p->nUsed++] = '\\';
p->zBuf[p->nUsed++] = 'u';
p->zBuf[p->nUsed++] = '0';
p->zBuf[p->nUsed++] = '0';
- p->zBuf[p->nUsed++] = '0' + (c>>4);
- c = "0123456789abcdef"[c&0xf];
+ p->zBuf[p->nUsed++] = "0123456789abcdef"[c>>4];
+ p->zBuf[p->nUsed++] = "0123456789abcdef"[c&0xf];
}
- p->zBuf[p->nUsed++] = c;
+ z++;
+ N--;
}
p->zBuf[p->nUsed++] = '"';
assert( p->nUsed<p->nAlloc );
}
/*
-** The zIn[0..N] string is a JSON5 string literal. Append to p a translation
-** of the string literal that standard JSON and that omits all JSON5
-** features.
-*/
-static void jsonAppendNormalizedString(JsonString *p, const char *zIn, u32 N){
- u32 i;
- jsonAppendChar(p, '"');
- zIn++;
- N -= 2;
- while( N>0 ){
- for(i=0; i<N && zIn[i]!='\\'; i++){}
- if( i>0 ){
- jsonAppendRaw(p, zIn, i);
- zIn += i;
- N -= i;
- if( N==0 ) break;
- }
- assert( zIn[0]=='\\' );
- switch( (u8)zIn[1] ){
- case '\'':
- jsonAppendChar(p, '\'');
- break;
- case 'v':
- jsonAppendRaw(p, "\\u0009", 6);
- break;
- case 'x':
- jsonAppendRaw(p, "\\u00", 4);
- jsonAppendRaw(p, &zIn[2], 2);
- zIn += 2;
- N -= 2;
- break;
- case '0':
- jsonAppendRaw(p, "\\u0000", 6);
- break;
- case '\r':
- if( zIn[2]=='\n' ){
- zIn++;
- N--;
- }
- break;
- case '\n':
- break;
- case 0xe2:
- assert( N>=4 );
- assert( 0x80==(u8)zIn[2] );
- assert( 0xa8==(u8)zIn[3] || 0xa9==(u8)zIn[3] );
- zIn += 2;
- N -= 2;
- break;
- default:
- jsonAppendRaw(p, zIn, 2);
- break;
- }
- zIn += 2;
- N -= 2;
- }
- jsonAppendChar(p, '"');
-}
-
-/*
-** The zIn[0..N] string is a JSON5 integer literal. Append to p a translation
-** of the string literal that standard JSON and that omits all JSON5
-** features.
+** Append an sqlite3_value (such as a function parameter) to the JSON
+** string under construction in p.
*/
-static void jsonAppendNormalizedInt(JsonString *p, const char *zIn, u32 N){
- if( zIn[0]=='+' ){
- zIn++;
- N--;
- }else if( zIn[0]=='-' ){
- jsonAppendChar(p, '-');
- zIn++;
- N--;
- }
- if( zIn[0]=='0' && (zIn[1]=='x' || zIn[1]=='X') ){
- sqlite3_int64 i = 0;
- int rc = sqlite3DecOrHexToI64(zIn, &i);
- if( rc<=1 ){
- jsonPrintf(100,p,"%lld",i);
- }else{
- assert( rc==2 );
- jsonAppendRaw(p, "9.0e999", 7);
- }
- return;
- }
- jsonAppendRaw(p, zIn, N);
-}
-
-/*
-** The zIn[0..N] string is a JSON5 real literal. Append to p a translation
-** of the string literal that standard JSON and that omits all JSON5
-** features.
-*/
-static void jsonAppendNormalizedReal(JsonString *p, const char *zIn, u32 N){
- u32 i;
- if( zIn[0]=='+' ){
- zIn++;
- N--;
- }else if( zIn[0]=='-' ){
- jsonAppendChar(p, '-');
- zIn++;
- N--;
- }
- if( zIn[0]=='.' ){
- jsonAppendChar(p, '0');
- }
- for(i=0; i<N; i++){
- if( zIn[i]=='.' && (i+1==N || !sqlite3Isdigit(zIn[i+1])) ){
- i++;
- jsonAppendRaw(p, zIn, i);
- zIn += i;
- N -= i;
- jsonAppendChar(p, '0');
- break;
- }
- }
- if( N>0 ){
- jsonAppendRaw(p, zIn, N);
- }
-}
-
-
-
-/*
-** Append a function parameter value to the JSON string under
-** construction.
-*/
-static void jsonAppendValue(
+static void jsonAppendSqlValue(
JsonString *p, /* Append to this JSON string */
sqlite3_value *pValue /* Value to append */
){
switch( sqlite3_value_type(pValue) ){
case SQLITE_NULL: {
- jsonAppendRaw(p, "null", 4);
+ jsonAppendRawNZ(p, "null", 4);
break;
}
case SQLITE_FLOAT: {
@@ -200177,205 +203930,127 @@ static void jsonAppendValue(
break;
}
default: {
- if( p->bErr==0 ){
+ if( jsonFuncArgMightBeBinary(pValue) ){
+ JsonParse px;
+ memset(&px, 0, sizeof(px));
+ px.aBlob = (u8*)sqlite3_value_blob(pValue);
+ px.nBlob = sqlite3_value_bytes(pValue);
+ jsonTranslateBlobToText(&px, 0, p);
+ }else if( p->eErr==0 ){
sqlite3_result_error(p->pCtx, "JSON cannot hold BLOB values", -1);
- p->bErr = 2;
- jsonReset(p);
+ p->eErr = JSTRING_ERR;
+ jsonStringReset(p);
}
break;
}
}
}
-
-/* Make the JSON in p the result of the SQL function.
+/* Make the text in p (which is probably a generated JSON text string)
+** the result of the SQL function.
+**
+** The JsonString is reset.
+**
+** If pParse and ctx are both non-NULL, then the SQL string in p is
+** loaded into the zJson field of the pParse object as a RCStr and the
+** pParse is added to the cache.
*/
-static void jsonResult(JsonString *p){
- if( p->bErr==0 ){
- sqlite3_result_text64(p->pCtx, p->zBuf, p->nUsed,
- p->bStatic ? SQLITE_TRANSIENT : sqlite3_free,
- SQLITE_UTF8);
- jsonZero(p);
+static void jsonReturnString(
+ JsonString *p, /* String to return */
+ JsonParse *pParse, /* JSONB source or NULL */
+ sqlite3_context *ctx /* Where to cache */
+){
+ assert( (pParse!=0)==(ctx!=0) );
+ assert( ctx==0 || ctx==p->pCtx );
+ if( p->eErr==0 ){
+ int flags = SQLITE_PTR_TO_INT(sqlite3_user_data(p->pCtx));
+ if( flags & JSON_BLOB ){
+ jsonReturnStringAsBlob(p);
+ }else if( p->bStatic ){
+ sqlite3_result_text64(p->pCtx, p->zBuf, p->nUsed,
+ SQLITE_TRANSIENT, SQLITE_UTF8);
+ }else if( jsonStringTerminate(p) ){
+ if( pParse && pParse->bJsonIsRCStr==0 && pParse->nBlobAlloc>0 ){
+ int rc;
+ pParse->zJson = sqlite3RCStrRef(p->zBuf);
+ pParse->nJson = p->nUsed;
+ pParse->bJsonIsRCStr = 1;
+ rc = jsonCacheInsert(ctx, pParse);
+ if( rc==SQLITE_NOMEM ){
+ sqlite3_result_error_nomem(ctx);
+ jsonStringReset(p);
+ return;
+ }
+ }
+ sqlite3_result_text64(p->pCtx, sqlite3RCStrRef(p->zBuf), p->nUsed,
+ sqlite3RCStrUnref,
+ SQLITE_UTF8);
+ }else{
+ sqlite3_result_error_nomem(p->pCtx);
+ }
+ }else if( p->eErr & JSTRING_OOM ){
+ sqlite3_result_error_nomem(p->pCtx);
+ }else if( p->eErr & JSTRING_MALFORMED ){
+ sqlite3_result_error(p->pCtx, "malformed JSON", -1);
}
- assert( p->bStatic );
+ jsonStringReset(p);
}
/**************************************************************************
-** Utility routines for dealing with JsonNode and JsonParse objects
+** Utility routines for dealing with JsonParse objects
**************************************************************************/
/*
-** Return the number of consecutive JsonNode slots need to represent
-** the parsed JSON at pNode. The minimum answer is 1. For ARRAY and
-** OBJECT types, the number might be larger.
-**
-** Appended elements are not counted. The value returned is the number
-** by which the JsonNode counter should increment in order to go to the
-** next peer value.
-*/
-static u32 jsonNodeSize(JsonNode *pNode){
- return pNode->eType>=JSON_ARRAY ? pNode->n+1 : 1;
-}
-
-/*
** Reclaim all memory allocated by a JsonParse object. But do not
** delete the JsonParse object itself.
*/
static void jsonParseReset(JsonParse *pParse){
- sqlite3_free(pParse->aNode);
- pParse->aNode = 0;
- pParse->nNode = 0;
- pParse->nAlloc = 0;
- sqlite3_free(pParse->aUp);
- pParse->aUp = 0;
+ assert( pParse->nJPRef<=1 );
+ if( pParse->bJsonIsRCStr ){
+ sqlite3RCStrUnref(pParse->zJson);
+ pParse->zJson = 0;
+ pParse->nJson = 0;
+ pParse->bJsonIsRCStr = 0;
+ }
+ if( pParse->nBlobAlloc ){
+ sqlite3DbFree(pParse->db, pParse->aBlob);
+ pParse->aBlob = 0;
+ pParse->nBlob = 0;
+ pParse->nBlobAlloc = 0;
+ }
}
/*
-** Free a JsonParse object that was obtained from sqlite3_malloc().
+** Decrement the reference count on the JsonParse object. When the
+** count reaches zero, free the object.
*/
static void jsonParseFree(JsonParse *pParse){
- jsonParseReset(pParse);
- sqlite3_free(pParse);
-}
-
-/*
-** Convert the JsonNode pNode into a pure JSON string and
-** append to pOut. Subsubstructure is also included. Return
-** the number of JsonNode objects that are encoded.
-*/
-static void jsonRenderNode(
- JsonNode *pNode, /* The node to render */
- JsonString *pOut, /* Write JSON here */
- sqlite3_value **aReplace /* Replacement values */
-){
- assert( pNode!=0 );
- if( pNode->jnFlags & (JNODE_REPLACE|JNODE_PATCH) ){
- if( (pNode->jnFlags & JNODE_REPLACE)!=0 && ALWAYS(aReplace!=0) ){
- assert( pNode->eU==4 );
- jsonAppendValue(pOut, aReplace[pNode->u.iReplace]);
- return;
- }
- assert( pNode->eU==5 );
- pNode = pNode->u.pPatch;
- }
- switch( pNode->eType ){
- default: {
- assert( pNode->eType==JSON_NULL );
- jsonAppendRaw(pOut, "null", 4);
- break;
- }
- case JSON_TRUE: {
- jsonAppendRaw(pOut, "true", 4);
- break;
- }
- case JSON_FALSE: {
- jsonAppendRaw(pOut, "false", 5);
- break;
- }
- case JSON_STRING: {
- assert( pNode->eU==1 );
- if( pNode->jnFlags & JNODE_RAW ){
- if( pNode->jnFlags & JNODE_LABEL ){
- jsonAppendChar(pOut, '"');
- jsonAppendRaw(pOut, pNode->u.zJContent, pNode->n);
- jsonAppendChar(pOut, '"');
- }else{
- jsonAppendString(pOut, pNode->u.zJContent, pNode->n);
- }
- }else if( pNode->jnFlags & JNODE_JSON5 ){
- jsonAppendNormalizedString(pOut, pNode->u.zJContent, pNode->n);
- }else{
- jsonAppendRaw(pOut, pNode->u.zJContent, pNode->n);
- }
- break;
- }
- case JSON_REAL: {
- assert( pNode->eU==1 );
- if( pNode->jnFlags & JNODE_JSON5 ){
- jsonAppendNormalizedReal(pOut, pNode->u.zJContent, pNode->n);
- }else{
- jsonAppendRaw(pOut, pNode->u.zJContent, pNode->n);
- }
- break;
- }
- case JSON_INT: {
- assert( pNode->eU==1 );
- if( pNode->jnFlags & JNODE_JSON5 ){
- jsonAppendNormalizedInt(pOut, pNode->u.zJContent, pNode->n);
- }else{
- jsonAppendRaw(pOut, pNode->u.zJContent, pNode->n);
- }
- break;
- }
- case JSON_ARRAY: {
- u32 j = 1;
- jsonAppendChar(pOut, '[');
- for(;;){
- while( j<=pNode->n ){
- if( (pNode[j].jnFlags & JNODE_REMOVE)==0 ){
- jsonAppendSeparator(pOut);
- jsonRenderNode(&pNode[j], pOut, aReplace);
- }
- j += jsonNodeSize(&pNode[j]);
- }
- if( (pNode->jnFlags & JNODE_APPEND)==0 ) break;
- assert( pNode->eU==2 );
- pNode = &pNode[pNode->u.iAppend];
- j = 1;
- }
- jsonAppendChar(pOut, ']');
- break;
- }
- case JSON_OBJECT: {
- u32 j = 1;
- jsonAppendChar(pOut, '{');
- for(;;){
- while( j<=pNode->n ){
- if( (pNode[j+1].jnFlags & JNODE_REMOVE)==0 ){
- jsonAppendSeparator(pOut);
- jsonRenderNode(&pNode[j], pOut, aReplace);
- jsonAppendChar(pOut, ':');
- jsonRenderNode(&pNode[j+1], pOut, aReplace);
- }
- j += 1 + jsonNodeSize(&pNode[j+1]);
- }
- if( (pNode->jnFlags & JNODE_APPEND)==0 ) break;
- assert( pNode->eU==2 );
- pNode = &pNode[pNode->u.iAppend];
- j = 1;
- }
- jsonAppendChar(pOut, '}');
- break;
+ if( pParse ){
+ if( pParse->nJPRef>1 ){
+ pParse->nJPRef--;
+ }else{
+ jsonParseReset(pParse);
+ sqlite3DbFree(pParse->db, pParse);
}
}
}
-/*
-** Return a JsonNode and all its descendents as a JSON string.
-*/
-static void jsonReturnJson(
- JsonNode *pNode, /* Node to return */
- sqlite3_context *pCtx, /* Return value for this function */
- sqlite3_value **aReplace /* Array of replacement values */
-){
- JsonString s;
- jsonInit(&s, pCtx);
- jsonRenderNode(pNode, &s, aReplace);
- jsonResult(&s);
- sqlite3_result_subtype(pCtx, JSON_SUBTYPE);
-}
+/**************************************************************************
+** Utility routines for the JSON text parser
+**************************************************************************/
/*
** Translate a single byte of Hex into an integer.
-** This routine only works if h really is a valid hexadecimal
-** character: 0..9a..fA..F
+** This routine only gives a correct answer if h really is a valid hexadecimal
+** character: 0..9a..fA..F. But unlike sqlite3HexToInt(), it does not
+** assert() if the digit is not hex.
*/
static u8 jsonHexToInt(int h){
- assert( (h>='0' && h<='9') || (h>='a' && h<='f') || (h>='A' && h<='F') );
+#ifdef SQLITE_ASCII
+ h += 9*(1&(h>>6));
+#endif
#ifdef SQLITE_EBCDIC
h += 9*(1&~(h>>4));
-#else
- h += 9*(1&(h>>6));
#endif
return (u8)(h & 0xf);
}
@@ -200385,10 +204060,6 @@ static u8 jsonHexToInt(int h){
*/
static u32 jsonHexToInt4(const char *z){
u32 v;
- assert( sqlite3Isxdigit(z[0]) );
- assert( sqlite3Isxdigit(z[1]) );
- assert( sqlite3Isxdigit(z[2]) );
- assert( sqlite3Isxdigit(z[3]) );
v = (jsonHexToInt(z[0])<<12)
+ (jsonHexToInt(z[1])<<8)
+ (jsonHexToInt(z[2])<<4)
@@ -200397,227 +204068,6 @@ static u32 jsonHexToInt4(const char *z){
}
/*
-** Make the JsonNode the return value of the function.
-*/
-static void jsonReturn(
- JsonNode *pNode, /* Node to return */
- sqlite3_context *pCtx, /* Return value for this function */
- sqlite3_value **aReplace /* Array of replacement values */
-){
- switch( pNode->eType ){
- default: {
- assert( pNode->eType==JSON_NULL );
- sqlite3_result_null(pCtx);
- break;
- }
- case JSON_TRUE: {
- sqlite3_result_int(pCtx, 1);
- break;
- }
- case JSON_FALSE: {
- sqlite3_result_int(pCtx, 0);
- break;
- }
- case JSON_INT: {
- sqlite3_int64 i = 0;
- int rc;
- int bNeg = 0;
- const char *z;
-
-
- assert( pNode->eU==1 );
- z = pNode->u.zJContent;
- if( z[0]=='-' ){ z++; bNeg = 1; }
- else if( z[0]=='+' ){ z++; }
- rc = sqlite3DecOrHexToI64(z, &i);
- if( rc<=1 ){
- sqlite3_result_int64(pCtx, bNeg ? -i : i);
- }else if( rc==3 && bNeg ){
- sqlite3_result_int64(pCtx, SMALLEST_INT64);
- }else{
- goto to_double;
- }
- break;
- }
- case JSON_REAL: {
- double r;
- const char *z;
- assert( pNode->eU==1 );
- to_double:
- z = pNode->u.zJContent;
- sqlite3AtoF(z, &r, sqlite3Strlen30(z), SQLITE_UTF8);
- sqlite3_result_double(pCtx, r);
- break;
- }
- case JSON_STRING: {
- if( pNode->jnFlags & JNODE_RAW ){
- assert( pNode->eU==1 );
- sqlite3_result_text(pCtx, pNode->u.zJContent, pNode->n,
- SQLITE_TRANSIENT);
- }else if( (pNode->jnFlags & JNODE_ESCAPE)==0 ){
- /* JSON formatted without any backslash-escapes */
- assert( pNode->eU==1 );
- sqlite3_result_text(pCtx, pNode->u.zJContent+1, pNode->n-2,
- SQLITE_TRANSIENT);
- }else{
- /* Translate JSON formatted string into raw text */
- u32 i;
- u32 n = pNode->n;
- const char *z;
- char *zOut;
- u32 j;
- u32 nOut = n;
- assert( pNode->eU==1 );
- z = pNode->u.zJContent;
- zOut = sqlite3_malloc( nOut+1 );
- if( zOut==0 ){
- sqlite3_result_error_nomem(pCtx);
- break;
- }
- for(i=1, j=0; i<n-1; i++){
- char c = z[i];
- if( c=='\\' ){
- c = z[++i];
- if( c=='u' ){
- u32 v = jsonHexToInt4(z+i+1);
- i += 4;
- if( v==0 ) break;
- if( v<=0x7f ){
- zOut[j++] = (char)v;
- }else if( v<=0x7ff ){
- zOut[j++] = (char)(0xc0 | (v>>6));
- zOut[j++] = 0x80 | (v&0x3f);
- }else{
- u32 vlo;
- if( (v&0xfc00)==0xd800
- && i<n-6
- && z[i+1]=='\\'
- && z[i+2]=='u'
- && ((vlo = jsonHexToInt4(z+i+3))&0xfc00)==0xdc00
- ){
- /* We have a surrogate pair */
- v = ((v&0x3ff)<<10) + (vlo&0x3ff) + 0x10000;
- i += 6;
- zOut[j++] = 0xf0 | (v>>18);
- zOut[j++] = 0x80 | ((v>>12)&0x3f);
- zOut[j++] = 0x80 | ((v>>6)&0x3f);
- zOut[j++] = 0x80 | (v&0x3f);
- }else{
- zOut[j++] = 0xe0 | (v>>12);
- zOut[j++] = 0x80 | ((v>>6)&0x3f);
- zOut[j++] = 0x80 | (v&0x3f);
- }
- }
- continue;
- }else if( c=='b' ){
- c = '\b';
- }else if( c=='f' ){
- c = '\f';
- }else if( c=='n' ){
- c = '\n';
- }else if( c=='r' ){
- c = '\r';
- }else if( c=='t' ){
- c = '\t';
- }else if( c=='v' ){
- c = '\v';
- }else if( c=='\'' || c=='"' || c=='/' || c=='\\' ){
- /* pass through unchanged */
- }else if( c=='0' ){
- c = 0;
- }else if( c=='x' ){
- c = (jsonHexToInt(z[i+1])<<4) | jsonHexToInt(z[i+2]);
- i += 2;
- }else if( c=='\r' && z[i+1]=='\n' ){
- i++;
- continue;
- }else if( 0xe2==(u8)c ){
- assert( 0x80==(u8)z[i+1] );
- assert( 0xa8==(u8)z[i+2] || 0xa9==(u8)z[i+2] );
- i += 2;
- continue;
- }else{
- continue;
- }
- } /* end if( c=='\\' ) */
- zOut[j++] = c;
- } /* end for() */
- zOut[j] = 0;
- sqlite3_result_text(pCtx, zOut, j, sqlite3_free);
- }
- break;
- }
- case JSON_ARRAY:
- case JSON_OBJECT: {
- jsonReturnJson(pNode, pCtx, aReplace);
- break;
- }
- }
-}
-
-/* Forward reference */
-static int jsonParseAddNode(JsonParse*,u32,u32,const char*);
-
-/*
-** A macro to hint to the compiler that a function should not be
-** inlined.
-*/
-#if defined(__GNUC__)
-# define JSON_NOINLINE __attribute__((noinline))
-#elif defined(_MSC_VER) && _MSC_VER>=1310
-# define JSON_NOINLINE __declspec(noinline)
-#else
-# define JSON_NOINLINE
-#endif
-
-
-static JSON_NOINLINE int jsonParseAddNodeExpand(
- JsonParse *pParse, /* Append the node to this object */
- u32 eType, /* Node type */
- u32 n, /* Content size or sub-node count */
- const char *zContent /* Content */
-){
- u32 nNew;
- JsonNode *pNew;
- assert( pParse->nNode>=pParse->nAlloc );
- if( pParse->oom ) return -1;
- nNew = pParse->nAlloc*2 + 10;
- pNew = sqlite3_realloc64(pParse->aNode, sizeof(JsonNode)*nNew);
- if( pNew==0 ){
- pParse->oom = 1;
- return -1;
- }
- pParse->nAlloc = nNew;
- pParse->aNode = pNew;
- assert( pParse->nNode<pParse->nAlloc );
- return jsonParseAddNode(pParse, eType, n, zContent);
-}
-
-/*
-** Create a new JsonNode instance based on the arguments and append that
-** instance to the JsonParse. Return the index in pParse->aNode[] of the
-** new node, or -1 if a memory allocation fails.
-*/
-static int jsonParseAddNode(
- JsonParse *pParse, /* Append the node to this object */
- u32 eType, /* Node type */
- u32 n, /* Content size or sub-node count */
- const char *zContent /* Content */
-){
- JsonNode *p;
- if( pParse->aNode==0 || pParse->nNode>=pParse->nAlloc ){
- return jsonParseAddNodeExpand(pParse, eType, n, zContent);
- }
- p = &pParse->aNode[pParse->nNode];
- p->eType = (u8)(eType & 0xff);
- p->jnFlags = (u8)(eType >> 8);
- VVA( p->eU = zContent ? 1 : 0 );
- p->n = n;
- p->u.zJContent = zContent;
- return pParse->nNode++;
-}
-
-/*
** Return true if z[] begins with 2 (or more) hexadecimal digits
*/
static int jsonIs2Hex(const char *z){
@@ -200770,63 +204220,500 @@ static const struct NanInfName {
char *zMatch;
char *zRepl;
} aNanInfName[] = {
- { 'i', 'I', 3, JSON_REAL, 7, "inf", "9.0e999" },
- { 'i', 'I', 8, JSON_REAL, 7, "infinity", "9.0e999" },
- { 'n', 'N', 3, JSON_NULL, 4, "NaN", "null" },
- { 'q', 'Q', 4, JSON_NULL, 4, "QNaN", "null" },
- { 's', 'S', 4, JSON_NULL, 4, "SNaN", "null" },
+ { 'i', 'I', 3, JSONB_FLOAT, 7, "inf", "9.0e999" },
+ { 'i', 'I', 8, JSONB_FLOAT, 7, "infinity", "9.0e999" },
+ { 'n', 'N', 3, JSONB_NULL, 4, "NaN", "null" },
+ { 'q', 'Q', 4, JSONB_NULL, 4, "QNaN", "null" },
+ { 's', 'S', 4, JSONB_NULL, 4, "SNaN", "null" },
};
+
/*
-** Parse a single JSON value which begins at pParse->zJson[i]. Return the
-** index of the first character past the end of the value parsed.
+** Report the wrong number of arguments for json_insert(), json_replace()
+** or json_set().
+*/
+static void jsonWrongNumArgs(
+ sqlite3_context *pCtx,
+ const char *zFuncName
+){
+ char *zMsg = sqlite3_mprintf("json_%s() needs an odd number of arguments",
+ zFuncName);
+ sqlite3_result_error(pCtx, zMsg, -1);
+ sqlite3_free(zMsg);
+}
+
+/****************************************************************************
+** Utility routines for dealing with the binary BLOB representation of JSON
+****************************************************************************/
+
+/*
+** Expand pParse->aBlob so that it holds at least N bytes.
**
-** Special return values:
+** Return the number of errors.
+*/
+static int jsonBlobExpand(JsonParse *pParse, u32 N){
+ u8 *aNew;
+ u32 t;
+ assert( N>pParse->nBlobAlloc );
+ if( pParse->nBlobAlloc==0 ){
+ t = 100;
+ }else{
+ t = pParse->nBlobAlloc*2;
+ }
+ if( t<N ) t = N+100;
+ aNew = sqlite3DbRealloc(pParse->db, pParse->aBlob, t);
+ if( aNew==0 ){ pParse->oom = 1; return 1; }
+ pParse->aBlob = aNew;
+ pParse->nBlobAlloc = t;
+ return 0;
+}
+
+/*
+** If pParse->aBlob is not previously editable (because it is taken
+** from sqlite3_value_blob(), as indicated by the fact that
+** pParse->nBlobAlloc==0 and pParse->nBlob>0) then make it editable
+** by making a copy into space obtained from malloc.
**
-** 0 End if input
-** -1 Syntax error
-** -2 '}' seen
-** -3 ']' seen
-** -4 ',' seen
-** -5 ':' seen
+** Return true on success. Return false on OOM.
*/
-static int jsonParseValue(JsonParse *pParse, u32 i){
+static int jsonBlobMakeEditable(JsonParse *pParse, u32 nExtra){
+ u8 *aOld;
+ u32 nSize;
+ assert( !pParse->bReadOnly );
+ if( pParse->oom ) return 0;
+ if( pParse->nBlobAlloc>0 ) return 1;
+ aOld = pParse->aBlob;
+ nSize = pParse->nBlob + nExtra;
+ pParse->aBlob = 0;
+ if( jsonBlobExpand(pParse, nSize) ){
+ return 0;
+ }
+ assert( pParse->nBlobAlloc >= pParse->nBlob + nExtra );
+ memcpy(pParse->aBlob, aOld, pParse->nBlob);
+ return 1;
+}
+
+/* Expand pParse->aBlob and append one bytes.
+*/
+static SQLITE_NOINLINE void jsonBlobExpandAndAppendOneByte(
+ JsonParse *pParse,
+ u8 c
+){
+ jsonBlobExpand(pParse, pParse->nBlob+1);
+ if( pParse->oom==0 ){
+ assert( pParse->nBlob+1<=pParse->nBlobAlloc );
+ pParse->aBlob[pParse->nBlob++] = c;
+ }
+}
+
+/* Append a single character.
+*/
+static void jsonBlobAppendOneByte(JsonParse *pParse, u8 c){
+ if( pParse->nBlob >= pParse->nBlobAlloc ){
+ jsonBlobExpandAndAppendOneByte(pParse, c);
+ }else{
+ pParse->aBlob[pParse->nBlob++] = c;
+ }
+}
+
+/* Slow version of jsonBlobAppendNode() that first resizes the
+** pParse->aBlob structure.
+*/
+static void jsonBlobAppendNode(JsonParse*,u8,u32,const void*);
+static SQLITE_NOINLINE void jsonBlobExpandAndAppendNode(
+ JsonParse *pParse,
+ u8 eType,
+ u32 szPayload,
+ const void *aPayload
+){
+ if( jsonBlobExpand(pParse, pParse->nBlob+szPayload+9) ) return;
+ jsonBlobAppendNode(pParse, eType, szPayload, aPayload);
+}
+
+
+/* Append an node type byte together with the payload size and
+** possibly also the payload.
+**
+** If aPayload is not NULL, then it is a pointer to the payload which
+** is also appended. If aPayload is NULL, the pParse->aBlob[] array
+** is resized (if necessary) so that it is big enough to hold the
+** payload, but the payload is not appended and pParse->nBlob is left
+** pointing to where the first byte of payload will eventually be.
+*/
+static void jsonBlobAppendNode(
+ JsonParse *pParse, /* The JsonParse object under construction */
+ u8 eType, /* Node type. One of JSONB_* */
+ u32 szPayload, /* Number of bytes of payload */
+ const void *aPayload /* The payload. Might be NULL */
+){
+ u8 *a;
+ if( pParse->nBlob+szPayload+9 > pParse->nBlobAlloc ){
+ jsonBlobExpandAndAppendNode(pParse,eType,szPayload,aPayload);
+ return;
+ }
+ assert( pParse->aBlob!=0 );
+ a = &pParse->aBlob[pParse->nBlob];
+ if( szPayload<=11 ){
+ a[0] = eType | (szPayload<<4);
+ pParse->nBlob += 1;
+ }else if( szPayload<=0xff ){
+ a[0] = eType | 0xc0;
+ a[1] = szPayload & 0xff;
+ pParse->nBlob += 2;
+ }else if( szPayload<=0xffff ){
+ a[0] = eType | 0xd0;
+ a[1] = (szPayload >> 8) & 0xff;
+ a[2] = szPayload & 0xff;
+ pParse->nBlob += 3;
+ }else{
+ a[0] = eType | 0xe0;
+ a[1] = (szPayload >> 24) & 0xff;
+ a[2] = (szPayload >> 16) & 0xff;
+ a[3] = (szPayload >> 8) & 0xff;
+ a[4] = szPayload & 0xff;
+ pParse->nBlob += 5;
+ }
+ if( aPayload ){
+ pParse->nBlob += szPayload;
+ memcpy(&pParse->aBlob[pParse->nBlob-szPayload], aPayload, szPayload);
+ }
+}
+
+/* Change the payload size for the node at index i to be szPayload.
+*/
+static int jsonBlobChangePayloadSize(
+ JsonParse *pParse,
+ u32 i,
+ u32 szPayload
+){
+ u8 *a;
+ u8 szType;
+ u8 nExtra;
+ u8 nNeeded;
+ int delta;
+ if( pParse->oom ) return 0;
+ a = &pParse->aBlob[i];
+ szType = a[0]>>4;
+ if( szType<=11 ){
+ nExtra = 0;
+ }else if( szType==12 ){
+ nExtra = 1;
+ }else if( szType==13 ){
+ nExtra = 2;
+ }else{
+ nExtra = 4;
+ }
+ if( szPayload<=11 ){
+ nNeeded = 0;
+ }else if( szPayload<=0xff ){
+ nNeeded = 1;
+ }else if( szPayload<=0xffff ){
+ nNeeded = 2;
+ }else{
+ nNeeded = 4;
+ }
+ delta = nNeeded - nExtra;
+ if( delta ){
+ u32 newSize = pParse->nBlob + delta;
+ if( delta>0 ){
+ if( newSize>pParse->nBlobAlloc && jsonBlobExpand(pParse, newSize) ){
+ return 0; /* OOM error. Error state recorded in pParse->oom. */
+ }
+ a = &pParse->aBlob[i];
+ memmove(&a[1+delta], &a[1], pParse->nBlob - (i+1));
+ }else{
+ memmove(&a[1], &a[1-delta], pParse->nBlob - (i+1-delta));
+ }
+ pParse->nBlob = newSize;
+ }
+ if( nNeeded==0 ){
+ a[0] = (a[0] & 0x0f) | (szPayload<<4);
+ }else if( nNeeded==1 ){
+ a[0] = (a[0] & 0x0f) | 0xc0;
+ a[1] = szPayload & 0xff;
+ }else if( nNeeded==2 ){
+ a[0] = (a[0] & 0x0f) | 0xd0;
+ a[1] = (szPayload >> 8) & 0xff;
+ a[2] = szPayload & 0xff;
+ }else{
+ a[0] = (a[0] & 0x0f) | 0xe0;
+ a[1] = (szPayload >> 24) & 0xff;
+ a[2] = (szPayload >> 16) & 0xff;
+ a[3] = (szPayload >> 8) & 0xff;
+ a[4] = szPayload & 0xff;
+ }
+ return delta;
+}
+
+/*
+** If z[0] is 'u' and is followed by exactly 4 hexadecimal character,
+** then set *pOp to JSONB_TEXTJ and return true. If not, do not make
+** any changes to *pOp and return false.
+*/
+static int jsonIs4HexB(const char *z, int *pOp){
+ if( z[0]!='u' ) return 0;
+ if( !jsonIs4Hex(&z[1]) ) return 0;
+ *pOp = JSONB_TEXTJ;
+ return 1;
+}
+
+/*
+** Check a single element of the JSONB in pParse for validity.
+**
+** The element to be checked starts at offset i and must end at on the
+** last byte before iEnd.
+**
+** Return 0 if everything is correct. Return the 1-based byte offset of the
+** error if a problem is detected. (In other words, if the error is at offset
+** 0, return 1).
+*/
+static u32 jsonbValidityCheck(
+ const JsonParse *pParse, /* Input JSONB. Only aBlob and nBlob are used */
+ u32 i, /* Start of element as pParse->aBlob[i] */
+ u32 iEnd, /* One more than the last byte of the element */
+ u32 iDepth /* Current nesting depth */
+){
+ u32 n, sz, j, k;
+ const u8 *z;
+ u8 x;
+ if( iDepth>JSON_MAX_DEPTH ) return i+1;
+ sz = 0;
+ n = jsonbPayloadSize(pParse, i, &sz);
+ if( NEVER(n==0) ) return i+1; /* Checked by caller */
+ if( NEVER(i+n+sz!=iEnd) ) return i+1; /* Checked by caller */
+ z = pParse->aBlob;
+ x = z[i] & 0x0f;
+ switch( x ){
+ case JSONB_NULL:
+ case JSONB_TRUE:
+ case JSONB_FALSE: {
+ return n+sz==1 ? 0 : i+1;
+ }
+ case JSONB_INT: {
+ if( sz<1 ) return i+1;
+ j = i+n;
+ if( z[j]=='-' ){
+ j++;
+ if( sz<2 ) return i+1;
+ }
+ k = i+n+sz;
+ while( j<k ){
+ if( sqlite3Isdigit(z[j]) ){
+ j++;
+ }else{
+ return j+1;
+ }
+ }
+ return 0;
+ }
+ case JSONB_INT5: {
+ if( sz<3 ) return i+1;
+ j = i+n;
+ if( z[j]=='-' ){
+ if( sz<4 ) return i+1;
+ j++;
+ }
+ if( z[j]!='0' ) return i+1;
+ if( z[j+1]!='x' && z[j+1]!='X' ) return j+2;
+ j += 2;
+ k = i+n+sz;
+ while( j<k ){
+ if( sqlite3Isxdigit(z[j]) ){
+ j++;
+ }else{
+ return j+1;
+ }
+ }
+ return 0;
+ }
+ case JSONB_FLOAT:
+ case JSONB_FLOAT5: {
+ u8 seen = 0; /* 0: initial. 1: '.' seen 2: 'e' seen */
+ if( sz<2 ) return i+1;
+ j = i+n;
+ k = j+sz;
+ if( z[j]=='-' ){
+ j++;
+ if( sz<3 ) return i+1;
+ }
+ if( z[j]=='.' ){
+ if( x==JSONB_FLOAT ) return j+1;
+ if( !sqlite3Isdigit(z[j+1]) ) return j+1;
+ j += 2;
+ seen = 1;
+ }else if( z[j]=='0' && x==JSONB_FLOAT ){
+ if( j+3>k ) return j+1;
+ if( z[j+1]!='.' && z[j+1]!='e' && z[j+1]!='E' ) return j+1;
+ j++;
+ }
+ for(; j<k; j++){
+ if( sqlite3Isdigit(z[j]) ) continue;
+ if( z[j]=='.' ){
+ if( seen>0 ) return j+1;
+ if( x==JSONB_FLOAT && (j==k-1 || !sqlite3Isdigit(z[j+1])) ){
+ return j+1;
+ }
+ seen = 1;
+ continue;
+ }
+ if( z[j]=='e' || z[j]=='E' ){
+ if( seen==2 ) return j+1;
+ if( j==k-1 ) return j+1;
+ if( z[j+1]=='+' || z[j+1]=='-' ){
+ j++;
+ if( j==k-1 ) return j+1;
+ }
+ seen = 2;
+ continue;
+ }
+ return j+1;
+ }
+ if( seen==0 ) return i+1;
+ return 0;
+ }
+ case JSONB_TEXT: {
+ j = i+n;
+ k = j+sz;
+ while( j<k ){
+ if( !jsonIsOk[z[j]] && z[j]!='\'' ) return j+1;
+ j++;
+ }
+ return 0;
+ }
+ case JSONB_TEXTJ:
+ case JSONB_TEXT5: {
+ j = i+n;
+ k = j+sz;
+ while( j<k ){
+ if( !jsonIsOk[z[j]] && z[j]!='\'' ){
+ if( z[j]=='"' ){
+ if( x==JSONB_TEXTJ ) return j+1;
+ }else if( z[j]!='\\' || j+1>=k ){
+ return j+1;
+ }else if( strchr("\"\\/bfnrt",z[j+1])!=0 ){
+ j++;
+ }else if( z[j+1]=='u' ){
+ if( j+5>=k ) return j+1;
+ if( !jsonIs4Hex((const char*)&z[j+2]) ) return j+1;
+ j++;
+ }else if( x!=JSONB_TEXT5 ){
+ return j+1;
+ }else{
+ u32 c = 0;
+ u32 szC = jsonUnescapeOneChar((const char*)&z[j], k-j, &c);
+ if( c==JSON_INVALID_CHAR ) return j+1;
+ j += szC - 1;
+ }
+ }
+ j++;
+ }
+ return 0;
+ }
+ case JSONB_TEXTRAW: {
+ return 0;
+ }
+ case JSONB_ARRAY: {
+ u32 sub;
+ j = i+n;
+ k = j+sz;
+ while( j<k ){
+ sz = 0;
+ n = jsonbPayloadSize(pParse, j, &sz);
+ if( n==0 ) return j+1;
+ if( j+n+sz>k ) return j+1;
+ sub = jsonbValidityCheck(pParse, j, j+n+sz, iDepth+1);
+ if( sub ) return sub;
+ j += n + sz;
+ }
+ assert( j==k );
+ return 0;
+ }
+ case JSONB_OBJECT: {
+ u32 cnt = 0;
+ u32 sub;
+ j = i+n;
+ k = j+sz;
+ while( j<k ){
+ sz = 0;
+ n = jsonbPayloadSize(pParse, j, &sz);
+ if( n==0 ) return j+1;
+ if( j+n+sz>k ) return j+1;
+ if( (cnt & 1)==0 ){
+ x = z[j] & 0x0f;
+ if( x<JSONB_TEXT || x>JSONB_TEXTRAW ) return j+1;
+ }
+ sub = jsonbValidityCheck(pParse, j, j+n+sz, iDepth+1);
+ if( sub ) return sub;
+ cnt++;
+ j += n + sz;
+ }
+ assert( j==k );
+ if( (cnt & 1)!=0 ) return j+1;
+ return 0;
+ }
+ default: {
+ return i+1;
+ }
+ }
+}
+
+/*
+** Translate a single element of JSON text at pParse->zJson[i] into
+** its equivalent binary JSONB representation. Append the translation into
+** pParse->aBlob[] beginning at pParse->nBlob. The size of
+** pParse->aBlob[] is increased as necessary.
+**
+** Return the index of the first character past the end of the element parsed,
+** or one of the following special result codes:
+**
+** 0 End of input
+** -1 Syntax error or OOM
+** -2 '}' seen \
+** -3 ']' seen \___ For these returns, pParse->iErr is set to
+** -4 ',' seen / the index in zJson[] of the seen character
+** -5 ':' seen /
+*/
+static int jsonTranslateTextToBlob(JsonParse *pParse, u32 i){
char c;
u32 j;
- int iThis;
+ u32 iThis, iStart;
int x;
- JsonNode *pNode;
+ u8 t;
const char *z = pParse->zJson;
json_parse_restart:
switch( (u8)z[i] ){
case '{': {
/* Parse object */
- iThis = jsonParseAddNode(pParse, JSON_OBJECT, 0, 0);
- if( iThis<0 ) return -1;
+ iThis = pParse->nBlob;
+ jsonBlobAppendNode(pParse, JSONB_OBJECT, pParse->nJson-i, 0);
if( ++pParse->iDepth > JSON_MAX_DEPTH ){
pParse->iErr = i;
return -1;
}
+ iStart = pParse->nBlob;
for(j=i+1;;j++){
- u32 nNode = pParse->nNode;
- x = jsonParseValue(pParse, j);
+ u32 iBlob = pParse->nBlob;
+ x = jsonTranslateTextToBlob(pParse, j);
if( x<=0 ){
+ int op;
if( x==(-2) ){
j = pParse->iErr;
- if( pParse->nNode!=(u32)iThis+1 ) pParse->hasNonstd = 1;
+ if( pParse->nBlob!=(u32)iStart ) pParse->hasNonstd = 1;
break;
}
j += json5Whitespace(&z[j]);
+ op = JSONB_TEXT;
if( sqlite3JsonId1(z[j])
- || (z[j]=='\\' && z[j+1]=='u' && jsonIs4Hex(&z[j+2]))
+ || (z[j]=='\\' && jsonIs4HexB(&z[j+1], &op))
){
int k = j+1;
while( (sqlite3JsonId2(z[k]) && json5Whitespace(&z[k])==0)
- || (z[k]=='\\' && z[k+1]=='u' && jsonIs4Hex(&z[k+2]))
+ || (z[k]=='\\' && jsonIs4HexB(&z[k+1], &op))
){
k++;
}
- jsonParseAddNode(pParse, JSON_STRING | (JNODE_RAW<<8), k-j, &z[j]);
+ assert( iBlob==pParse->nBlob );
+ jsonBlobAppendNode(pParse, op, k-j, &z[j]);
pParse->hasNonstd = 1;
x = k;
}else{
@@ -200835,24 +204722,24 @@ json_parse_restart:
}
}
if( pParse->oom ) return -1;
- pNode = &pParse->aNode[nNode];
- if( pNode->eType!=JSON_STRING ){
+ t = pParse->aBlob[iBlob] & 0x0f;
+ if( t<JSONB_TEXT || t>JSONB_TEXTRAW ){
pParse->iErr = j;
return -1;
}
- pNode->jnFlags |= JNODE_LABEL;
j = x;
if( z[j]==':' ){
j++;
}else{
- if( fast_isspace(z[j]) ){
- do{ j++; }while( fast_isspace(z[j]) );
+ if( jsonIsspace(z[j]) ){
+ /* strspn() is not helpful here */
+ do{ j++; }while( jsonIsspace(z[j]) );
if( z[j]==':' ){
j++;
goto parse_object_value;
}
}
- x = jsonParseValue(pParse, j);
+ x = jsonTranslateTextToBlob(pParse, j);
if( x!=(-5) ){
if( x!=(-1) ) pParse->iErr = j;
return -1;
@@ -200860,7 +204747,7 @@ json_parse_restart:
j = pParse->iErr+1;
}
parse_object_value:
- x = jsonParseValue(pParse, j);
+ x = jsonTranslateTextToBlob(pParse, j);
if( x<=0 ){
if( x!=(-1) ) pParse->iErr = j;
return -1;
@@ -200871,15 +204758,15 @@ json_parse_restart:
}else if( z[j]=='}' ){
break;
}else{
- if( fast_isspace(z[j]) ){
- do{ j++; }while( fast_isspace(z[j]) );
+ if( jsonIsspace(z[j]) ){
+ j += 1 + (u32)strspn(&z[j+1], jsonSpaces);
if( z[j]==',' ){
continue;
}else if( z[j]=='}' ){
break;
}
}
- x = jsonParseValue(pParse, j);
+ x = jsonTranslateTextToBlob(pParse, j);
if( x==(-4) ){
j = pParse->iErr;
continue;
@@ -200892,25 +204779,26 @@ json_parse_restart:
pParse->iErr = j;
return -1;
}
- pParse->aNode[iThis].n = pParse->nNode - (u32)iThis - 1;
+ jsonBlobChangePayloadSize(pParse, iThis, pParse->nBlob - iStart);
pParse->iDepth--;
return j+1;
}
case '[': {
/* Parse array */
- iThis = jsonParseAddNode(pParse, JSON_ARRAY, 0, 0);
- if( iThis<0 ) return -1;
+ iThis = pParse->nBlob;
+ jsonBlobAppendNode(pParse, JSONB_ARRAY, pParse->nJson - i, 0);
+ iStart = pParse->nBlob;
+ if( pParse->oom ) return -1;
if( ++pParse->iDepth > JSON_MAX_DEPTH ){
pParse->iErr = i;
return -1;
}
- memset(&pParse->aNode[iThis].u, 0, sizeof(pParse->aNode[iThis].u));
for(j=i+1;;j++){
- x = jsonParseValue(pParse, j);
+ x = jsonTranslateTextToBlob(pParse, j);
if( x<=0 ){
if( x==(-3) ){
j = pParse->iErr;
- if( pParse->nNode!=(u32)iThis+1 ) pParse->hasNonstd = 1;
+ if( pParse->nBlob!=iStart ) pParse->hasNonstd = 1;
break;
}
if( x!=(-1) ) pParse->iErr = j;
@@ -200922,15 +204810,15 @@ json_parse_restart:
}else if( z[j]==']' ){
break;
}else{
- if( fast_isspace(z[j]) ){
- do{ j++; }while( fast_isspace(z[j]) );
+ if( jsonIsspace(z[j]) ){
+ j += 1 + (u32)strspn(&z[j+1], jsonSpaces);
if( z[j]==',' ){
continue;
}else if( z[j]==']' ){
break;
}
}
- x = jsonParseValue(pParse, j);
+ x = jsonTranslateTextToBlob(pParse, j);
if( x==(-4) ){
j = pParse->iErr;
continue;
@@ -200943,60 +204831,71 @@ json_parse_restart:
pParse->iErr = j;
return -1;
}
- pParse->aNode[iThis].n = pParse->nNode - (u32)iThis - 1;
+ jsonBlobChangePayloadSize(pParse, iThis, pParse->nBlob - iStart);
pParse->iDepth--;
return j+1;
}
case '\'': {
- u8 jnFlags;
+ u8 opcode;
char cDelim;
pParse->hasNonstd = 1;
- jnFlags = JNODE_JSON5;
+ opcode = JSONB_TEXT;
goto parse_string;
case '"':
/* Parse string */
- jnFlags = 0;
+ opcode = JSONB_TEXT;
parse_string:
cDelim = z[i];
j = i+1;
- for(;;){
- c = z[j];
- if( (c & ~0x1f)==0 ){
- /* Control characters are not allowed in strings */
- pParse->iErr = j;
- return -1;
+ while( 1 /*exit-by-break*/ ){
+ if( jsonIsOk[(u8)z[j]] ){
+ if( !jsonIsOk[(u8)z[j+1]] ){
+ j += 1;
+ }else if( !jsonIsOk[(u8)z[j+2]] ){
+ j += 2;
+ }else{
+ j += 3;
+ continue;
+ }
}
- if( c=='\\' ){
+ c = z[j];
+ if( c==cDelim ){
+ break;
+ }else if( c=='\\' ){
c = z[++j];
if( c=='"' || c=='\\' || c=='/' || c=='b' || c=='f'
|| c=='n' || c=='r' || c=='t'
|| (c=='u' && jsonIs4Hex(&z[j+1])) ){
- jnFlags |= JNODE_ESCAPE;
+ if( opcode==JSONB_TEXT ) opcode = JSONB_TEXTJ;
}else if( c=='\'' || c=='0' || c=='v' || c=='\n'
|| (0xe2==(u8)c && 0x80==(u8)z[j+1]
&& (0xa8==(u8)z[j+2] || 0xa9==(u8)z[j+2]))
|| (c=='x' && jsonIs2Hex(&z[j+1])) ){
- jnFlags |= (JNODE_ESCAPE|JNODE_JSON5);
+ opcode = JSONB_TEXT5;
pParse->hasNonstd = 1;
}else if( c=='\r' ){
if( z[j+1]=='\n' ) j++;
- jnFlags |= (JNODE_ESCAPE|JNODE_JSON5);
+ opcode = JSONB_TEXT5;
pParse->hasNonstd = 1;
}else{
pParse->iErr = j;
return -1;
}
- }else if( c==cDelim ){
- break;
+ }else if( c<=0x1f ){
+ /* Control characters are not allowed in strings */
+ pParse->iErr = j;
+ return -1;
+ }else if( c=='"' ){
+ opcode = JSONB_TEXT5;
}
j++;
}
- jsonParseAddNode(pParse, JSON_STRING | (jnFlags<<8), j+1-i, &z[i]);
+ jsonBlobAppendNode(pParse, opcode, j-1-i, &z[i+1]);
return j+1;
}
case 't': {
if( strncmp(z+i,"true",4)==0 && !sqlite3Isalnum(z[i+4]) ){
- jsonParseAddNode(pParse, JSON_TRUE, 0, 0);
+ jsonBlobAppendOneByte(pParse, JSONB_TRUE);
return i+4;
}
pParse->iErr = i;
@@ -201004,23 +204903,22 @@ json_parse_restart:
}
case 'f': {
if( strncmp(z+i,"false",5)==0 && !sqlite3Isalnum(z[i+5]) ){
- jsonParseAddNode(pParse, JSON_FALSE, 0, 0);
+ jsonBlobAppendOneByte(pParse, JSONB_FALSE);
return i+5;
}
pParse->iErr = i;
return -1;
}
case '+': {
- u8 seenDP, seenE, jnFlags;
+ u8 seenE;
pParse->hasNonstd = 1;
- jnFlags = JNODE_JSON5;
+ t = 0x00; /* Bit 0x01: JSON5. Bit 0x02: FLOAT */
goto parse_number;
case '.':
if( sqlite3Isdigit(z[i+1]) ){
pParse->hasNonstd = 1;
- jnFlags = JNODE_JSON5;
+ t = 0x03; /* Bit 0x01: JSON5. Bit 0x02: FLOAT */
seenE = 0;
- seenDP = JSON_REAL;
goto parse_number_2;
}
pParse->iErr = i;
@@ -201037,9 +204935,8 @@ json_parse_restart:
case '8':
case '9':
/* Parse number */
- jnFlags = 0;
+ t = 0x00; /* Bit 0x01: JSON5. Bit 0x02: FLOAT */
parse_number:
- seenDP = JSON_INT;
seenE = 0;
assert( '-' < '0' );
assert( '+' < '0' );
@@ -201049,9 +204946,9 @@ json_parse_restart:
if( c<='0' ){
if( c=='0' ){
if( (z[i+1]=='x' || z[i+1]=='X') && sqlite3Isxdigit(z[i+2]) ){
- assert( seenDP==JSON_INT );
+ assert( t==0x00 );
pParse->hasNonstd = 1;
- jnFlags |= JNODE_JSON5;
+ t = 0x01;
for(j=i+3; sqlite3Isxdigit(z[j]); j++){}
goto parse_number_finish;
}else if( sqlite3Isdigit(z[i+1]) ){
@@ -201068,15 +204965,15 @@ json_parse_restart:
){
pParse->hasNonstd = 1;
if( z[i]=='-' ){
- jsonParseAddNode(pParse, JSON_REAL, 8, "-9.0e999");
+ jsonBlobAppendNode(pParse, JSONB_FLOAT, 6, "-9e999");
}else{
- jsonParseAddNode(pParse, JSON_REAL, 7, "9.0e999");
+ jsonBlobAppendNode(pParse, JSONB_FLOAT, 5, "9e999");
}
return i + (sqlite3StrNICmp(&z[i+4],"inity",5)==0 ? 9 : 4);
}
if( z[i+1]=='.' ){
pParse->hasNonstd = 1;
- jnFlags |= JNODE_JSON5;
+ t |= 0x01;
goto parse_number_2;
}
pParse->iErr = i;
@@ -201088,30 +204985,31 @@ json_parse_restart:
return -1;
}else if( (z[i+2]=='x' || z[i+2]=='X') && sqlite3Isxdigit(z[i+3]) ){
pParse->hasNonstd = 1;
- jnFlags |= JNODE_JSON5;
+ t |= 0x01;
for(j=i+4; sqlite3Isxdigit(z[j]); j++){}
goto parse_number_finish;
}
}
}
}
+
parse_number_2:
for(j=i+1;; j++){
c = z[j];
if( sqlite3Isdigit(c) ) continue;
if( c=='.' ){
- if( seenDP==JSON_REAL ){
+ if( (t & 0x02)!=0 ){
pParse->iErr = j;
return -1;
}
- seenDP = JSON_REAL;
+ t |= 0x02;
continue;
}
if( c=='e' || c=='E' ){
if( z[j-1]<'0' ){
if( ALWAYS(z[j-1]=='.') && ALWAYS(j-2>=i) && sqlite3Isdigit(z[j-2]) ){
pParse->hasNonstd = 1;
- jnFlags |= JNODE_JSON5;
+ t |= 0x01;
}else{
pParse->iErr = j;
return -1;
@@ -201121,7 +205019,7 @@ json_parse_restart:
pParse->iErr = j;
return -1;
}
- seenDP = JSON_REAL;
+ t |= 0x02;
seenE = 1;
c = z[j+1];
if( c=='+' || c=='-' ){
@@ -201139,14 +205037,18 @@ json_parse_restart:
if( z[j-1]<'0' ){
if( ALWAYS(z[j-1]=='.') && ALWAYS(j-2>=i) && sqlite3Isdigit(z[j-2]) ){
pParse->hasNonstd = 1;
- jnFlags |= JNODE_JSON5;
+ t |= 0x01;
}else{
pParse->iErr = j;
return -1;
}
}
parse_number_finish:
- jsonParseAddNode(pParse, seenDP | (jnFlags<<8), j - i, &z[i]);
+ assert( JSONB_INT+0x01==JSONB_INT5 );
+ assert( JSONB_FLOAT+0x01==JSONB_FLOAT5 );
+ assert( JSONB_INT+0x02==JSONB_FLOAT );
+ if( z[i]=='+' ) i++;
+ jsonBlobAppendNode(pParse, JSONB_INT+t, j-i, &z[i]);
return j;
}
case '}': {
@@ -201172,9 +205074,7 @@ json_parse_restart:
case 0x0a:
case 0x0d:
case 0x20: {
- do{
- i++;
- }while( fast_isspace(z[i]) );
+ i += 1 + (u32)strspn(&z[i+1], jsonSpaces);
goto json_parse_restart;
}
case 0x0b:
@@ -201196,7 +205096,7 @@ json_parse_restart:
}
case 'n': {
if( strncmp(z+i,"null",4)==0 && !sqlite3Isalnum(z[i+4]) ){
- jsonParseAddNode(pParse, JSON_NULL, 0, 0);
+ jsonBlobAppendOneByte(pParse, JSONB_NULL);
return i+4;
}
/* fall-through into the default case that checks for NaN */
@@ -201212,8 +205112,11 @@ json_parse_restart:
continue;
}
if( sqlite3Isalnum(z[i+nn]) ) continue;
- jsonParseAddNode(pParse, aNanInfName[k].eType,
- aNanInfName[k].nRepl, aNanInfName[k].zRepl);
+ if( aNanInfName[k].eType==JSONB_FLOAT ){
+ jsonBlobAppendNode(pParse, JSONB_FLOAT, 5, "9e999");
+ }else{
+ jsonBlobAppendOneByte(pParse, JSONB_NULL);
+ }
pParse->hasNonstd = 1;
return i + nn;
}
@@ -201223,30 +205126,35 @@ json_parse_restart:
} /* End switch(z[i]) */
}
+
/*
** Parse a complete JSON string. Return 0 on success or non-zero if there
-** are any errors. If an error occurs, free all memory associated with
-** pParse.
+** are any errors. If an error occurs, free all memory held by pParse,
+** but not pParse itself.
**
-** pParse is uninitialized when this routine is called.
+** pParse must be initialized to an empty parse object prior to calling
+** this routine.
*/
-static int jsonParse(
+static int jsonConvertTextToBlob(
JsonParse *pParse, /* Initialize and fill this JsonParse object */
- sqlite3_context *pCtx, /* Report errors here */
- const char *zJson /* Input JSON text to be parsed */
+ sqlite3_context *pCtx /* Report errors here */
){
int i;
- memset(pParse, 0, sizeof(*pParse));
- if( zJson==0 ) return 1;
- pParse->zJson = zJson;
- i = jsonParseValue(pParse, 0);
+ const char *zJson = pParse->zJson;
+ i = jsonTranslateTextToBlob(pParse, 0);
if( pParse->oom ) i = -1;
if( i>0 ){
+#ifdef SQLITE_DEBUG
assert( pParse->iDepth==0 );
- while( fast_isspace(zJson[i]) ) i++;
+ if( sqlite3Config.bJsonSelfcheck ){
+ assert( jsonbValidityCheck(pParse, 0, pParse->nBlob, 0)==0 );
+ }
+#endif
+ while( jsonIsspace(zJson[i]) ) i++;
if( zJson[i] ){
i += json5Whitespace(&zJson[i]);
if( zJson[i] ){
+ if( pCtx ) sqlite3_result_error(pCtx, "malformed JSON", -1);
jsonParseReset(pParse);
return 1;
}
@@ -201267,183 +205175,715 @@ static int jsonParse(
return 0;
}
-/* Mark node i of pParse as being a child of iParent. Call recursively
-** to fill in all the descendants of node i.
+/*
+** The input string pStr is a well-formed JSON text string. Convert
+** this into the JSONB format and make it the return value of the
+** SQL function.
*/
-static void jsonParseFillInParentage(JsonParse *pParse, u32 i, u32 iParent){
- JsonNode *pNode = &pParse->aNode[i];
- u32 j;
- pParse->aUp[i] = iParent;
- switch( pNode->eType ){
- case JSON_ARRAY: {
- for(j=1; j<=pNode->n; j += jsonNodeSize(pNode+j)){
- jsonParseFillInParentage(pParse, i+j, i);
+static void jsonReturnStringAsBlob(JsonString *pStr){
+ JsonParse px;
+ memset(&px, 0, sizeof(px));
+ jsonStringTerminate(pStr);
+ px.zJson = pStr->zBuf;
+ px.nJson = pStr->nUsed;
+ px.db = sqlite3_context_db_handle(pStr->pCtx);
+ (void)jsonTranslateTextToBlob(&px, 0);
+ if( px.oom ){
+ sqlite3DbFree(px.db, px.aBlob);
+ sqlite3_result_error_nomem(pStr->pCtx);
+ }else{
+ assert( px.nBlobAlloc>0 );
+ assert( !px.bReadOnly );
+ sqlite3_result_blob(pStr->pCtx, px.aBlob, px.nBlob, SQLITE_DYNAMIC);
+ }
+}
+
+/* The byte at index i is a node type-code. This routine
+** determines the payload size for that node and writes that
+** payload size in to *pSz. It returns the offset from i to the
+** beginning of the payload. Return 0 on error.
+*/
+static u32 jsonbPayloadSize(const JsonParse *pParse, u32 i, u32 *pSz){
+ u8 x;
+ u32 sz;
+ u32 n;
+ if( NEVER(i>pParse->nBlob) ){
+ *pSz = 0;
+ return 0;
+ }
+ x = pParse->aBlob[i]>>4;
+ if( x<=11 ){
+ sz = x;
+ n = 1;
+ }else if( x==12 ){
+ if( i+1>=pParse->nBlob ){
+ *pSz = 0;
+ return 0;
+ }
+ sz = pParse->aBlob[i+1];
+ n = 2;
+ }else if( x==13 ){
+ if( i+2>=pParse->nBlob ){
+ *pSz = 0;
+ return 0;
+ }
+ sz = (pParse->aBlob[i+1]<<8) + pParse->aBlob[i+2];
+ n = 3;
+ }else if( x==14 ){
+ if( i+4>=pParse->nBlob ){
+ *pSz = 0;
+ return 0;
+ }
+ sz = ((u32)pParse->aBlob[i+1]<<24) + (pParse->aBlob[i+2]<<16) +
+ (pParse->aBlob[i+3]<<8) + pParse->aBlob[i+4];
+ n = 5;
+ }else{
+ if( i+8>=pParse->nBlob
+ || pParse->aBlob[i+1]!=0
+ || pParse->aBlob[i+2]!=0
+ || pParse->aBlob[i+3]!=0
+ || pParse->aBlob[i+4]!=0
+ ){
+ *pSz = 0;
+ return 0;
+ }
+ sz = (pParse->aBlob[i+5]<<24) + (pParse->aBlob[i+6]<<16) +
+ (pParse->aBlob[i+7]<<8) + pParse->aBlob[i+8];
+ n = 9;
+ }
+ if( (i64)i+sz+n > pParse->nBlob
+ && (i64)i+sz+n > pParse->nBlob-pParse->delta
+ ){
+ sz = 0;
+ n = 0;
+ }
+ *pSz = sz;
+ return n;
+}
+
+
+/*
+** Translate the binary JSONB representation of JSON beginning at
+** pParse->aBlob[i] into a JSON text string. Append the JSON
+** text onto the end of pOut. Return the index in pParse->aBlob[]
+** of the first byte past the end of the element that is translated.
+**
+** If an error is detected in the BLOB input, the pOut->eErr flag
+** might get set to JSTRING_MALFORMED. But not all BLOB input errors
+** are detected. So a malformed JSONB input might either result
+** in an error, or in incorrect JSON.
+**
+** The pOut->eErr JSTRING_OOM flag is set on a OOM.
+*/
+static u32 jsonTranslateBlobToText(
+ const JsonParse *pParse, /* the complete parse of the JSON */
+ u32 i, /* Start rendering at this index */
+ JsonString *pOut /* Write JSON here */
+){
+ u32 sz, n, j, iEnd;
+
+ n = jsonbPayloadSize(pParse, i, &sz);
+ if( n==0 ){
+ pOut->eErr |= JSTRING_MALFORMED;
+ return pParse->nBlob+1;
+ }
+ switch( pParse->aBlob[i] & 0x0f ){
+ case JSONB_NULL: {
+ jsonAppendRawNZ(pOut, "null", 4);
+ return i+1;
+ }
+ case JSONB_TRUE: {
+ jsonAppendRawNZ(pOut, "true", 4);
+ return i+1;
+ }
+ case JSONB_FALSE: {
+ jsonAppendRawNZ(pOut, "false", 5);
+ return i+1;
+ }
+ case JSONB_INT:
+ case JSONB_FLOAT: {
+ if( sz==0 ) goto malformed_jsonb;
+ jsonAppendRaw(pOut, (const char*)&pParse->aBlob[i+n], sz);
+ break;
+ }
+ case JSONB_INT5: { /* Integer literal in hexadecimal notation */
+ u32 k = 2;
+ sqlite3_uint64 u = 0;
+ const char *zIn = (const char*)&pParse->aBlob[i+n];
+ int bOverflow = 0;
+ if( sz==0 ) goto malformed_jsonb;
+ if( zIn[0]=='-' ){
+ jsonAppendChar(pOut, '-');
+ k++;
+ }else if( zIn[0]=='+' ){
+ k++;
+ }
+ for(; k<sz; k++){
+ if( !sqlite3Isxdigit(zIn[k]) ){
+ pOut->eErr |= JSTRING_MALFORMED;
+ break;
+ }else if( (u>>60)!=0 ){
+ bOverflow = 1;
+ }else{
+ u = u*16 + sqlite3HexToInt(zIn[k]);
+ }
}
+ jsonPrintf(100,pOut,bOverflow?"9.0e999":"%llu", u);
break;
}
- case JSON_OBJECT: {
- for(j=1; j<=pNode->n; j += jsonNodeSize(pNode+j+1)+1){
- pParse->aUp[i+j] = i;
- jsonParseFillInParentage(pParse, i+j+1, i);
+ case JSONB_FLOAT5: { /* Float literal missing digits beside "." */
+ u32 k = 0;
+ const char *zIn = (const char*)&pParse->aBlob[i+n];
+ if( sz==0 ) goto malformed_jsonb;
+ if( zIn[0]=='-' ){
+ jsonAppendChar(pOut, '-');
+ k++;
+ }
+ if( zIn[k]=='.' ){
+ jsonAppendChar(pOut, '0');
+ }
+ for(; k<sz; k++){
+ jsonAppendChar(pOut, zIn[k]);
+ if( zIn[k]=='.' && (k+1==sz || !sqlite3Isdigit(zIn[k+1])) ){
+ jsonAppendChar(pOut, '0');
+ }
+ }
+ break;
+ }
+ case JSONB_TEXT:
+ case JSONB_TEXTJ: {
+ jsonAppendChar(pOut, '"');
+ jsonAppendRaw(pOut, (const char*)&pParse->aBlob[i+n], sz);
+ jsonAppendChar(pOut, '"');
+ break;
+ }
+ case JSONB_TEXT5: {
+ const char *zIn;
+ u32 k;
+ u32 sz2 = sz;
+ zIn = (const char*)&pParse->aBlob[i+n];
+ jsonAppendChar(pOut, '"');
+ while( sz2>0 ){
+ for(k=0; k<sz2 && zIn[k]!='\\' && zIn[k]!='"'; k++){}
+ if( k>0 ){
+ jsonAppendRawNZ(pOut, zIn, k);
+ if( k>=sz2 ){
+ break;
+ }
+ zIn += k;
+ sz2 -= k;
+ }
+ if( zIn[0]=='"' ){
+ jsonAppendRawNZ(pOut, "\\\"", 2);
+ zIn++;
+ sz2--;
+ continue;
+ }
+ assert( zIn[0]=='\\' );
+ assert( sz2>=1 );
+ if( sz2<2 ){
+ pOut->eErr |= JSTRING_MALFORMED;
+ break;
+ }
+ switch( (u8)zIn[1] ){
+ case '\'':
+ jsonAppendChar(pOut, '\'');
+ break;
+ case 'v':
+ jsonAppendRawNZ(pOut, "\\u0009", 6);
+ break;
+ case 'x':
+ if( sz2<4 ){
+ pOut->eErr |= JSTRING_MALFORMED;
+ sz2 = 2;
+ break;
+ }
+ jsonAppendRawNZ(pOut, "\\u00", 4);
+ jsonAppendRawNZ(pOut, &zIn[2], 2);
+ zIn += 2;
+ sz2 -= 2;
+ break;
+ case '0':
+ jsonAppendRawNZ(pOut, "\\u0000", 6);
+ break;
+ case '\r':
+ if( sz2>2 && zIn[2]=='\n' ){
+ zIn++;
+ sz2--;
+ }
+ break;
+ case '\n':
+ break;
+ case 0xe2:
+ /* '\' followed by either U+2028 or U+2029 is ignored as
+ ** whitespace. Not that in UTF8, U+2028 is 0xe2 0x80 0x29.
+ ** U+2029 is the same except for the last byte */
+ if( sz2<4
+ || 0x80!=(u8)zIn[2]
+ || (0xa8!=(u8)zIn[3] && 0xa9!=(u8)zIn[3])
+ ){
+ pOut->eErr |= JSTRING_MALFORMED;
+ sz2 = 2;
+ break;
+ }
+ zIn += 2;
+ sz2 -= 2;
+ break;
+ default:
+ jsonAppendRawNZ(pOut, zIn, 2);
+ break;
+ }
+ assert( sz2>=2 );
+ zIn += 2;
+ sz2 -= 2;
}
+ jsonAppendChar(pOut, '"');
+ break;
+ }
+ case JSONB_TEXTRAW: {
+ jsonAppendString(pOut, (const char*)&pParse->aBlob[i+n], sz);
+ break;
+ }
+ case JSONB_ARRAY: {
+ jsonAppendChar(pOut, '[');
+ j = i+n;
+ iEnd = j+sz;
+ while( j<iEnd && pOut->eErr==0 ){
+ j = jsonTranslateBlobToText(pParse, j, pOut);
+ jsonAppendChar(pOut, ',');
+ }
+ if( j>iEnd ) pOut->eErr |= JSTRING_MALFORMED;
+ if( sz>0 ) jsonStringTrimOneChar(pOut);
+ jsonAppendChar(pOut, ']');
+ break;
+ }
+ case JSONB_OBJECT: {
+ int x = 0;
+ jsonAppendChar(pOut, '{');
+ j = i+n;
+ iEnd = j+sz;
+ while( j<iEnd && pOut->eErr==0 ){
+ j = jsonTranslateBlobToText(pParse, j, pOut);
+ jsonAppendChar(pOut, (x++ & 1) ? ',' : ':');
+ }
+ if( (x & 1)!=0 || j>iEnd ) pOut->eErr |= JSTRING_MALFORMED;
+ if( sz>0 ) jsonStringTrimOneChar(pOut);
+ jsonAppendChar(pOut, '}');
break;
}
+
default: {
+ malformed_jsonb:
+ pOut->eErr |= JSTRING_MALFORMED;
break;
}
}
+ return i+n+sz;
+}
+
+/* Return true if the input pJson
+**
+** For performance reasons, this routine does not do a detailed check of the
+** input BLOB to ensure that it is well-formed. Hence, false positives are
+** possible. False negatives should never occur, however.
+*/
+static int jsonFuncArgMightBeBinary(sqlite3_value *pJson){
+ u32 sz, n;
+ const u8 *aBlob;
+ int nBlob;
+ JsonParse s;
+ if( sqlite3_value_type(pJson)!=SQLITE_BLOB ) return 0;
+ aBlob = sqlite3_value_blob(pJson);
+ nBlob = sqlite3_value_bytes(pJson);
+ if( nBlob<1 ) return 0;
+ if( NEVER(aBlob==0) || (aBlob[0] & 0x0f)>JSONB_OBJECT ) return 0;
+ memset(&s, 0, sizeof(s));
+ s.aBlob = (u8*)aBlob;
+ s.nBlob = nBlob;
+ n = jsonbPayloadSize(&s, 0, &sz);
+ if( n==0 ) return 0;
+ if( sz+n!=(u32)nBlob ) return 0;
+ if( (aBlob[0] & 0x0f)<=JSONB_FALSE && sz>0 ) return 0;
+ return sz+n==(u32)nBlob;
}
/*
-** Compute the parentage of all nodes in a completed parse.
+** Given that a JSONB_ARRAY object starts at offset i, return
+** the number of entries in that array.
*/
-static int jsonParseFindParents(JsonParse *pParse){
- u32 *aUp;
- assert( pParse->aUp==0 );
- aUp = pParse->aUp = sqlite3_malloc64( sizeof(u32)*pParse->nNode );
- if( aUp==0 ){
- pParse->oom = 1;
- return SQLITE_NOMEM;
+static u32 jsonbArrayCount(JsonParse *pParse, u32 iRoot){
+ u32 n, sz, i, iEnd;
+ u32 k = 0;
+ n = jsonbPayloadSize(pParse, iRoot, &sz);
+ iEnd = iRoot+n+sz;
+ for(i=iRoot+n; n>0 && i<iEnd; i+=sz+n, k++){
+ n = jsonbPayloadSize(pParse, i, &sz);
}
- jsonParseFillInParentage(pParse, 0, 0);
- return SQLITE_OK;
+ return k;
}
/*
-** Magic number used for the JSON parse cache in sqlite3_get_auxdata()
+** Edit the payload size of the element at iRoot by the amount in
+** pParse->delta.
*/
-#define JSON_CACHE_ID (-429938) /* First cache entry */
-#define JSON_CACHE_SZ 4 /* Max number of cache entries */
+static void jsonAfterEditSizeAdjust(JsonParse *pParse, u32 iRoot){
+ u32 sz = 0;
+ u32 nBlob;
+ assert( pParse->delta!=0 );
+ assert( pParse->nBlobAlloc >= pParse->nBlob );
+ nBlob = pParse->nBlob;
+ pParse->nBlob = pParse->nBlobAlloc;
+ (void)jsonbPayloadSize(pParse, iRoot, &sz);
+ pParse->nBlob = nBlob;
+ sz += pParse->delta;
+ pParse->delta += jsonBlobChangePayloadSize(pParse, iRoot, sz);
+}
/*
-** Obtain a complete parse of the JSON found in the first argument
-** of the argv array. Use the sqlite3_get_auxdata() cache for this
-** parse if it is available. If the cache is not available or if it
-** is no longer valid, parse the JSON again and return the new parse,
-** and also register the new parse so that it will be available for
-** future sqlite3_get_auxdata() calls.
+** Modify the JSONB blob at pParse->aBlob by removing nDel bytes of
+** content beginning at iDel, and replacing them with nIns bytes of
+** content given by aIns.
**
-** If an error occurs and pErrCtx!=0 then report the error on pErrCtx
-** and return NULL.
+** nDel may be zero, in which case no bytes are removed. But iDel is
+** still important as new bytes will be insert beginning at iDel.
**
-** If an error occurs and pErrCtx==0 then return the Parse object with
-** JsonParse.nErr non-zero. If the caller invokes this routine with
-** pErrCtx==0 and it gets back a JsonParse with nErr!=0, then the caller
-** is responsible for invoking jsonParseFree() on the returned value.
-** But the caller may invoke jsonParseFree() *only* if pParse->nErr!=0.
+** aIns may be zero, in which case space is created to hold nIns bytes
+** beginning at iDel, but that space is uninitialized.
+**
+** Set pParse->oom if an OOM occurs.
*/
-static JsonParse *jsonParseCached(
- sqlite3_context *pCtx,
- sqlite3_value **argv,
- sqlite3_context *pErrCtx
+static void jsonBlobEdit(
+ JsonParse *pParse, /* The JSONB to be modified is in pParse->aBlob */
+ u32 iDel, /* First byte to be removed */
+ u32 nDel, /* Number of bytes to remove */
+ const u8 *aIns, /* Content to insert */
+ u32 nIns /* Bytes of content to insert */
){
- const char *zJson = (const char*)sqlite3_value_text(argv[0]);
- int nJson = sqlite3_value_bytes(argv[0]);
- JsonParse *p;
- JsonParse *pMatch = 0;
- int iKey;
- int iMinKey = 0;
- u32 iMinHold = 0xffffffff;
- u32 iMaxHold = 0;
- if( zJson==0 ) return 0;
- for(iKey=0; iKey<JSON_CACHE_SZ; iKey++){
- p = (JsonParse*)sqlite3_get_auxdata(pCtx, JSON_CACHE_ID+iKey);
- if( p==0 ){
- iMinKey = iKey;
- break;
+ i64 d = (i64)nIns - (i64)nDel;
+ if( d!=0 ){
+ if( pParse->nBlob + d > pParse->nBlobAlloc ){
+ jsonBlobExpand(pParse, pParse->nBlob+d);
+ if( pParse->oom ) return;
}
- if( pMatch==0
- && p->nJson==nJson
- && memcmp(p->zJson,zJson,nJson)==0
- ){
- p->nErr = 0;
- pMatch = p;
- }else if( p->iHold<iMinHold ){
- iMinHold = p->iHold;
- iMinKey = iKey;
+ memmove(&pParse->aBlob[iDel+nIns],
+ &pParse->aBlob[iDel+nDel],
+ pParse->nBlob - (iDel+nDel));
+ pParse->nBlob += d;
+ pParse->delta += d;
+ }
+ if( nIns && aIns ) memcpy(&pParse->aBlob[iDel], aIns, nIns);
+}
+
+/*
+** Return the number of escaped newlines to be ignored.
+** An escaped newline is a one of the following byte sequences:
+**
+** 0x5c 0x0a
+** 0x5c 0x0d
+** 0x5c 0x0d 0x0a
+** 0x5c 0xe2 0x80 0xa8
+** 0x5c 0xe2 0x80 0xa9
+*/
+static u32 jsonBytesToBypass(const char *z, u32 n){
+ u32 i = 0;
+ while( i+1<n ){
+ if( z[i]!='\\' ) return i;
+ if( z[i+1]=='\n' ){
+ i += 2;
+ continue;
}
- if( p->iHold>iMaxHold ){
- iMaxHold = p->iHold;
+ if( z[i+1]=='\r' ){
+ if( i+2<n && z[i+2]=='\n' ){
+ i += 3;
+ }else{
+ i += 2;
+ }
+ continue;
+ }
+ if( 0xe2==(u8)z[i+1]
+ && i+3<n
+ && 0x80==(u8)z[i+2]
+ && (0xa8==(u8)z[i+3] || 0xa9==(u8)z[i+3])
+ ){
+ i += 4;
+ continue;
}
+ break;
}
- if( pMatch ){
- pMatch->nErr = 0;
- pMatch->iHold = iMaxHold+1;
- return pMatch;
+ return i;
+}
+
+/*
+** Input z[0..n] defines JSON escape sequence including the leading '\\'.
+** Decode that escape sequence into a single character. Write that
+** character into *piOut. Return the number of bytes in the escape sequence.
+**
+** If there is a syntax error of some kind (for example too few characters
+** after the '\\' to complete the encoding) then *piOut is set to
+** JSON_INVALID_CHAR.
+*/
+static u32 jsonUnescapeOneChar(const char *z, u32 n, u32 *piOut){
+ assert( n>0 );
+ assert( z[0]=='\\' );
+ if( n<2 ){
+ *piOut = JSON_INVALID_CHAR;
+ return n;
}
- p = sqlite3_malloc64( sizeof(*p) + nJson + 1 );
- if( p==0 ){
- sqlite3_result_error_nomem(pCtx);
- return 0;
+ switch( (u8)z[1] ){
+ case 'u': {
+ u32 v, vlo;
+ if( n<6 ){
+ *piOut = JSON_INVALID_CHAR;
+ return n;
+ }
+ v = jsonHexToInt4(&z[2]);
+ if( (v & 0xfc00)==0xd800
+ && n>=12
+ && z[6]=='\\'
+ && z[7]=='u'
+ && ((vlo = jsonHexToInt4(&z[8]))&0xfc00)==0xdc00
+ ){
+ *piOut = ((v&0x3ff)<<10) + (vlo&0x3ff) + 0x10000;
+ return 12;
+ }else{
+ *piOut = v;
+ return 6;
+ }
+ }
+ case 'b': { *piOut = '\b'; return 2; }
+ case 'f': { *piOut = '\f'; return 2; }
+ case 'n': { *piOut = '\n'; return 2; }
+ case 'r': { *piOut = '\r'; return 2; }
+ case 't': { *piOut = '\t'; return 2; }
+ case 'v': { *piOut = '\v'; return 2; }
+ case '0': { *piOut = 0; return 2; }
+ case '\'':
+ case '"':
+ case '/':
+ case '\\':{ *piOut = z[1]; return 2; }
+ case 'x': {
+ if( n<4 ){
+ *piOut = JSON_INVALID_CHAR;
+ return n;
+ }
+ *piOut = (jsonHexToInt(z[2])<<4) | jsonHexToInt(z[3]);
+ return 4;
+ }
+ case 0xe2:
+ case '\r':
+ case '\n': {
+ u32 nSkip = jsonBytesToBypass(z, n);
+ if( nSkip==0 ){
+ *piOut = JSON_INVALID_CHAR;
+ return n;
+ }else if( nSkip==n ){
+ *piOut = 0;
+ return n;
+ }else if( z[nSkip]=='\\' ){
+ return nSkip + jsonUnescapeOneChar(&z[nSkip], n-nSkip, piOut);
+ }else{
+ int sz = sqlite3Utf8ReadLimited((u8*)&z[nSkip], n-nSkip, piOut);
+ return nSkip + sz;
+ }
+ }
+ default: {
+ *piOut = JSON_INVALID_CHAR;
+ return 2;
+ }
}
- memset(p, 0, sizeof(*p));
- p->zJson = (char*)&p[1];
- memcpy((char*)p->zJson, zJson, nJson+1);
- if( jsonParse(p, pErrCtx, p->zJson) ){
- if( pErrCtx==0 ){
- p->nErr = 1;
- return p;
+}
+
+
+/*
+** Compare two object labels. Return 1 if they are equal and
+** 0 if they differ.
+**
+** In this version, we know that one or the other or both of the
+** two comparands contains an escape sequence.
+*/
+static SQLITE_NOINLINE int jsonLabelCompareEscaped(
+ const char *zLeft, /* The left label */
+ u32 nLeft, /* Size of the left label in bytes */
+ int rawLeft, /* True if zLeft contains no escapes */
+ const char *zRight, /* The right label */
+ u32 nRight, /* Size of the right label in bytes */
+ int rawRight /* True if zRight is escape-free */
+){
+ u32 cLeft, cRight;
+ assert( rawLeft==0 || rawRight==0 );
+ while( 1 /*exit-by-return*/ ){
+ if( nLeft==0 ){
+ cLeft = 0;
+ }else if( rawLeft || zLeft[0]!='\\' ){
+ cLeft = ((u8*)zLeft)[0];
+ if( cLeft>=0xc0 ){
+ int sz = sqlite3Utf8ReadLimited((u8*)zLeft, nLeft, &cLeft);
+ zLeft += sz;
+ nLeft -= sz;
+ }else{
+ zLeft++;
+ nLeft--;
+ }
+ }else{
+ u32 n = jsonUnescapeOneChar(zLeft, nLeft, &cLeft);
+ zLeft += n;
+ assert( n<=nLeft );
+ nLeft -= n;
+ }
+ if( nRight==0 ){
+ cRight = 0;
+ }else if( rawRight || zRight[0]!='\\' ){
+ cRight = ((u8*)zRight)[0];
+ if( cRight>=0xc0 ){
+ int sz = sqlite3Utf8ReadLimited((u8*)zRight, nRight, &cRight);
+ zRight += sz;
+ nRight -= sz;
+ }else{
+ zRight++;
+ nRight--;
+ }
+ }else{
+ u32 n = jsonUnescapeOneChar(zRight, nRight, &cRight);
+ zRight += n;
+ assert( n<=nRight );
+ nRight -= n;
}
- sqlite3_free(p);
- return 0;
+ if( cLeft!=cRight ) return 0;
+ if( cLeft==0 ) return 1;
}
- p->nJson = nJson;
- p->iHold = iMaxHold+1;
- sqlite3_set_auxdata(pCtx, JSON_CACHE_ID+iMinKey, p,
- (void(*)(void*))jsonParseFree);
- return (JsonParse*)sqlite3_get_auxdata(pCtx, JSON_CACHE_ID+iMinKey);
}
/*
-** Compare the OBJECT label at pNode against zKey,nKey. Return true on
-** a match.
+** Compare two object labels. Return 1 if they are equal and
+** 0 if they differ. Return -1 if an OOM occurs.
*/
-static int jsonLabelCompare(const JsonNode *pNode, const char *zKey, u32 nKey){
- assert( pNode->eU==1 );
- if( pNode->jnFlags & JNODE_RAW ){
- if( pNode->n!=nKey ) return 0;
- return strncmp(pNode->u.zJContent, zKey, nKey)==0;
+static int jsonLabelCompare(
+ const char *zLeft, /* The left label */
+ u32 nLeft, /* Size of the left label in bytes */
+ int rawLeft, /* True if zLeft contains no escapes */
+ const char *zRight, /* The right label */
+ u32 nRight, /* Size of the right label in bytes */
+ int rawRight /* True if zRight is escape-free */
+){
+ if( rawLeft && rawRight ){
+ /* Simpliest case: Neither label contains escapes. A simple
+ ** memcmp() is sufficient. */
+ if( nLeft!=nRight ) return 0;
+ return memcmp(zLeft, zRight, nLeft)==0;
}else{
- if( pNode->n!=nKey+2 ) return 0;
- return strncmp(pNode->u.zJContent+1, zKey, nKey)==0;
+ return jsonLabelCompareEscaped(zLeft, nLeft, rawLeft,
+ zRight, nRight, rawRight);
}
}
-static int jsonSameLabel(const JsonNode *p1, const JsonNode *p2){
- if( p1->jnFlags & JNODE_RAW ){
- return jsonLabelCompare(p2, p1->u.zJContent, p1->n);
- }else if( p2->jnFlags & JNODE_RAW ){
- return jsonLabelCompare(p1, p2->u.zJContent, p2->n);
+
+/*
+** Error returns from jsonLookupStep()
+*/
+#define JSON_LOOKUP_ERROR 0xffffffff
+#define JSON_LOOKUP_NOTFOUND 0xfffffffe
+#define JSON_LOOKUP_PATHERROR 0xfffffffd
+#define JSON_LOOKUP_ISERROR(x) ((x)>=JSON_LOOKUP_PATHERROR)
+
+/* Forward declaration */
+static u32 jsonLookupStep(JsonParse*,u32,const char*,u32);
+
+
+/* This helper routine for jsonLookupStep() populates pIns with
+** binary data that is to be inserted into pParse.
+**
+** In the common case, pIns just points to pParse->aIns and pParse->nIns.
+** But if the zPath of the original edit operation includes path elements
+** that go deeper, additional substructure must be created.
+**
+** For example:
+**
+** json_insert('{}', '$.a.b.c', 123);
+**
+** The search stops at '$.a' But additional substructure must be
+** created for the ".b.c" part of the patch so that the final result
+** is: {"a":{"b":{"c"::123}}}. This routine populates pIns with
+** the binary equivalent of {"b":{"c":123}} so that it can be inserted.
+**
+** The caller is responsible for resetting pIns when it has finished
+** using the substructure.
+*/
+static u32 jsonCreateEditSubstructure(
+ JsonParse *pParse, /* The original JSONB that is being edited */
+ JsonParse *pIns, /* Populate this with the blob data to insert */
+ const char *zTail /* Tail of the path that determins substructure */
+){
+ static const u8 emptyObject[] = { JSONB_ARRAY, JSONB_OBJECT };
+ int rc;
+ memset(pIns, 0, sizeof(*pIns));
+ pIns->db = pParse->db;
+ if( zTail[0]==0 ){
+ /* No substructure. Just insert what is given in pParse. */
+ pIns->aBlob = pParse->aIns;
+ pIns->nBlob = pParse->nIns;
+ rc = 0;
}else{
- return p1->n==p2->n && strncmp(p1->u.zJContent,p2->u.zJContent,p1->n)==0;
+ /* Construct the binary substructure */
+ pIns->nBlob = 1;
+ pIns->aBlob = (u8*)&emptyObject[zTail[0]=='.'];
+ pIns->eEdit = pParse->eEdit;
+ pIns->nIns = pParse->nIns;
+ pIns->aIns = pParse->aIns;
+ rc = jsonLookupStep(pIns, 0, zTail, 0);
+ pParse->oom |= pIns->oom;
}
+ return rc; /* Error code only */
}
-/* forward declaration */
-static JsonNode *jsonLookupAppend(JsonParse*,const char*,int*,const char**);
-
/*
-** Search along zPath to find the node specified. Return a pointer
-** to that node, or NULL if zPath is malformed or if there is no such
-** node.
+** Search along zPath to find the Json element specified. Return an
+** index into pParse->aBlob[] for the start of that element's value.
+**
+** If the value found by this routine is the value half of label/value pair
+** within an object, then set pPath->iLabel to the start of the corresponding
+** label, before returning.
+**
+** Return one of the JSON_LOOKUP error codes if problems are seen.
**
-** If pApnd!=0, then try to append new nodes to complete zPath if it is
-** possible to do so and if no existing node corresponds to zPath. If
-** new nodes are appended *pApnd is set to 1.
+** This routine will also modify the blob. If pParse->eEdit is one of
+** JEDIT_DEL, JEDIT_REPL, JEDIT_INS, or JEDIT_SET, then changes might be
+** made to the selected value. If an edit is performed, then the return
+** value does not necessarily point to the select element. If an edit
+** is performed, the return value is only useful for detecting error
+** conditions.
*/
-static JsonNode *jsonLookupStep(
+static u32 jsonLookupStep(
JsonParse *pParse, /* The JSON to search */
- u32 iRoot, /* Begin the search at this node */
+ u32 iRoot, /* Begin the search at this element of aBlob[] */
const char *zPath, /* The path to search */
- int *pApnd, /* Append nodes to complete path if not NULL */
- const char **pzErr /* Make *pzErr point to any syntax error in zPath */
+ u32 iLabel /* Label if iRoot is a value of in an object */
){
- u32 i, j, nKey;
+ u32 i, j, k, nKey, sz, n, iEnd, rc;
const char *zKey;
- JsonNode *pRoot = &pParse->aNode[iRoot];
- if( zPath[0]==0 ) return pRoot;
- if( pRoot->jnFlags & JNODE_REPLACE ) return 0;
+ u8 x;
+
+ if( zPath[0]==0 ){
+ if( pParse->eEdit && jsonBlobMakeEditable(pParse, pParse->nIns) ){
+ n = jsonbPayloadSize(pParse, iRoot, &sz);
+ sz += n;
+ if( pParse->eEdit==JEDIT_DEL ){
+ if( iLabel>0 ){
+ sz += iRoot - iLabel;
+ iRoot = iLabel;
+ }
+ jsonBlobEdit(pParse, iRoot, sz, 0, 0);
+ }else if( pParse->eEdit==JEDIT_INS ){
+ /* Already exists, so json_insert() is a no-op */
+ }else{
+ /* json_set() or json_replace() */
+ jsonBlobEdit(pParse, iRoot, sz, pParse->aIns, pParse->nIns);
+ }
+ }
+ pParse->iLabel = iLabel;
+ return iRoot;
+ }
if( zPath[0]=='.' ){
- if( pRoot->eType!=JSON_OBJECT ) return 0;
+ int rawKey = 1;
+ x = pParse->aBlob[iRoot];
zPath++;
if( zPath[0]=='"' ){
zKey = zPath + 1;
@@ -201452,303 +205892,849 @@ static JsonNode *jsonLookupStep(
if( zPath[i] ){
i++;
}else{
- *pzErr = zPath;
- return 0;
+ return JSON_LOOKUP_PATHERROR;
}
testcase( nKey==0 );
+ rawKey = memchr(zKey, '\\', nKey)==0;
}else{
zKey = zPath;
for(i=0; zPath[i] && zPath[i]!='.' && zPath[i]!='['; i++){}
nKey = i;
if( nKey==0 ){
- *pzErr = zPath;
- return 0;
- }
- }
- j = 1;
- for(;;){
- while( j<=pRoot->n ){
- if( jsonLabelCompare(pRoot+j, zKey, nKey) ){
- return jsonLookupStep(pParse, iRoot+j+1, &zPath[i], pApnd, pzErr);
- }
- j++;
- j += jsonNodeSize(&pRoot[j]);
+ return JSON_LOOKUP_PATHERROR;
+ }
+ }
+ if( (x & 0x0f)!=JSONB_OBJECT ) return JSON_LOOKUP_NOTFOUND;
+ n = jsonbPayloadSize(pParse, iRoot, &sz);
+ j = iRoot + n; /* j is the index of a label */
+ iEnd = j+sz;
+ while( j<iEnd ){
+ int rawLabel;
+ const char *zLabel;
+ x = pParse->aBlob[j] & 0x0f;
+ if( x<JSONB_TEXT || x>JSONB_TEXTRAW ) return JSON_LOOKUP_ERROR;
+ n = jsonbPayloadSize(pParse, j, &sz);
+ if( n==0 ) return JSON_LOOKUP_ERROR;
+ k = j+n; /* k is the index of the label text */
+ if( k+sz>=iEnd ) return JSON_LOOKUP_ERROR;
+ zLabel = (const char*)&pParse->aBlob[k];
+ rawLabel = x==JSONB_TEXT || x==JSONB_TEXTRAW;
+ if( jsonLabelCompare(zKey, nKey, rawKey, zLabel, sz, rawLabel) ){
+ u32 v = k+sz; /* v is the index of the value */
+ if( ((pParse->aBlob[v])&0x0f)>JSONB_OBJECT ) return JSON_LOOKUP_ERROR;
+ n = jsonbPayloadSize(pParse, v, &sz);
+ if( n==0 || v+n+sz>iEnd ) return JSON_LOOKUP_ERROR;
+ assert( j>0 );
+ rc = jsonLookupStep(pParse, v, &zPath[i], j);
+ if( pParse->delta ) jsonAfterEditSizeAdjust(pParse, iRoot);
+ return rc;
}
- if( (pRoot->jnFlags & JNODE_APPEND)==0 ) break;
- assert( pRoot->eU==2 );
- iRoot += pRoot->u.iAppend;
- pRoot = &pParse->aNode[iRoot];
- j = 1;
- }
- if( pApnd ){
- u32 iStart, iLabel;
- JsonNode *pNode;
- iStart = jsonParseAddNode(pParse, JSON_OBJECT, 2, 0);
- iLabel = jsonParseAddNode(pParse, JSON_STRING, nKey, zKey);
- zPath += i;
- pNode = jsonLookupAppend(pParse, zPath, pApnd, pzErr);
- if( pParse->oom ) return 0;
- if( pNode ){
- pRoot = &pParse->aNode[iRoot];
- assert( pRoot->eU==0 );
- pRoot->u.iAppend = iStart - iRoot;
- pRoot->jnFlags |= JNODE_APPEND;
- VVA( pRoot->eU = 2 );
- pParse->aNode[iLabel].jnFlags |= JNODE_RAW;
- }
- return pNode;
+ j = k+sz;
+ if( ((pParse->aBlob[j])&0x0f)>JSONB_OBJECT ) return JSON_LOOKUP_ERROR;
+ n = jsonbPayloadSize(pParse, j, &sz);
+ if( n==0 ) return JSON_LOOKUP_ERROR;
+ j += n+sz;
+ }
+ if( j>iEnd ) return JSON_LOOKUP_ERROR;
+ if( pParse->eEdit>=JEDIT_INS ){
+ u32 nIns; /* Total bytes to insert (label+value) */
+ JsonParse v; /* BLOB encoding of the value to be inserted */
+ JsonParse ix; /* Header of the label to be inserted */
+ testcase( pParse->eEdit==JEDIT_INS );
+ testcase( pParse->eEdit==JEDIT_SET );
+ memset(&ix, 0, sizeof(ix));
+ ix.db = pParse->db;
+ jsonBlobAppendNode(&ix, rawKey?JSONB_TEXTRAW:JSONB_TEXT5, nKey, 0);
+ pParse->oom |= ix.oom;
+ rc = jsonCreateEditSubstructure(pParse, &v, &zPath[i]);
+ if( !JSON_LOOKUP_ISERROR(rc)
+ && jsonBlobMakeEditable(pParse, ix.nBlob+nKey+v.nBlob)
+ ){
+ assert( !pParse->oom );
+ nIns = ix.nBlob + nKey + v.nBlob;
+ jsonBlobEdit(pParse, j, 0, 0, nIns);
+ if( !pParse->oom ){
+ assert( pParse->aBlob!=0 ); /* Because pParse->oom!=0 */
+ assert( ix.aBlob!=0 ); /* Because pPasre->oom!=0 */
+ memcpy(&pParse->aBlob[j], ix.aBlob, ix.nBlob);
+ k = j + ix.nBlob;
+ memcpy(&pParse->aBlob[k], zKey, nKey);
+ k += nKey;
+ memcpy(&pParse->aBlob[k], v.aBlob, v.nBlob);
+ if( ALWAYS(pParse->delta) ) jsonAfterEditSizeAdjust(pParse, iRoot);
+ }
+ }
+ jsonParseReset(&v);
+ jsonParseReset(&ix);
+ return rc;
}
}else if( zPath[0]=='[' ){
- i = 0;
- j = 1;
- while( sqlite3Isdigit(zPath[j]) ){
- i = i*10 + zPath[j] - '0';
- j++;
+ x = pParse->aBlob[iRoot] & 0x0f;
+ if( x!=JSONB_ARRAY ) return JSON_LOOKUP_NOTFOUND;
+ n = jsonbPayloadSize(pParse, iRoot, &sz);
+ k = 0;
+ i = 1;
+ while( sqlite3Isdigit(zPath[i]) ){
+ k = k*10 + zPath[i] - '0';
+ i++;
}
- if( j<2 || zPath[j]!=']' ){
+ if( i<2 || zPath[i]!=']' ){
if( zPath[1]=='#' ){
- JsonNode *pBase = pRoot;
- int iBase = iRoot;
- if( pRoot->eType!=JSON_ARRAY ) return 0;
- for(;;){
- while( j<=pBase->n ){
- if( (pBase[j].jnFlags & JNODE_REMOVE)==0 ) i++;
- j += jsonNodeSize(&pBase[j]);
- }
- if( (pBase->jnFlags & JNODE_APPEND)==0 ) break;
- assert( pBase->eU==2 );
- iBase += pBase->u.iAppend;
- pBase = &pParse->aNode[iBase];
- j = 1;
- }
- j = 2;
+ k = jsonbArrayCount(pParse, iRoot);
+ i = 2;
if( zPath[2]=='-' && sqlite3Isdigit(zPath[3]) ){
- unsigned int x = 0;
- j = 3;
+ unsigned int nn = 0;
+ i = 3;
do{
- x = x*10 + zPath[j] - '0';
- j++;
- }while( sqlite3Isdigit(zPath[j]) );
- if( x>i ) return 0;
- i -= x;
+ nn = nn*10 + zPath[i] - '0';
+ i++;
+ }while( sqlite3Isdigit(zPath[i]) );
+ if( nn>k ) return JSON_LOOKUP_NOTFOUND;
+ k -= nn;
}
- if( zPath[j]!=']' ){
- *pzErr = zPath;
- return 0;
+ if( zPath[i]!=']' ){
+ return JSON_LOOKUP_PATHERROR;
}
}else{
- *pzErr = zPath;
- return 0;
+ return JSON_LOOKUP_PATHERROR;
}
}
- if( pRoot->eType!=JSON_ARRAY ) return 0;
- zPath += j + 1;
- j = 1;
- for(;;){
- while( j<=pRoot->n && (i>0 || (pRoot[j].jnFlags & JNODE_REMOVE)!=0) ){
- if( (pRoot[j].jnFlags & JNODE_REMOVE)==0 ) i--;
- j += jsonNodeSize(&pRoot[j]);
- }
- if( (pRoot->jnFlags & JNODE_APPEND)==0 ) break;
- assert( pRoot->eU==2 );
- iRoot += pRoot->u.iAppend;
- pRoot = &pParse->aNode[iRoot];
- j = 1;
- }
- if( j<=pRoot->n ){
- return jsonLookupStep(pParse, iRoot+j, zPath, pApnd, pzErr);
- }
- if( i==0 && pApnd ){
- u32 iStart;
- JsonNode *pNode;
- iStart = jsonParseAddNode(pParse, JSON_ARRAY, 1, 0);
- pNode = jsonLookupAppend(pParse, zPath, pApnd, pzErr);
- if( pParse->oom ) return 0;
- if( pNode ){
- pRoot = &pParse->aNode[iRoot];
- assert( pRoot->eU==0 );
- pRoot->u.iAppend = iStart - iRoot;
- pRoot->jnFlags |= JNODE_APPEND;
- VVA( pRoot->eU = 2 );
+ j = iRoot+n;
+ iEnd = j+sz;
+ while( j<iEnd ){
+ if( k==0 ){
+ rc = jsonLookupStep(pParse, j, &zPath[i+1], 0);
+ if( pParse->delta ) jsonAfterEditSizeAdjust(pParse, iRoot);
+ return rc;
}
- return pNode;
+ k--;
+ n = jsonbPayloadSize(pParse, j, &sz);
+ if( n==0 ) return JSON_LOOKUP_ERROR;
+ j += n+sz;
+ }
+ if( j>iEnd ) return JSON_LOOKUP_ERROR;
+ if( k>0 ) return JSON_LOOKUP_NOTFOUND;
+ if( pParse->eEdit>=JEDIT_INS ){
+ JsonParse v;
+ testcase( pParse->eEdit==JEDIT_INS );
+ testcase( pParse->eEdit==JEDIT_SET );
+ rc = jsonCreateEditSubstructure(pParse, &v, &zPath[i+1]);
+ if( !JSON_LOOKUP_ISERROR(rc)
+ && jsonBlobMakeEditable(pParse, v.nBlob)
+ ){
+ assert( !pParse->oom );
+ jsonBlobEdit(pParse, j, 0, v.aBlob, v.nBlob);
+ }
+ jsonParseReset(&v);
+ if( pParse->delta ) jsonAfterEditSizeAdjust(pParse, iRoot);
+ return rc;
}
}else{
- *pzErr = zPath;
+ return JSON_LOOKUP_PATHERROR;
}
- return 0;
+ return JSON_LOOKUP_NOTFOUND;
}
/*
-** Append content to pParse that will complete zPath. Return a pointer
-** to the inserted node, or return NULL if the append fails.
+** Convert a JSON BLOB into text and make that text the return value
+** of an SQL function.
*/
-static JsonNode *jsonLookupAppend(
- JsonParse *pParse, /* Append content to the JSON parse */
- const char *zPath, /* Description of content to append */
- int *pApnd, /* Set this flag to 1 */
- const char **pzErr /* Make this point to any syntax error */
+static void jsonReturnTextJsonFromBlob(
+ sqlite3_context *ctx,
+ const u8 *aBlob,
+ u32 nBlob
){
- *pApnd = 1;
- if( zPath[0]==0 ){
- jsonParseAddNode(pParse, JSON_NULL, 0, 0);
- return pParse->oom ? 0 : &pParse->aNode[pParse->nNode-1];
- }
- if( zPath[0]=='.' ){
- jsonParseAddNode(pParse, JSON_OBJECT, 0, 0);
- }else if( strncmp(zPath,"[0]",3)==0 ){
- jsonParseAddNode(pParse, JSON_ARRAY, 0, 0);
- }else{
- return 0;
- }
- if( pParse->oom ) return 0;
- return jsonLookupStep(pParse, pParse->nNode-1, zPath, pApnd, pzErr);
+ JsonParse x;
+ JsonString s;
+
+ if( NEVER(aBlob==0) ) return;
+ memset(&x, 0, sizeof(x));
+ x.aBlob = (u8*)aBlob;
+ x.nBlob = nBlob;
+ jsonStringInit(&s, ctx);
+ jsonTranslateBlobToText(&x, 0, &s);
+ jsonReturnString(&s, 0, 0);
}
+
/*
-** Return the text of a syntax error message on a JSON path. Space is
-** obtained from sqlite3_malloc().
+** Return the value of the BLOB node at index i.
+**
+** If the value is a primitive, return it as an SQL value.
+** If the value is an array or object, return it as either
+** JSON text or the BLOB encoding, depending on the JSON_B flag
+** on the userdata.
*/
-static char *jsonPathSyntaxError(const char *zErr){
- return sqlite3_mprintf("JSON path error near '%q'", zErr);
+static void jsonReturnFromBlob(
+ JsonParse *pParse, /* Complete JSON parse tree */
+ u32 i, /* Index of the node */
+ sqlite3_context *pCtx, /* Return value for this function */
+ int textOnly /* return text JSON. Disregard user-data */
+){
+ u32 n, sz;
+ int rc;
+ sqlite3 *db = sqlite3_context_db_handle(pCtx);
+
+ n = jsonbPayloadSize(pParse, i, &sz);
+ if( n==0 ){
+ sqlite3_result_error(pCtx, "malformed JSON", -1);
+ return;
+ }
+ switch( pParse->aBlob[i] & 0x0f ){
+ case JSONB_NULL: {
+ if( sz ) goto returnfromblob_malformed;
+ sqlite3_result_null(pCtx);
+ break;
+ }
+ case JSONB_TRUE: {
+ if( sz ) goto returnfromblob_malformed;
+ sqlite3_result_int(pCtx, 1);
+ break;
+ }
+ case JSONB_FALSE: {
+ if( sz ) goto returnfromblob_malformed;
+ sqlite3_result_int(pCtx, 0);
+ break;
+ }
+ case JSONB_INT5:
+ case JSONB_INT: {
+ sqlite3_int64 iRes = 0;
+ char *z;
+ int bNeg = 0;
+ char x;
+ if( sz==0 ) goto returnfromblob_malformed;
+ x = (char)pParse->aBlob[i+n];
+ if( x=='-' ){
+ if( sz<2 ) goto returnfromblob_malformed;
+ n++;
+ sz--;
+ bNeg = 1;
+ }
+ z = sqlite3DbStrNDup(db, (const char*)&pParse->aBlob[i+n], (int)sz);
+ if( z==0 ) goto returnfromblob_oom;
+ rc = sqlite3DecOrHexToI64(z, &iRes);
+ sqlite3DbFree(db, z);
+ if( rc==0 ){
+ sqlite3_result_int64(pCtx, bNeg ? -iRes : iRes);
+ }else if( rc==3 && bNeg ){
+ sqlite3_result_int64(pCtx, SMALLEST_INT64);
+ }else if( rc==1 ){
+ goto returnfromblob_malformed;
+ }else{
+ if( bNeg ){ n--; sz++; }
+ goto to_double;
+ }
+ break;
+ }
+ case JSONB_FLOAT5:
+ case JSONB_FLOAT: {
+ double r;
+ char *z;
+ if( sz==0 ) goto returnfromblob_malformed;
+ to_double:
+ z = sqlite3DbStrNDup(db, (const char*)&pParse->aBlob[i+n], (int)sz);
+ if( z==0 ) goto returnfromblob_oom;
+ rc = sqlite3AtoF(z, &r, sqlite3Strlen30(z), SQLITE_UTF8);
+ sqlite3DbFree(db, z);
+ if( rc<=0 ) goto returnfromblob_malformed;
+ sqlite3_result_double(pCtx, r);
+ break;
+ }
+ case JSONB_TEXTRAW:
+ case JSONB_TEXT: {
+ sqlite3_result_text(pCtx, (char*)&pParse->aBlob[i+n], sz,
+ SQLITE_TRANSIENT);
+ break;
+ }
+ case JSONB_TEXT5:
+ case JSONB_TEXTJ: {
+ /* Translate JSON formatted string into raw text */
+ u32 iIn, iOut;
+ const char *z;
+ char *zOut;
+ u32 nOut = sz;
+ z = (const char*)&pParse->aBlob[i+n];
+ zOut = sqlite3DbMallocRaw(db, nOut+1);
+ if( zOut==0 ) goto returnfromblob_oom;
+ for(iIn=iOut=0; iIn<sz; iIn++){
+ char c = z[iIn];
+ if( c=='\\' ){
+ u32 v;
+ u32 szEscape = jsonUnescapeOneChar(&z[iIn], sz-iIn, &v);
+ if( v<=0x7f ){
+ zOut[iOut++] = (char)v;
+ }else if( v<=0x7ff ){
+ assert( szEscape>=2 );
+ zOut[iOut++] = (char)(0xc0 | (v>>6));
+ zOut[iOut++] = 0x80 | (v&0x3f);
+ }else if( v<0x10000 ){
+ assert( szEscape>=3 );
+ zOut[iOut++] = 0xe0 | (v>>12);
+ zOut[iOut++] = 0x80 | ((v>>6)&0x3f);
+ zOut[iOut++] = 0x80 | (v&0x3f);
+ }else if( v==JSON_INVALID_CHAR ){
+ /* Silently ignore illegal unicode */
+ }else{
+ assert( szEscape>=4 );
+ zOut[iOut++] = 0xf0 | (v>>18);
+ zOut[iOut++] = 0x80 | ((v>>12)&0x3f);
+ zOut[iOut++] = 0x80 | ((v>>6)&0x3f);
+ zOut[iOut++] = 0x80 | (v&0x3f);
+ }
+ iIn += szEscape - 1;
+ }else{
+ zOut[iOut++] = c;
+ }
+ } /* end for() */
+ assert( iOut<=nOut );
+ zOut[iOut] = 0;
+ sqlite3_result_text(pCtx, zOut, iOut, SQLITE_DYNAMIC);
+ break;
+ }
+ case JSONB_ARRAY:
+ case JSONB_OBJECT: {
+ int flags = textOnly ? 0 : SQLITE_PTR_TO_INT(sqlite3_user_data(pCtx));
+ if( flags & JSON_BLOB ){
+ sqlite3_result_blob(pCtx, &pParse->aBlob[i], sz+n, SQLITE_TRANSIENT);
+ }else{
+ jsonReturnTextJsonFromBlob(pCtx, &pParse->aBlob[i], sz+n);
+ }
+ break;
+ }
+ default: {
+ goto returnfromblob_malformed;
+ }
+ }
+ return;
+
+returnfromblob_oom:
+ sqlite3_result_error_nomem(pCtx);
+ return;
+
+returnfromblob_malformed:
+ sqlite3_result_error(pCtx, "malformed JSON", -1);
+ return;
}
/*
-** Do a node lookup using zPath. Return a pointer to the node on success.
-** Return NULL if not found or if there is an error.
+** pArg is a function argument that might be an SQL value or a JSON
+** value. Figure out what it is and encode it as a JSONB blob.
+** Return the results in pParse.
**
-** On an error, write an error message into pCtx and increment the
-** pParse->nErr counter.
+** pParse is uninitialized upon entry. This routine will handle the
+** initialization of pParse. The result will be contained in
+** pParse->aBlob and pParse->nBlob. pParse->aBlob might be dynamically
+** allocated (if pParse->nBlobAlloc is greater than zero) in which case
+** the caller is responsible for freeing the space allocated to pParse->aBlob
+** when it has finished with it. Or pParse->aBlob might be a static string
+** or a value obtained from sqlite3_value_blob(pArg).
**
-** If pApnd!=NULL then try to append missing nodes and set *pApnd = 1 if
-** nodes are appended.
+** If the argument is a BLOB that is clearly not a JSONB, then this
+** function might set an error message in ctx and return non-zero.
+** It might also set an error message and return non-zero on an OOM error.
*/
-static JsonNode *jsonLookup(
- JsonParse *pParse, /* The JSON to search */
- const char *zPath, /* The path to search */
- int *pApnd, /* Append nodes to complete path if not NULL */
- sqlite3_context *pCtx /* Report errors here, if not NULL */
-){
- const char *zErr = 0;
- JsonNode *pNode = 0;
- char *zMsg;
-
- if( zPath==0 ) return 0;
- if( zPath[0]!='$' ){
- zErr = zPath;
- goto lookup_err;
+static int jsonFunctionArgToBlob(
+ sqlite3_context *ctx,
+ sqlite3_value *pArg,
+ JsonParse *pParse
+){
+ int eType = sqlite3_value_type(pArg);
+ static u8 aNull[] = { 0x00 };
+ memset(pParse, 0, sizeof(pParse[0]));
+ pParse->db = sqlite3_context_db_handle(ctx);
+ switch( eType ){
+ default: {
+ pParse->aBlob = aNull;
+ pParse->nBlob = 1;
+ return 0;
+ }
+ case SQLITE_BLOB: {
+ if( jsonFuncArgMightBeBinary(pArg) ){
+ pParse->aBlob = (u8*)sqlite3_value_blob(pArg);
+ pParse->nBlob = sqlite3_value_bytes(pArg);
+ }else{
+ sqlite3_result_error(ctx, "JSON cannot hold BLOB values", -1);
+ return 1;
+ }
+ break;
+ }
+ case SQLITE_TEXT: {
+ const char *zJson = (const char*)sqlite3_value_text(pArg);
+ int nJson = sqlite3_value_bytes(pArg);
+ if( zJson==0 ) return 1;
+ if( sqlite3_value_subtype(pArg)==JSON_SUBTYPE ){
+ pParse->zJson = (char*)zJson;
+ pParse->nJson = nJson;
+ if( jsonConvertTextToBlob(pParse, ctx) ){
+ sqlite3_result_error(ctx, "malformed JSON", -1);
+ sqlite3DbFree(pParse->db, pParse->aBlob);
+ memset(pParse, 0, sizeof(pParse[0]));
+ return 1;
+ }
+ }else{
+ jsonBlobAppendNode(pParse, JSONB_TEXTRAW, nJson, zJson);
+ }
+ break;
+ }
+ case SQLITE_FLOAT: {
+ double r = sqlite3_value_double(pArg);
+ if( NEVER(sqlite3IsNaN(r)) ){
+ jsonBlobAppendNode(pParse, JSONB_NULL, 0, 0);
+ }else{
+ int n = sqlite3_value_bytes(pArg);
+ const char *z = (const char*)sqlite3_value_text(pArg);
+ if( z==0 ) return 1;
+ if( z[0]=='I' ){
+ jsonBlobAppendNode(pParse, JSONB_FLOAT, 5, "9e999");
+ }else if( z[0]=='-' && z[1]=='I' ){
+ jsonBlobAppendNode(pParse, JSONB_FLOAT, 6, "-9e999");
+ }else{
+ jsonBlobAppendNode(pParse, JSONB_FLOAT, n, z);
+ }
+ }
+ break;
+ }
+ case SQLITE_INTEGER: {
+ int n = sqlite3_value_bytes(pArg);
+ const char *z = (const char*)sqlite3_value_text(pArg);
+ if( z==0 ) return 1;
+ jsonBlobAppendNode(pParse, JSONB_INT, n, z);
+ break;
+ }
+ }
+ if( pParse->oom ){
+ sqlite3_result_error_nomem(ctx);
+ return 1;
+ }else{
+ return 0;
}
- zPath++;
- pNode = jsonLookupStep(pParse, 0, zPath, pApnd, &zErr);
- if( zErr==0 ) return pNode;
+}
-lookup_err:
- pParse->nErr++;
- assert( zErr!=0 && pCtx!=0 );
- zMsg = jsonPathSyntaxError(zErr);
+/*
+** Generate a bad path error.
+**
+** If ctx is not NULL then push the error message into ctx and return NULL.
+** If ctx is NULL, then return the text of the error message.
+*/
+static char *jsonBadPathError(
+ sqlite3_context *ctx, /* The function call containing the error */
+ const char *zPath /* The path with the problem */
+){
+ char *zMsg = sqlite3_mprintf("bad JSON path: %Q", zPath);
+ if( ctx==0 ) return zMsg;
if( zMsg ){
- sqlite3_result_error(pCtx, zMsg, -1);
+ sqlite3_result_error(ctx, zMsg, -1);
sqlite3_free(zMsg);
}else{
- sqlite3_result_error_nomem(pCtx);
+ sqlite3_result_error_nomem(ctx);
}
return 0;
}
+/* argv[0] is a BLOB that seems likely to be a JSONB. Subsequent
+** arguments come in parse where each pair contains a JSON path and
+** content to insert or set at that patch. Do the updates
+** and return the result.
+**
+** The specific operation is determined by eEdit, which can be one
+** of JEDIT_INS, JEDIT_REPL, or JEDIT_SET.
+*/
+static void jsonInsertIntoBlob(
+ sqlite3_context *ctx,
+ int argc,
+ sqlite3_value **argv,
+ int eEdit /* JEDIT_INS, JEDIT_REPL, or JEDIT_SET */
+){
+ int i;
+ u32 rc = 0;
+ const char *zPath = 0;
+ int flgs;
+ JsonParse *p;
+ JsonParse ax;
+
+ assert( (argc&1)==1 );
+ flgs = argc==1 ? 0 : JSON_EDITABLE;
+ p = jsonParseFuncArg(ctx, argv[0], flgs);
+ if( p==0 ) return;
+ for(i=1; i<argc-1; i+=2){
+ if( sqlite3_value_type(argv[i])==SQLITE_NULL ) continue;
+ zPath = (const char*)sqlite3_value_text(argv[i]);
+ if( zPath==0 ){
+ sqlite3_result_error_nomem(ctx);
+ jsonParseFree(p);
+ return;
+ }
+ if( zPath[0]!='$' ) goto jsonInsertIntoBlob_patherror;
+ if( jsonFunctionArgToBlob(ctx, argv[i+1], &ax) ){
+ jsonParseReset(&ax);
+ jsonParseFree(p);
+ return;
+ }
+ if( zPath[1]==0 ){
+ if( eEdit==JEDIT_REPL || eEdit==JEDIT_SET ){
+ jsonBlobEdit(p, 0, p->nBlob, ax.aBlob, ax.nBlob);
+ }
+ rc = 0;
+ }else{
+ p->eEdit = eEdit;
+ p->nIns = ax.nBlob;
+ p->aIns = ax.aBlob;
+ p->delta = 0;
+ rc = jsonLookupStep(p, 0, zPath+1, 0);
+ }
+ jsonParseReset(&ax);
+ if( rc==JSON_LOOKUP_NOTFOUND ) continue;
+ if( JSON_LOOKUP_ISERROR(rc) ) goto jsonInsertIntoBlob_patherror;
+ }
+ jsonReturnParse(ctx, p);
+ jsonParseFree(p);
+ return;
+
+jsonInsertIntoBlob_patherror:
+ jsonParseFree(p);
+ if( rc==JSON_LOOKUP_ERROR ){
+ sqlite3_result_error(ctx, "malformed JSON", -1);
+ }else{
+ jsonBadPathError(ctx, zPath);
+ }
+ return;
+}
/*
-** Report the wrong number of arguments for json_insert(), json_replace()
-** or json_set().
+** If pArg is a blob that seems like a JSONB blob, then initialize
+** p to point to that JSONB and return TRUE. If pArg does not seem like
+** a JSONB blob, then return FALSE;
+**
+** This routine is only called if it is already known that pArg is a
+** blob. The only open question is whether or not the blob appears
+** to be a JSONB blob.
*/
-static void jsonWrongNumArgs(
- sqlite3_context *pCtx,
- const char *zFuncName
-){
- char *zMsg = sqlite3_mprintf("json_%s() needs an odd number of arguments",
- zFuncName);
- sqlite3_result_error(pCtx, zMsg, -1);
- sqlite3_free(zMsg);
+static int jsonArgIsJsonb(sqlite3_value *pArg, JsonParse *p){
+ u32 n, sz = 0;
+ p->aBlob = (u8*)sqlite3_value_blob(pArg);
+ p->nBlob = (u32)sqlite3_value_bytes(pArg);
+ if( p->nBlob==0 ){
+ p->aBlob = 0;
+ return 0;
+ }
+ if( NEVER(p->aBlob==0) ){
+ return 0;
+ }
+ if( (p->aBlob[0] & 0x0f)<=JSONB_OBJECT
+ && (n = jsonbPayloadSize(p, 0, &sz))>0
+ && sz+n==p->nBlob
+ && ((p->aBlob[0] & 0x0f)>JSONB_FALSE || sz==0)
+ ){
+ return 1;
+ }
+ p->aBlob = 0;
+ p->nBlob = 0;
+ return 0;
}
/*
-** Mark all NULL entries in the Object passed in as JNODE_REMOVE.
+** Generate a JsonParse object, containing valid JSONB in aBlob and nBlob,
+** from the SQL function argument pArg. Return a pointer to the new
+** JsonParse object.
+**
+** Ownership of the new JsonParse object is passed to the caller. The
+** caller should invoke jsonParseFree() on the return value when it
+** has finished using it.
+**
+** If any errors are detected, an appropriate error messages is set
+** using sqlite3_result_error() or the equivalent and this routine
+** returns NULL. This routine also returns NULL if the pArg argument
+** is an SQL NULL value, but no error message is set in that case. This
+** is so that SQL functions that are given NULL arguments will return
+** a NULL value.
*/
-static void jsonRemoveAllNulls(JsonNode *pNode){
- int i, n;
- assert( pNode->eType==JSON_OBJECT );
- n = pNode->n;
- for(i=2; i<=n; i += jsonNodeSize(&pNode[i])+1){
- switch( pNode[i].eType ){
- case JSON_NULL:
- pNode[i].jnFlags |= JNODE_REMOVE;
- break;
- case JSON_OBJECT:
- jsonRemoveAllNulls(&pNode[i]);
- break;
+static JsonParse *jsonParseFuncArg(
+ sqlite3_context *ctx,
+ sqlite3_value *pArg,
+ u32 flgs
+){
+ int eType; /* Datatype of pArg */
+ JsonParse *p = 0; /* Value to be returned */
+ JsonParse *pFromCache = 0; /* Value taken from cache */
+ sqlite3 *db; /* The database connection */
+
+ assert( ctx!=0 );
+ eType = sqlite3_value_type(pArg);
+ if( eType==SQLITE_NULL ){
+ return 0;
+ }
+ pFromCache = jsonCacheSearch(ctx, pArg);
+ if( pFromCache ){
+ pFromCache->nJPRef++;
+ if( (flgs & JSON_EDITABLE)==0 ){
+ return pFromCache;
}
}
-}
+ db = sqlite3_context_db_handle(ctx);
+rebuild_from_cache:
+ p = sqlite3DbMallocZero(db, sizeof(*p));
+ if( p==0 ) goto json_pfa_oom;
+ memset(p, 0, sizeof(*p));
+ p->db = db;
+ p->nJPRef = 1;
+ if( pFromCache!=0 ){
+ u32 nBlob = pFromCache->nBlob;
+ p->aBlob = sqlite3DbMallocRaw(db, nBlob);
+ if( p->aBlob==0 ) goto json_pfa_oom;
+ memcpy(p->aBlob, pFromCache->aBlob, nBlob);
+ p->nBlobAlloc = p->nBlob = nBlob;
+ p->hasNonstd = pFromCache->hasNonstd;
+ jsonParseFree(pFromCache);
+ return p;
+ }
+ if( eType==SQLITE_BLOB ){
+ if( jsonArgIsJsonb(pArg,p) ){
+ if( (flgs & JSON_EDITABLE)!=0 && jsonBlobMakeEditable(p, 0)==0 ){
+ goto json_pfa_oom;
+ }
+ return p;
+ }
+ /* If the blob is not valid JSONB, fall through into trying to cast
+ ** the blob into text which is then interpreted as JSON. (tag-20240123-a)
+ **
+ ** This goes against all historical documentation about how the SQLite
+ ** JSON functions were suppose to work. From the beginning, blob was
+ ** reserved for expansion and a blob value should have raised an error.
+ ** But it did not, due to a bug. And many applications came to depend
+ ** upon this buggy behavior, espeically when using the CLI and reading
+ ** JSON text using readfile(), which returns a blob. For this reason
+ ** we will continue to support the bug moving forward.
+ ** See for example https://sqlite.org/forum/forumpost/012136abd5292b8d
+ */
+ }
+ p->zJson = (char*)sqlite3_value_text(pArg);
+ p->nJson = sqlite3_value_bytes(pArg);
+ if( p->nJson==0 ) goto json_pfa_malformed;
+ if( NEVER(p->zJson==0) ) goto json_pfa_oom;
+ if( jsonConvertTextToBlob(p, (flgs & JSON_KEEPERROR) ? 0 : ctx) ){
+ if( flgs & JSON_KEEPERROR ){
+ p->nErr = 1;
+ return p;
+ }else{
+ jsonParseFree(p);
+ return 0;
+ }
+ }else{
+ int isRCStr = sqlite3ValueIsOfClass(pArg, sqlite3RCStrUnref);
+ int rc;
+ if( !isRCStr ){
+ char *zNew = sqlite3RCStrNew( p->nJson );
+ if( zNew==0 ) goto json_pfa_oom;
+ memcpy(zNew, p->zJson, p->nJson);
+ p->zJson = zNew;
+ p->zJson[p->nJson] = 0;
+ }else{
+ sqlite3RCStrRef(p->zJson);
+ }
+ p->bJsonIsRCStr = 1;
+ rc = jsonCacheInsert(ctx, p);
+ if( rc==SQLITE_NOMEM ) goto json_pfa_oom;
+ if( flgs & JSON_EDITABLE ){
+ pFromCache = p;
+ p = 0;
+ goto rebuild_from_cache;
+ }
+ }
+ return p;
+json_pfa_malformed:
+ if( flgs & JSON_KEEPERROR ){
+ p->nErr = 1;
+ return p;
+ }else{
+ jsonParseFree(p);
+ sqlite3_result_error(ctx, "malformed JSON", -1);
+ return 0;
+ }
-/****************************************************************************
-** SQL functions used for testing and debugging
-****************************************************************************/
+json_pfa_oom:
+ jsonParseFree(pFromCache);
+ jsonParseFree(p);
+ sqlite3_result_error_nomem(ctx);
+ return 0;
+}
-#ifdef SQLITE_DEBUG
/*
-** The json_parse(JSON) function returns a string which describes
-** a parse of the JSON provided. Or it returns NULL if JSON is not
-** well-formed.
+** Make the return value of a JSON function either the raw JSONB blob
+** or make it JSON text, depending on whether the JSON_BLOB flag is
+** set on the function.
*/
-static void jsonParseFunc(
+static void jsonReturnParse(
sqlite3_context *ctx,
- int argc,
- sqlite3_value **argv
+ JsonParse *p
){
- JsonString s; /* Output string - not real JSON */
- JsonParse x; /* The parse */
- u32 i;
-
- assert( argc==1 );
- if( jsonParse(&x, ctx, (const char*)sqlite3_value_text(argv[0])) ) return;
- jsonParseFindParents(&x);
- jsonInit(&s, ctx);
- for(i=0; i<x.nNode; i++){
- const char *zType;
- if( x.aNode[i].jnFlags & JNODE_LABEL ){
- assert( x.aNode[i].eType==JSON_STRING );
- zType = "label";
+ int flgs;
+ if( p->oom ){
+ sqlite3_result_error_nomem(ctx);
+ return;
+ }
+ flgs = SQLITE_PTR_TO_INT(sqlite3_user_data(ctx));
+ if( flgs & JSON_BLOB ){
+ if( p->nBlobAlloc>0 && !p->bReadOnly ){
+ sqlite3_result_blob(ctx, p->aBlob, p->nBlob, SQLITE_DYNAMIC);
+ p->nBlobAlloc = 0;
}else{
- zType = jsonType[x.aNode[i].eType];
+ sqlite3_result_blob(ctx, p->aBlob, p->nBlob, SQLITE_TRANSIENT);
}
- jsonPrintf(100, &s,"node %3u: %7s n=%-4d up=%-4d",
- i, zType, x.aNode[i].n, x.aUp[i]);
- assert( x.aNode[i].eU==0 || x.aNode[i].eU==1 );
- if( x.aNode[i].u.zJContent!=0 ){
- assert( x.aNode[i].eU==1 );
- jsonAppendRaw(&s, " ", 1);
- jsonAppendRaw(&s, x.aNode[i].u.zJContent, x.aNode[i].n);
- }else{
- assert( x.aNode[i].eU==0 );
+ }else{
+ JsonString s;
+ jsonStringInit(&s, ctx);
+ p->delta = 0;
+ jsonTranslateBlobToText(p, 0, &s);
+ jsonReturnString(&s, p, ctx);
+ sqlite3_result_subtype(ctx, JSON_SUBTYPE);
+ }
+}
+
+/****************************************************************************
+** SQL functions used for testing and debugging
+****************************************************************************/
+
+#if SQLITE_DEBUG
+/*
+** Decode JSONB bytes in aBlob[] starting at iStart through but not
+** including iEnd. Indent the
+** content by nIndent spaces.
+*/
+static void jsonDebugPrintBlob(
+ JsonParse *pParse, /* JSON content */
+ u32 iStart, /* Start rendering here */
+ u32 iEnd, /* Do not render this byte or any byte after this one */
+ int nIndent, /* Indent by this many spaces */
+ sqlite3_str *pOut /* Generate output into this sqlite3_str object */
+){
+ while( iStart<iEnd ){
+ u32 i, n, nn, sz = 0;
+ int showContent = 1;
+ u8 x = pParse->aBlob[iStart] & 0x0f;
+ u32 savedNBlob = pParse->nBlob;
+ sqlite3_str_appendf(pOut, "%5d:%*s", iStart, nIndent, "");
+ if( pParse->nBlobAlloc>pParse->nBlob ){
+ pParse->nBlob = pParse->nBlobAlloc;
+ }
+ nn = n = jsonbPayloadSize(pParse, iStart, &sz);
+ if( nn==0 ) nn = 1;
+ if( sz>0 && x<JSONB_ARRAY ){
+ nn += sz;
+ }
+ for(i=0; i<nn; i++){
+ sqlite3_str_appendf(pOut, " %02x", pParse->aBlob[iStart+i]);
+ }
+ if( n==0 ){
+ sqlite3_str_appendf(pOut, " ERROR invalid node size\n");
+ iStart = n==0 ? iStart+1 : iEnd;
+ continue;
}
- jsonAppendRaw(&s, "\n", 1);
+ pParse->nBlob = savedNBlob;
+ if( iStart+n+sz>iEnd ){
+ iEnd = iStart+n+sz;
+ if( iEnd>pParse->nBlob ){
+ if( pParse->nBlobAlloc>0 && iEnd>pParse->nBlobAlloc ){
+ iEnd = pParse->nBlobAlloc;
+ }else{
+ iEnd = pParse->nBlob;
+ }
+ }
+ }
+ sqlite3_str_appendall(pOut," <-- ");
+ switch( x ){
+ case JSONB_NULL: sqlite3_str_appendall(pOut,"null"); break;
+ case JSONB_TRUE: sqlite3_str_appendall(pOut,"true"); break;
+ case JSONB_FALSE: sqlite3_str_appendall(pOut,"false"); break;
+ case JSONB_INT: sqlite3_str_appendall(pOut,"int"); break;
+ case JSONB_INT5: sqlite3_str_appendall(pOut,"int5"); break;
+ case JSONB_FLOAT: sqlite3_str_appendall(pOut,"float"); break;
+ case JSONB_FLOAT5: sqlite3_str_appendall(pOut,"float5"); break;
+ case JSONB_TEXT: sqlite3_str_appendall(pOut,"text"); break;
+ case JSONB_TEXTJ: sqlite3_str_appendall(pOut,"textj"); break;
+ case JSONB_TEXT5: sqlite3_str_appendall(pOut,"text5"); break;
+ case JSONB_TEXTRAW: sqlite3_str_appendall(pOut,"textraw"); break;
+ case JSONB_ARRAY: {
+ sqlite3_str_appendf(pOut,"array, %u bytes\n", sz);
+ jsonDebugPrintBlob(pParse, iStart+n, iStart+n+sz, nIndent+2, pOut);
+ showContent = 0;
+ break;
+ }
+ case JSONB_OBJECT: {
+ sqlite3_str_appendf(pOut, "object, %u bytes\n", sz);
+ jsonDebugPrintBlob(pParse, iStart+n, iStart+n+sz, nIndent+2, pOut);
+ showContent = 0;
+ break;
+ }
+ default: {
+ sqlite3_str_appendall(pOut, "ERROR: unknown node type\n");
+ showContent = 0;
+ break;
+ }
+ }
+ if( showContent ){
+ if( sz==0 && x<=JSONB_FALSE ){
+ sqlite3_str_append(pOut, "\n", 1);
+ }else{
+ u32 i;
+ sqlite3_str_appendall(pOut, ": \"");
+ for(i=iStart+n; i<iStart+n+sz; i++){
+ u8 c = pParse->aBlob[i];
+ if( c<0x20 || c>=0x7f ) c = '.';
+ sqlite3_str_append(pOut, (char*)&c, 1);
+ }
+ sqlite3_str_append(pOut, "\"\n", 2);
+ }
+ }
+ iStart += n + sz;
+ }
+}
+static void jsonShowParse(JsonParse *pParse){
+ sqlite3_str out;
+ char zBuf[1000];
+ if( pParse==0 ){
+ printf("NULL pointer\n");
+ return;
+ }else{
+ printf("nBlobAlloc = %u\n", pParse->nBlobAlloc);
+ printf("nBlob = %u\n", pParse->nBlob);
+ printf("delta = %d\n", pParse->delta);
+ if( pParse->nBlob==0 ) return;
+ printf("content (bytes 0..%u):\n", pParse->nBlob-1);
}
- jsonParseReset(&x);
- jsonResult(&s);
+ sqlite3StrAccumInit(&out, 0, zBuf, sizeof(zBuf), 1000000);
+ jsonDebugPrintBlob(pParse, 0, pParse->nBlob, 0, &out);
+ printf("%s", sqlite3_str_value(&out));
+ sqlite3_str_reset(&out);
}
+#endif /* SQLITE_DEBUG */
+#ifdef SQLITE_DEBUG
/*
-** The json_test1(JSON) function return true (1) if the input is JSON
-** text generated by another json function. It returns (0) if the input
-** is not known to be JSON.
+** SQL function: json_parse(JSON)
+**
+** Parse JSON using jsonParseFuncArg(). Return text that is a
+** human-readable dump of the binary JSONB for the input parameter.
*/
-static void jsonTest1Func(
+static void jsonParseFunc(
sqlite3_context *ctx,
int argc,
sqlite3_value **argv
){
- UNUSED_PARAMETER(argc);
- sqlite3_result_int(ctx, sqlite3_value_subtype(argv[0])==JSON_SUBTYPE);
+ JsonParse *p; /* The parse */
+ sqlite3_str out;
+
+ assert( argc>=1 );
+ sqlite3StrAccumInit(&out, 0, 0, 0, 1000000);
+ p = jsonParseFuncArg(ctx, argv[0], 0);
+ if( p==0 ) return;
+ if( argc==1 ){
+ jsonDebugPrintBlob(p, 0, p->nBlob, 0, &out);
+ sqlite3_result_text64(ctx, out.zText, out.nChar, SQLITE_DYNAMIC, SQLITE_UTF8);
+ }else{
+ jsonShowParse(p);
+ }
+ jsonParseFree(p);
}
#endif /* SQLITE_DEBUG */
@@ -201757,7 +206743,7 @@ static void jsonTest1Func(
****************************************************************************/
/*
-** Implementation of the json_QUOTE(VALUE) function. Return a JSON value
+** Implementation of the json_quote(VALUE) function. Return a JSON value
** corresponding to the SQL value input. Mostly this means putting
** double-quotes around strings and returning the unquoted string "null"
** when given a NULL input.
@@ -201770,9 +206756,9 @@ static void jsonQuoteFunc(
JsonString jx;
UNUSED_PARAMETER(argc);
- jsonInit(&jx, ctx);
- jsonAppendValue(&jx, argv[0]);
- jsonResult(&jx);
+ jsonStringInit(&jx, ctx);
+ jsonAppendSqlValue(&jx, argv[0]);
+ jsonReturnString(&jx, 0, 0);
sqlite3_result_subtype(ctx, JSON_SUBTYPE);
}
@@ -201789,18 +206775,17 @@ static void jsonArrayFunc(
int i;
JsonString jx;
- jsonInit(&jx, ctx);
+ jsonStringInit(&jx, ctx);
jsonAppendChar(&jx, '[');
for(i=0; i<argc; i++){
jsonAppendSeparator(&jx);
- jsonAppendValue(&jx, argv[i]);
+ jsonAppendSqlValue(&jx, argv[i]);
}
jsonAppendChar(&jx, ']');
- jsonResult(&jx);
+ jsonReturnString(&jx, 0, 0);
sqlite3_result_subtype(ctx, JSON_SUBTYPE);
}
-
/*
** json_array_length(JSON)
** json_array_length(JSON, PATH)
@@ -201814,39 +206799,53 @@ static void jsonArrayLengthFunc(
sqlite3_value **argv
){
JsonParse *p; /* The parse */
- sqlite3_int64 n = 0;
+ sqlite3_int64 cnt = 0;
u32 i;
- JsonNode *pNode;
+ u8 eErr = 0;
- p = jsonParseCached(ctx, argv, ctx);
+ p = jsonParseFuncArg(ctx, argv[0], 0);
if( p==0 ) return;
- assert( p->nNode );
if( argc==2 ){
const char *zPath = (const char*)sqlite3_value_text(argv[1]);
- pNode = jsonLookup(p, zPath, 0, ctx);
+ if( zPath==0 ){
+ jsonParseFree(p);
+ return;
+ }
+ i = jsonLookupStep(p, 0, zPath[0]=='$' ? zPath+1 : "@", 0);
+ if( JSON_LOOKUP_ISERROR(i) ){
+ if( i==JSON_LOOKUP_NOTFOUND ){
+ /* no-op */
+ }else if( i==JSON_LOOKUP_PATHERROR ){
+ jsonBadPathError(ctx, zPath);
+ }else{
+ sqlite3_result_error(ctx, "malformed JSON", -1);
+ }
+ eErr = 1;
+ i = 0;
+ }
}else{
- pNode = p->aNode;
+ i = 0;
}
- if( pNode==0 ){
- return;
+ if( (p->aBlob[i] & 0x0f)==JSONB_ARRAY ){
+ cnt = jsonbArrayCount(p, i);
}
- if( pNode->eType==JSON_ARRAY ){
- assert( (pNode->jnFlags & JNODE_APPEND)==0 );
- for(i=1; i<=pNode->n; n++){
- i += jsonNodeSize(&pNode[i]);
- }
- }
- sqlite3_result_int64(ctx, n);
+ if( !eErr ) sqlite3_result_int64(ctx, cnt);
+ jsonParseFree(p);
}
-/*
-** Bit values for the flags passed into jsonExtractFunc() or
-** jsonSetFunc() via the user-data value.
-*/
-#define JSON_JSON 0x01 /* Result is always JSON */
-#define JSON_SQL 0x02 /* Result is always SQL */
-#define JSON_ABPATH 0x03 /* Allow abbreviated JSON path specs */
-#define JSON_ISSET 0x04 /* json_set(), not json_insert() */
+/* True if the string is all digits */
+static int jsonAllDigits(const char *z, int n){
+ int i;
+ for(i=0; i<n && sqlite3Isdigit(z[i]); i++){}
+ return i==n;
+}
+
+/* True if the string is all alphanumerics and underscores */
+static int jsonAllAlphanum(const char *z, int n){
+ int i;
+ for(i=0; i<n && (sqlite3Isalnum(z[i]) || z[i]=='_'); i++){}
+ return i==n;
+}
/*
** json_extract(JSON, PATH, ...)
@@ -201873,159 +206872,307 @@ static void jsonExtractFunc(
int argc,
sqlite3_value **argv
){
- JsonParse *p; /* The parse */
- JsonNode *pNode;
- const char *zPath;
- int flags = SQLITE_PTR_TO_INT(sqlite3_user_data(ctx));
- JsonString jx;
+ JsonParse *p = 0; /* The parse */
+ int flags; /* Flags associated with the function */
+ int i; /* Loop counter */
+ JsonString jx; /* String for array result */
if( argc<2 ) return;
- p = jsonParseCached(ctx, argv, ctx);
+ p = jsonParseFuncArg(ctx, argv[0], 0);
if( p==0 ) return;
- if( argc==2 ){
+ flags = SQLITE_PTR_TO_INT(sqlite3_user_data(ctx));
+ jsonStringInit(&jx, ctx);
+ if( argc>2 ){
+ jsonAppendChar(&jx, '[');
+ }
+ for(i=1; i<argc; i++){
/* With a single PATH argument */
- zPath = (const char*)sqlite3_value_text(argv[1]);
- if( zPath==0 ) return;
- if( flags & JSON_ABPATH ){
- if( zPath[0]!='$' || (zPath[1]!='.' && zPath[1]!='[' && zPath[1]!=0) ){
- /* The -> and ->> operators accept abbreviated PATH arguments. This
- ** is mostly for compatibility with PostgreSQL, but also for
- ** convenience.
- **
- ** NUMBER ==> $[NUMBER] // PG compatible
- ** LABEL ==> $.LABEL // PG compatible
- ** [NUMBER] ==> $[NUMBER] // Not PG. Purely for convenience
- */
- jsonInit(&jx, ctx);
- if( sqlite3Isdigit(zPath[0]) ){
- jsonAppendRaw(&jx, "$[", 2);
- jsonAppendRaw(&jx, zPath, (int)strlen(zPath));
- jsonAppendRaw(&jx, "]", 2);
- }else{
- jsonAppendRaw(&jx, "$.", 1 + (zPath[0]!='['));
- jsonAppendRaw(&jx, zPath, (int)strlen(zPath));
- jsonAppendChar(&jx, 0);
- }
- pNode = jx.bErr ? 0 : jsonLookup(p, jx.zBuf, 0, ctx);
- jsonReset(&jx);
+ const char *zPath = (const char*)sqlite3_value_text(argv[i]);
+ int nPath;
+ u32 j;
+ if( zPath==0 ) goto json_extract_error;
+ nPath = sqlite3Strlen30(zPath);
+ if( zPath[0]=='$' ){
+ j = jsonLookupStep(p, 0, zPath+1, 0);
+ }else if( (flags & JSON_ABPATH) ){
+ /* The -> and ->> operators accept abbreviated PATH arguments. This
+ ** is mostly for compatibility with PostgreSQL, but also for
+ ** convenience.
+ **
+ ** NUMBER ==> $[NUMBER] // PG compatible
+ ** LABEL ==> $.LABEL // PG compatible
+ ** [NUMBER] ==> $[NUMBER] // Not PG. Purely for convenience
+ */
+ jsonStringInit(&jx, ctx);
+ if( jsonAllDigits(zPath, nPath) ){
+ jsonAppendRawNZ(&jx, "[", 1);
+ jsonAppendRaw(&jx, zPath, nPath);
+ jsonAppendRawNZ(&jx, "]", 2);
+ }else if( jsonAllAlphanum(zPath, nPath) ){
+ jsonAppendRawNZ(&jx, ".", 1);
+ jsonAppendRaw(&jx, zPath, nPath);
+ }else if( zPath[0]=='[' && nPath>=3 && zPath[nPath-1]==']' ){
+ jsonAppendRaw(&jx, zPath, nPath);
}else{
- pNode = jsonLookup(p, zPath, 0, ctx);
+ jsonAppendRawNZ(&jx, ".\"", 2);
+ jsonAppendRaw(&jx, zPath, nPath);
+ jsonAppendRawNZ(&jx, "\"", 1);
}
- if( pNode ){
+ jsonStringTerminate(&jx);
+ j = jsonLookupStep(p, 0, jx.zBuf, 0);
+ jsonStringReset(&jx);
+ }else{
+ jsonBadPathError(ctx, zPath);
+ goto json_extract_error;
+ }
+ if( j<p->nBlob ){
+ if( argc==2 ){
if( flags & JSON_JSON ){
- jsonReturnJson(pNode, ctx, 0);
+ jsonStringInit(&jx, ctx);
+ jsonTranslateBlobToText(p, j, &jx);
+ jsonReturnString(&jx, 0, 0);
+ jsonStringReset(&jx);
+ assert( (flags & JSON_BLOB)==0 );
+ sqlite3_result_subtype(ctx, JSON_SUBTYPE);
}else{
- jsonReturn(pNode, ctx, 0);
- sqlite3_result_subtype(ctx, 0);
+ jsonReturnFromBlob(p, j, ctx, 0);
+ if( (flags & (JSON_SQL|JSON_BLOB))==0
+ && (p->aBlob[j]&0x0f)>=JSONB_ARRAY
+ ){
+ sqlite3_result_subtype(ctx, JSON_SUBTYPE);
+ }
}
+ }else{
+ jsonAppendSeparator(&jx);
+ jsonTranslateBlobToText(p, j, &jx);
}
- }else{
- pNode = jsonLookup(p, zPath, 0, ctx);
- if( p->nErr==0 && pNode ) jsonReturn(pNode, ctx, 0);
- }
- }else{
- /* Two or more PATH arguments results in a JSON array with each
- ** element of the array being the value selected by one of the PATHs */
- int i;
- jsonInit(&jx, ctx);
- jsonAppendChar(&jx, '[');
- for(i=1; i<argc; i++){
- zPath = (const char*)sqlite3_value_text(argv[i]);
- pNode = jsonLookup(p, zPath, 0, ctx);
- if( p->nErr ) break;
- jsonAppendSeparator(&jx);
- if( pNode ){
- jsonRenderNode(pNode, &jx, 0);
+ }else if( j==JSON_LOOKUP_NOTFOUND ){
+ if( argc==2 ){
+ goto json_extract_error; /* Return NULL if not found */
}else{
- jsonAppendRaw(&jx, "null", 4);
+ jsonAppendSeparator(&jx);
+ jsonAppendRawNZ(&jx, "null", 4);
}
+ }else if( j==JSON_LOOKUP_ERROR ){
+ sqlite3_result_error(ctx, "malformed JSON", -1);
+ goto json_extract_error;
+ }else{
+ jsonBadPathError(ctx, zPath);
+ goto json_extract_error;
}
- if( i==argc ){
- jsonAppendChar(&jx, ']');
- jsonResult(&jx);
+ }
+ if( argc>2 ){
+ jsonAppendChar(&jx, ']');
+ jsonReturnString(&jx, 0, 0);
+ if( (flags & JSON_BLOB)==0 ){
sqlite3_result_subtype(ctx, JSON_SUBTYPE);
}
- jsonReset(&jx);
}
+json_extract_error:
+ jsonStringReset(&jx);
+ jsonParseFree(p);
+ return;
}
-/* This is the RFC 7396 MergePatch algorithm.
-*/
-static JsonNode *jsonMergePatch(
- JsonParse *pParse, /* The JSON parser that contains the TARGET */
- u32 iTarget, /* Node of the TARGET in pParse */
- JsonNode *pPatch /* The PATCH */
-){
- u32 i, j;
- u32 iRoot;
- JsonNode *pTarget;
- if( pPatch->eType!=JSON_OBJECT ){
- return pPatch;
- }
- assert( iTarget<pParse->nNode );
- pTarget = &pParse->aNode[iTarget];
- assert( (pPatch->jnFlags & JNODE_APPEND)==0 );
- if( pTarget->eType!=JSON_OBJECT ){
- jsonRemoveAllNulls(pPatch);
- return pPatch;
- }
- iRoot = iTarget;
- for(i=1; i<pPatch->n; i += jsonNodeSize(&pPatch[i+1])+1){
- u32 nKey;
- const char *zKey;
- assert( pPatch[i].eType==JSON_STRING );
- assert( pPatch[i].jnFlags & JNODE_LABEL );
- assert( pPatch[i].eU==1 );
- nKey = pPatch[i].n;
- zKey = pPatch[i].u.zJContent;
- for(j=1; j<pTarget->n; j += jsonNodeSize(&pTarget[j+1])+1 ){
- assert( pTarget[j].eType==JSON_STRING );
- assert( pTarget[j].jnFlags & JNODE_LABEL );
- if( jsonSameLabel(&pPatch[i], &pTarget[j]) ){
- if( pTarget[j+1].jnFlags & (JNODE_REMOVE|JNODE_PATCH) ) break;
- if( pPatch[i+1].eType==JSON_NULL ){
- pTarget[j+1].jnFlags |= JNODE_REMOVE;
- }else{
- JsonNode *pNew = jsonMergePatch(pParse, iTarget+j+1, &pPatch[i+1]);
- if( pNew==0 ) return 0;
- pTarget = &pParse->aNode[iTarget];
- if( pNew!=&pTarget[j+1] ){
- assert( pTarget[j+1].eU==0
- || pTarget[j+1].eU==1
- || pTarget[j+1].eU==2 );
- testcase( pTarget[j+1].eU==1 );
- testcase( pTarget[j+1].eU==2 );
- VVA( pTarget[j+1].eU = 5 );
- pTarget[j+1].u.pPatch = pNew;
- pTarget[j+1].jnFlags |= JNODE_PATCH;
- }
- }
- break;
+/*
+** Return codes for jsonMergePatch()
+*/
+#define JSON_MERGE_OK 0 /* Success */
+#define JSON_MERGE_BADTARGET 1 /* Malformed TARGET blob */
+#define JSON_MERGE_BADPATCH 2 /* Malformed PATCH blob */
+#define JSON_MERGE_OOM 3 /* Out-of-memory condition */
+
+/*
+** RFC-7396 MergePatch for two JSONB blobs.
+**
+** pTarget is the target. pPatch is the patch. The target is updated
+** in place. The patch is read-only.
+**
+** The original RFC-7396 algorithm is this:
+**
+** define MergePatch(Target, Patch):
+** if Patch is an Object:
+** if Target is not an Object:
+** Target = {} # Ignore the contents and set it to an empty Object
+** for each Name/Value pair in Patch:
+** if Value is null:
+** if Name exists in Target:
+** remove the Name/Value pair from Target
+** else:
+** Target[Name] = MergePatch(Target[Name], Value)
+** return Target
+** else:
+** return Patch
+**
+** Here is an equivalent algorithm restructured to show the actual
+** implementation:
+**
+** 01 define MergePatch(Target, Patch):
+** 02 if Patch is not an Object:
+** 03 return Patch
+** 04 else: // if Patch is an Object
+** 05 if Target is not an Object:
+** 06 Target = {}
+** 07 for each Name/Value pair in Patch:
+** 08 if Name exists in Target:
+** 09 if Value is null:
+** 10 remove the Name/Value pair from Target
+** 11 else
+** 12 Target[name] = MergePatch(Target[Name], Value)
+** 13 else if Value is not NULL:
+** 14 if Value is not an Object:
+** 15 Target[name] = Value
+** 16 else:
+** 17 Target[name] = MergePatch('{}',value)
+** 18 return Target
+** |
+** ^---- Line numbers referenced in comments in the implementation
+*/
+static int jsonMergePatch(
+ JsonParse *pTarget, /* The JSON parser that contains the TARGET */
+ u32 iTarget, /* Index of TARGET in pTarget->aBlob[] */
+ const JsonParse *pPatch, /* The PATCH */
+ u32 iPatch /* Index of PATCH in pPatch->aBlob[] */
+){
+ u8 x; /* Type of a single node */
+ u32 n, sz=0; /* Return values from jsonbPayloadSize() */
+ u32 iTCursor; /* Cursor position while scanning the target object */
+ u32 iTStart; /* First label in the target object */
+ u32 iTEndBE; /* Original first byte past end of target, before edit */
+ u32 iTEnd; /* Current first byte past end of target */
+ u8 eTLabel; /* Node type of the target label */
+ u32 iTLabel = 0; /* Index of the label */
+ u32 nTLabel = 0; /* Header size in bytes for the target label */
+ u32 szTLabel = 0; /* Size of the target label payload */
+ u32 iTValue = 0; /* Index of the target value */
+ u32 nTValue = 0; /* Header size of the target value */
+ u32 szTValue = 0; /* Payload size for the target value */
+
+ u32 iPCursor; /* Cursor position while scanning the patch */
+ u32 iPEnd; /* First byte past the end of the patch */
+ u8 ePLabel; /* Node type of the patch label */
+ u32 iPLabel; /* Start of patch label */
+ u32 nPLabel; /* Size of header on the patch label */
+ u32 szPLabel; /* Payload size of the patch label */
+ u32 iPValue; /* Start of patch value */
+ u32 nPValue; /* Header size for the patch value */
+ u32 szPValue; /* Payload size of the patch value */
+
+ assert( iTarget>=0 && iTarget<pTarget->nBlob );
+ assert( iPatch>=0 && iPatch<pPatch->nBlob );
+ x = pPatch->aBlob[iPatch] & 0x0f;
+ if( x!=JSONB_OBJECT ){ /* Algorithm line 02 */
+ u32 szPatch; /* Total size of the patch, header+payload */
+ u32 szTarget; /* Total size of the target, header+payload */
+ n = jsonbPayloadSize(pPatch, iPatch, &sz);
+ szPatch = n+sz;
+ sz = 0;
+ n = jsonbPayloadSize(pTarget, iTarget, &sz);
+ szTarget = n+sz;
+ jsonBlobEdit(pTarget, iTarget, szTarget, pPatch->aBlob+iPatch, szPatch);
+ return pTarget->oom ? JSON_MERGE_OOM : JSON_MERGE_OK; /* Line 03 */
+ }
+ x = pTarget->aBlob[iTarget] & 0x0f;
+ if( x!=JSONB_OBJECT ){ /* Algorithm line 05 */
+ n = jsonbPayloadSize(pTarget, iTarget, &sz);
+ jsonBlobEdit(pTarget, iTarget+n, sz, 0, 0);
+ x = pTarget->aBlob[iTarget];
+ pTarget->aBlob[iTarget] = (x & 0xf0) | JSONB_OBJECT;
+ }
+ n = jsonbPayloadSize(pPatch, iPatch, &sz);
+ if( NEVER(n==0) ) return JSON_MERGE_BADPATCH;
+ iPCursor = iPatch+n;
+ iPEnd = iPCursor+sz;
+ n = jsonbPayloadSize(pTarget, iTarget, &sz);
+ if( NEVER(n==0) ) return JSON_MERGE_BADTARGET;
+ iTStart = iTarget+n;
+ iTEndBE = iTStart+sz;
+
+ while( iPCursor<iPEnd ){ /* Algorithm line 07 */
+ iPLabel = iPCursor;
+ ePLabel = pPatch->aBlob[iPCursor] & 0x0f;
+ if( ePLabel<JSONB_TEXT || ePLabel>JSONB_TEXTRAW ){
+ return JSON_MERGE_BADPATCH;
+ }
+ nPLabel = jsonbPayloadSize(pPatch, iPCursor, &szPLabel);
+ if( nPLabel==0 ) return JSON_MERGE_BADPATCH;
+ iPValue = iPCursor + nPLabel + szPLabel;
+ if( iPValue>=iPEnd ) return JSON_MERGE_BADPATCH;
+ nPValue = jsonbPayloadSize(pPatch, iPValue, &szPValue);
+ if( nPValue==0 ) return JSON_MERGE_BADPATCH;
+ iPCursor = iPValue + nPValue + szPValue;
+ if( iPCursor>iPEnd ) return JSON_MERGE_BADPATCH;
+
+ iTCursor = iTStart;
+ iTEnd = iTEndBE + pTarget->delta;
+ while( iTCursor<iTEnd ){
+ int isEqual; /* true if the patch and target labels match */
+ iTLabel = iTCursor;
+ eTLabel = pTarget->aBlob[iTCursor] & 0x0f;
+ if( eTLabel<JSONB_TEXT || eTLabel>JSONB_TEXTRAW ){
+ return JSON_MERGE_BADTARGET;
+ }
+ nTLabel = jsonbPayloadSize(pTarget, iTCursor, &szTLabel);
+ if( nTLabel==0 ) return JSON_MERGE_BADTARGET;
+ iTValue = iTLabel + nTLabel + szTLabel;
+ if( iTValue>=iTEnd ) return JSON_MERGE_BADTARGET;
+ nTValue = jsonbPayloadSize(pTarget, iTValue, &szTValue);
+ if( nTValue==0 ) return JSON_MERGE_BADTARGET;
+ if( iTValue + nTValue + szTValue > iTEnd ) return JSON_MERGE_BADTARGET;
+ isEqual = jsonLabelCompare(
+ (const char*)&pPatch->aBlob[iPLabel+nPLabel],
+ szPLabel,
+ (ePLabel==JSONB_TEXT || ePLabel==JSONB_TEXTRAW),
+ (const char*)&pTarget->aBlob[iTLabel+nTLabel],
+ szTLabel,
+ (eTLabel==JSONB_TEXT || eTLabel==JSONB_TEXTRAW));
+ if( isEqual ) break;
+ iTCursor = iTValue + nTValue + szTValue;
+ }
+ x = pPatch->aBlob[iPValue] & 0x0f;
+ if( iTCursor<iTEnd ){
+ /* A match was found. Algorithm line 08 */
+ if( x==0 ){
+ /* Patch value is NULL. Algorithm line 09 */
+ jsonBlobEdit(pTarget, iTLabel, nTLabel+szTLabel+nTValue+szTValue, 0,0);
+ /* vvvvvv----- No OOM on a delete-only edit */
+ if( NEVER(pTarget->oom) ) return JSON_MERGE_OOM;
+ }else{
+ /* Algorithm line 12 */
+ int rc, savedDelta = pTarget->delta;
+ pTarget->delta = 0;
+ rc = jsonMergePatch(pTarget, iTValue, pPatch, iPValue);
+ if( rc ) return rc;
+ pTarget->delta += savedDelta;
+ }
+ }else if( x>0 ){ /* Algorithm line 13 */
+ /* No match and patch value is not NULL */
+ u32 szNew = szPLabel+nPLabel;
+ if( (pPatch->aBlob[iPValue] & 0x0f)!=JSONB_OBJECT ){ /* Line 14 */
+ jsonBlobEdit(pTarget, iTEnd, 0, 0, szPValue+nPValue+szNew);
+ if( pTarget->oom ) return JSON_MERGE_OOM;
+ memcpy(&pTarget->aBlob[iTEnd], &pPatch->aBlob[iPLabel], szNew);
+ memcpy(&pTarget->aBlob[iTEnd+szNew],
+ &pPatch->aBlob[iPValue], szPValue+nPValue);
+ }else{
+ int rc, savedDelta;
+ jsonBlobEdit(pTarget, iTEnd, 0, 0, szNew+1);
+ if( pTarget->oom ) return JSON_MERGE_OOM;
+ memcpy(&pTarget->aBlob[iTEnd], &pPatch->aBlob[iPLabel], szNew);
+ pTarget->aBlob[iTEnd+szNew] = 0x00;
+ savedDelta = pTarget->delta;
+ pTarget->delta = 0;
+ rc = jsonMergePatch(pTarget, iTEnd+szNew,pPatch,iPValue);
+ if( rc ) return rc;
+ pTarget->delta += savedDelta;
}
}
- if( j>=pTarget->n && pPatch[i+1].eType!=JSON_NULL ){
- int iStart, iPatch;
- iStart = jsonParseAddNode(pParse, JSON_OBJECT, 2, 0);
- jsonParseAddNode(pParse, JSON_STRING, nKey, zKey);
- iPatch = jsonParseAddNode(pParse, JSON_TRUE, 0, 0);
- if( pParse->oom ) return 0;
- jsonRemoveAllNulls(pPatch);
- pTarget = &pParse->aNode[iTarget];
- assert( pParse->aNode[iRoot].eU==0 || pParse->aNode[iRoot].eU==2 );
- testcase( pParse->aNode[iRoot].eU==2 );
- pParse->aNode[iRoot].jnFlags |= JNODE_APPEND;
- VVA( pParse->aNode[iRoot].eU = 2 );
- pParse->aNode[iRoot].u.iAppend = iStart - iRoot;
- iRoot = iStart;
- assert( pParse->aNode[iPatch].eU==0 );
- VVA( pParse->aNode[iPatch].eU = 5 );
- pParse->aNode[iPatch].jnFlags |= JNODE_PATCH;
- pParse->aNode[iPatch].u.pPatch = &pPatch[i+1];
- }
}
- return pTarget;
+ if( pTarget->delta ) jsonAfterEditSizeAdjust(pTarget, iTarget);
+ return pTarget->oom ? JSON_MERGE_OOM : JSON_MERGE_OK;
}
+
/*
** Implementation of the json_mergepatch(JSON1,JSON2) function. Return a JSON
** object that is the result of running the RFC 7396 MergePatch() algorithm
@@ -202036,25 +207183,27 @@ static void jsonPatchFunc(
int argc,
sqlite3_value **argv
){
- JsonParse x; /* The JSON that is being patched */
- JsonParse y; /* The patch */
- JsonNode *pResult; /* The result of the merge */
+ JsonParse *pTarget; /* The TARGET */
+ JsonParse *pPatch; /* The PATCH */
+ int rc; /* Result code */
UNUSED_PARAMETER(argc);
- if( jsonParse(&x, ctx, (const char*)sqlite3_value_text(argv[0])) ) return;
- if( jsonParse(&y, ctx, (const char*)sqlite3_value_text(argv[1])) ){
- jsonParseReset(&x);
- return;
- }
- pResult = jsonMergePatch(&x, 0, y.aNode);
- assert( pResult!=0 || x.oom );
- if( pResult ){
- jsonReturnJson(pResult, ctx, 0);
- }else{
- sqlite3_result_error_nomem(ctx);
+ assert( argc==2 );
+ pTarget = jsonParseFuncArg(ctx, argv[0], JSON_EDITABLE);
+ if( pTarget==0 ) return;
+ pPatch = jsonParseFuncArg(ctx, argv[1], 0);
+ if( pPatch ){
+ rc = jsonMergePatch(pTarget, 0, pPatch, 0);
+ if( rc==JSON_MERGE_OK ){
+ jsonReturnParse(ctx, pTarget);
+ }else if( rc==JSON_MERGE_OOM ){
+ sqlite3_result_error_nomem(ctx);
+ }else{
+ sqlite3_result_error(ctx, "malformed JSON", -1);
+ }
+ jsonParseFree(pPatch);
}
- jsonParseReset(&x);
- jsonParseReset(&y);
+ jsonParseFree(pTarget);
}
@@ -202078,23 +207227,23 @@ static void jsonObjectFunc(
"of arguments", -1);
return;
}
- jsonInit(&jx, ctx);
+ jsonStringInit(&jx, ctx);
jsonAppendChar(&jx, '{');
for(i=0; i<argc; i+=2){
if( sqlite3_value_type(argv[i])!=SQLITE_TEXT ){
sqlite3_result_error(ctx, "json_object() labels must be TEXT", -1);
- jsonReset(&jx);
+ jsonStringReset(&jx);
return;
}
jsonAppendSeparator(&jx);
z = (const char*)sqlite3_value_text(argv[i]);
- n = (u32)sqlite3_value_bytes(argv[i]);
+ n = sqlite3_value_bytes(argv[i]);
jsonAppendString(&jx, z, n);
jsonAppendChar(&jx, ':');
- jsonAppendValue(&jx, argv[i+1]);
+ jsonAppendSqlValue(&jx, argv[i+1]);
}
jsonAppendChar(&jx, '}');
- jsonResult(&jx);
+ jsonReturnString(&jx, 0, 0);
sqlite3_result_subtype(ctx, JSON_SUBTYPE);
}
@@ -202110,26 +207259,50 @@ static void jsonRemoveFunc(
int argc,
sqlite3_value **argv
){
- JsonParse x; /* The parse */
- JsonNode *pNode;
- const char *zPath;
- u32 i;
+ JsonParse *p; /* The parse */
+ const char *zPath = 0; /* Path of element to be removed */
+ int i; /* Loop counter */
+ u32 rc; /* Subroutine return code */
if( argc<1 ) return;
- if( jsonParse(&x, ctx, (const char*)sqlite3_value_text(argv[0])) ) return;
- assert( x.nNode );
- for(i=1; i<(u32)argc; i++){
+ p = jsonParseFuncArg(ctx, argv[0], argc>1 ? JSON_EDITABLE : 0);
+ if( p==0 ) return;
+ for(i=1; i<argc; i++){
zPath = (const char*)sqlite3_value_text(argv[i]);
- if( zPath==0 ) goto remove_done;
- pNode = jsonLookup(&x, zPath, 0, ctx);
- if( x.nErr ) goto remove_done;
- if( pNode ) pNode->jnFlags |= JNODE_REMOVE;
- }
- if( (x.aNode[0].jnFlags & JNODE_REMOVE)==0 ){
- jsonReturnJson(x.aNode, ctx, 0);
+ if( zPath==0 ){
+ goto json_remove_done;
+ }
+ if( zPath[0]!='$' ){
+ goto json_remove_patherror;
+ }
+ if( zPath[1]==0 ){
+ /* json_remove(j,'$') returns NULL */
+ goto json_remove_done;
+ }
+ p->eEdit = JEDIT_DEL;
+ p->delta = 0;
+ rc = jsonLookupStep(p, 0, zPath+1, 0);
+ if( JSON_LOOKUP_ISERROR(rc) ){
+ if( rc==JSON_LOOKUP_NOTFOUND ){
+ continue; /* No-op */
+ }else if( rc==JSON_LOOKUP_PATHERROR ){
+ jsonBadPathError(ctx, zPath);
+ }else{
+ sqlite3_result_error(ctx, "malformed JSON", -1);
+ }
+ goto json_remove_done;
+ }
}
-remove_done:
- jsonParseReset(&x);
+ jsonReturnParse(ctx, p);
+ jsonParseFree(p);
+ return;
+
+json_remove_patherror:
+ jsonBadPathError(ctx, zPath);
+
+json_remove_done:
+ jsonParseFree(p);
+ return;
}
/*
@@ -202143,38 +207316,12 @@ static void jsonReplaceFunc(
int argc,
sqlite3_value **argv
){
- JsonParse x; /* The parse */
- JsonNode *pNode;
- const char *zPath;
- u32 i;
-
if( argc<1 ) return;
if( (argc&1)==0 ) {
jsonWrongNumArgs(ctx, "replace");
return;
}
- if( jsonParse(&x, ctx, (const char*)sqlite3_value_text(argv[0])) ) return;
- assert( x.nNode );
- for(i=1; i<(u32)argc; i+=2){
- zPath = (const char*)sqlite3_value_text(argv[i]);
- pNode = jsonLookup(&x, zPath, 0, ctx);
- if( x.nErr ) goto replace_err;
- if( pNode ){
- assert( pNode->eU==0 || pNode->eU==1 || pNode->eU==4 );
- testcase( pNode->eU!=0 && pNode->eU!=1 );
- pNode->jnFlags |= (u8)JNODE_REPLACE;
- VVA( pNode->eU = 4 );
- pNode->u.iReplace = i + 1;
- }
- }
- if( x.aNode[0].jnFlags & JNODE_REPLACE ){
- assert( x.aNode[0].eU==4 );
- sqlite3_result_value(ctx, argv[x.aNode[0].u.iReplace]);
- }else{
- jsonReturnJson(x.aNode, ctx, argv);
- }
-replace_err:
- jsonParseReset(&x);
+ jsonInsertIntoBlob(ctx, argc, argv, JEDIT_REPL);
}
@@ -202195,45 +207342,16 @@ static void jsonSetFunc(
int argc,
sqlite3_value **argv
){
- JsonParse x; /* The parse */
- JsonNode *pNode;
- const char *zPath;
- u32 i;
- int bApnd;
- int bIsSet = sqlite3_user_data(ctx)!=0;
+
+ int flags = SQLITE_PTR_TO_INT(sqlite3_user_data(ctx));
+ int bIsSet = (flags&JSON_ISSET)!=0;
if( argc<1 ) return;
if( (argc&1)==0 ) {
jsonWrongNumArgs(ctx, bIsSet ? "set" : "insert");
return;
}
- if( jsonParse(&x, ctx, (const char*)sqlite3_value_text(argv[0])) ) return;
- assert( x.nNode );
- for(i=1; i<(u32)argc; i+=2){
- zPath = (const char*)sqlite3_value_text(argv[i]);
- bApnd = 0;
- pNode = jsonLookup(&x, zPath, &bApnd, ctx);
- if( x.oom ){
- sqlite3_result_error_nomem(ctx);
- goto jsonSetDone;
- }else if( x.nErr ){
- goto jsonSetDone;
- }else if( pNode && (bApnd || bIsSet) ){
- testcase( pNode->eU!=0 && pNode->eU!=1 );
- assert( pNode->eU!=3 && pNode->eU!=5 );
- VVA( pNode->eU = 4 );
- pNode->jnFlags |= (u8)JNODE_REPLACE;
- pNode->u.iReplace = i + 1;
- }
- }
- if( x.aNode[0].jnFlags & JNODE_REPLACE ){
- assert( x.aNode[0].eU==4 );
- sqlite3_result_value(ctx, argv[x.aNode[0].u.iReplace]);
- }else{
- jsonReturnJson(x.aNode, ctx, argv);
- }
-jsonSetDone:
- jsonParseReset(&x);
+ jsonInsertIntoBlob(ctx, argc, argv, bIsSet ? JEDIT_SET : JEDIT_INS);
}
/*
@@ -202249,27 +207367,93 @@ static void jsonTypeFunc(
sqlite3_value **argv
){
JsonParse *p; /* The parse */
- const char *zPath;
- JsonNode *pNode;
+ const char *zPath = 0;
+ u32 i;
- p = jsonParseCached(ctx, argv, ctx);
+ p = jsonParseFuncArg(ctx, argv[0], 0);
if( p==0 ) return;
if( argc==2 ){
zPath = (const char*)sqlite3_value_text(argv[1]);
- pNode = jsonLookup(p, zPath, 0, ctx);
+ if( zPath==0 ) goto json_type_done;
+ if( zPath[0]!='$' ){
+ jsonBadPathError(ctx, zPath);
+ goto json_type_done;
+ }
+ i = jsonLookupStep(p, 0, zPath+1, 0);
+ if( JSON_LOOKUP_ISERROR(i) ){
+ if( i==JSON_LOOKUP_NOTFOUND ){
+ /* no-op */
+ }else if( i==JSON_LOOKUP_PATHERROR ){
+ jsonBadPathError(ctx, zPath);
+ }else{
+ sqlite3_result_error(ctx, "malformed JSON", -1);
+ }
+ goto json_type_done;
+ }
}else{
- pNode = p->aNode;
- }
- if( pNode ){
- sqlite3_result_text(ctx, jsonType[pNode->eType], -1, SQLITE_STATIC);
+ i = 0;
}
+ sqlite3_result_text(ctx, jsonbType[p->aBlob[i]&0x0f], -1, SQLITE_STATIC);
+json_type_done:
+ jsonParseFree(p);
}
/*
** json_valid(JSON)
-**
-** Return 1 if JSON is a well-formed canonical JSON string according
-** to RFC-7159. Return 0 otherwise.
+** json_valid(JSON, FLAGS)
+**
+** Check the JSON argument to see if it is well-formed. The FLAGS argument
+** encodes the various constraints on what is meant by "well-formed":
+**
+** 0x01 Canonical RFC-8259 JSON text
+** 0x02 JSON text with optional JSON-5 extensions
+** 0x04 Superficially appears to be JSONB
+** 0x08 Strictly well-formed JSONB
+**
+** If the FLAGS argument is omitted, it defaults to 1. Useful values for
+** FLAGS include:
+**
+** 1 Strict canonical JSON text
+** 2 JSON text perhaps with JSON-5 extensions
+** 4 Superficially appears to be JSONB
+** 5 Canonical JSON text or superficial JSONB
+** 6 JSON-5 text or superficial JSONB
+** 8 Strict JSONB
+** 9 Canonical JSON text or strict JSONB
+** 10 JSON-5 text or strict JSONB
+**
+** Other flag combinations are redundant. For example, every canonical
+** JSON text is also well-formed JSON-5 text, so FLAG values 2 and 3
+** are the same. Similarly, any input that passes a strict JSONB validation
+** will also pass the superficial validation so 12 through 15 are the same
+** as 8 through 11 respectively.
+**
+** This routine runs in linear time to validate text and when doing strict
+** JSONB validation. Superficial JSONB validation is constant time,
+** assuming the BLOB is already in memory. The performance advantage
+** of superficial JSONB validation is why that option is provided.
+** Application developers can choose to do fast superficial validation or
+** slower strict validation, according to their specific needs.
+**
+** Only the lower four bits of the FLAGS argument are currently used.
+** Higher bits are reserved for future expansion. To facilitate
+** compatibility, the current implementation raises an error if any bit
+** in FLAGS is set other than the lower four bits.
+**
+** The original circa 2015 implementation of the JSON routines in
+** SQLite only supported canonical RFC-8259 JSON text and the json_valid()
+** function only accepted one argument. That is why the default value
+** for the FLAGS argument is 1, since FLAGS=1 causes this routine to only
+** recognize canonical RFC-8259 JSON text as valid. The extra FLAGS
+** argument was added when the JSON routines were extended to support
+** JSON5-like extensions and binary JSONB stored in BLOBs.
+**
+** Return Values:
+**
+** * Raise an error if FLAGS is outside the range of 1 to 15.
+** * Return NULL if the input is NULL
+** * Return 1 if the input is well-formed.
+** * Return 0 if the input is not well-formed.
*/
static void jsonValidFunc(
sqlite3_context *ctx,
@@ -202277,73 +207461,128 @@ static void jsonValidFunc(
sqlite3_value **argv
){
JsonParse *p; /* The parse */
- UNUSED_PARAMETER(argc);
- if( sqlite3_value_type(argv[0])==SQLITE_NULL ) return;
- p = jsonParseCached(ctx, argv, 0);
- if( p==0 || p->oom ){
- sqlite3_result_error_nomem(ctx);
- sqlite3_free(p);
- }else{
- sqlite3_result_int(ctx, p->nErr==0 && p->hasNonstd==0);
- if( p->nErr ) jsonParseFree(p);
+ u8 flags = 1;
+ u8 res = 0;
+ if( argc==2 ){
+ i64 f = sqlite3_value_int64(argv[1]);
+ if( f<1 || f>15 ){
+ sqlite3_result_error(ctx, "FLAGS parameter to json_valid() must be"
+ " between 1 and 15", -1);
+ return;
+ }
+ flags = f & 0x0f;
+ }
+ switch( sqlite3_value_type(argv[0]) ){
+ case SQLITE_NULL: {
+#ifdef SQLITE_LEGACY_JSON_VALID
+ /* Incorrect legacy behavior was to return FALSE for a NULL input */
+ sqlite3_result_int(ctx, 0);
+#endif
+ return;
+ }
+ case SQLITE_BLOB: {
+ if( jsonFuncArgMightBeBinary(argv[0]) ){
+ if( flags & 0x04 ){
+ /* Superficial checking only - accomplished by the
+ ** jsonFuncArgMightBeBinary() call above. */
+ res = 1;
+ }else if( flags & 0x08 ){
+ /* Strict checking. Check by translating BLOB->TEXT->BLOB. If
+ ** no errors occur, call that a "strict check". */
+ JsonParse px;
+ u32 iErr;
+ memset(&px, 0, sizeof(px));
+ px.aBlob = (u8*)sqlite3_value_blob(argv[0]);
+ px.nBlob = sqlite3_value_bytes(argv[0]);
+ iErr = jsonbValidityCheck(&px, 0, px.nBlob, 1);
+ res = iErr==0;
+ }
+ break;
+ }
+ /* Fall through into interpreting the input as text. See note
+ ** above at tag-20240123-a. */
+ /* no break */ deliberate_fall_through
+ }
+ default: {
+ JsonParse px;
+ if( (flags & 0x3)==0 ) break;
+ memset(&px, 0, sizeof(px));
+
+ p = jsonParseFuncArg(ctx, argv[0], JSON_KEEPERROR);
+ if( p ){
+ if( p->oom ){
+ sqlite3_result_error_nomem(ctx);
+ }else if( p->nErr ){
+ /* no-op */
+ }else if( (flags & 0x02)!=0 || p->hasNonstd==0 ){
+ res = 1;
+ }
+ jsonParseFree(p);
+ }else{
+ sqlite3_result_error_nomem(ctx);
+ }
+ break;
+ }
}
+ sqlite3_result_int(ctx, res);
}
/*
** json_error_position(JSON)
**
-** If the argument is not an interpretable JSON string, then return the 1-based
-** character position at which the parser first recognized that the input
-** was in error. The left-most character is 1. If the string is valid
-** JSON, then return 0.
-**
-** Note that json_valid() is only true for strictly conforming canonical JSON.
-** But this routine returns zero if the input contains extension. Thus:
+** If the argument is NULL, return NULL
**
-** (1) If the input X is strictly conforming canonical JSON:
+** If the argument is BLOB, do a full validity check and return non-zero
+** if the check fails. The return value is the approximate 1-based offset
+** to the byte of the element that contains the first error.
**
-** json_valid(X) returns true
-** json_error_position(X) returns 0
-**
-** (2) If the input X is JSON but it includes extension (such as JSON5) that
-** are not part of RFC-8259:
-**
-** json_valid(X) returns false
-** json_error_position(X) return 0
-**
-** (3) If the input X cannot be interpreted as JSON even taking extensions
-** into account:
-**
-** json_valid(X) return false
-** json_error_position(X) returns 1 or more
+** Otherwise interpret the argument is TEXT (even if it is numeric) and
+** return the 1-based character position for where the parser first recognized
+** that the input was not valid JSON, or return 0 if the input text looks
+** ok. JSON-5 extensions are accepted.
*/
static void jsonErrorFunc(
sqlite3_context *ctx,
int argc,
sqlite3_value **argv
){
- JsonParse *p; /* The parse */
+ i64 iErrPos = 0; /* Error position to be returned */
+ JsonParse s;
+
+ assert( argc==1 );
UNUSED_PARAMETER(argc);
- if( sqlite3_value_type(argv[0])==SQLITE_NULL ) return;
- p = jsonParseCached(ctx, argv, 0);
- if( p==0 || p->oom ){
+ memset(&s, 0, sizeof(s));
+ s.db = sqlite3_context_db_handle(ctx);
+ if( jsonFuncArgMightBeBinary(argv[0]) ){
+ s.aBlob = (u8*)sqlite3_value_blob(argv[0]);
+ s.nBlob = sqlite3_value_bytes(argv[0]);
+ iErrPos = (i64)jsonbValidityCheck(&s, 0, s.nBlob, 1);
+ }else{
+ s.zJson = (char*)sqlite3_value_text(argv[0]);
+ if( s.zJson==0 ) return; /* NULL input or OOM */
+ s.nJson = sqlite3_value_bytes(argv[0]);
+ if( jsonConvertTextToBlob(&s,0) ){
+ if( s.oom ){
+ iErrPos = -1;
+ }else{
+ /* Convert byte-offset s.iErr into a character offset */
+ u32 k;
+ assert( s.zJson!=0 ); /* Because s.oom is false */
+ for(k=0; k<s.iErr && ALWAYS(s.zJson[k]); k++){
+ if( (s.zJson[k] & 0xc0)!=0x80 ) iErrPos++;
+ }
+ iErrPos++;
+ }
+ }
+ }
+ jsonParseReset(&s);
+ if( iErrPos<0 ){
sqlite3_result_error_nomem(ctx);
- sqlite3_free(p);
- }else if( p->nErr==0 ){
- sqlite3_result_int(ctx, 0);
}else{
- int n = 1;
- u32 i;
- const char *z = p->zJson;
- for(i=0; i<p->iErr && ALWAYS(z[i]); i++){
- if( (z[i]&0xc0)!=0x80 ) n++;
- }
- sqlite3_result_int(ctx, n);
- jsonParseFree(p);
+ sqlite3_result_int64(ctx, iErrPos);
}
}
-
/****************************************************************************
** Aggregate SQL function implementations
****************************************************************************/
@@ -202362,31 +207601,42 @@ static void jsonArrayStep(
pStr = (JsonString*)sqlite3_aggregate_context(ctx, sizeof(*pStr));
if( pStr ){
if( pStr->zBuf==0 ){
- jsonInit(pStr, ctx);
+ jsonStringInit(pStr, ctx);
jsonAppendChar(pStr, '[');
}else if( pStr->nUsed>1 ){
jsonAppendChar(pStr, ',');
}
pStr->pCtx = ctx;
- jsonAppendValue(pStr, argv[0]);
+ jsonAppendSqlValue(pStr, argv[0]);
}
}
static void jsonArrayCompute(sqlite3_context *ctx, int isFinal){
JsonString *pStr;
pStr = (JsonString*)sqlite3_aggregate_context(ctx, 0);
if( pStr ){
+ int flags;
pStr->pCtx = ctx;
jsonAppendChar(pStr, ']');
- if( pStr->bErr ){
- if( pStr->bErr==1 ) sqlite3_result_error_nomem(ctx);
- assert( pStr->bStatic );
+ flags = SQLITE_PTR_TO_INT(sqlite3_user_data(ctx));
+ if( pStr->eErr ){
+ jsonReturnString(pStr, 0, 0);
+ return;
+ }else if( flags & JSON_BLOB ){
+ jsonReturnStringAsBlob(pStr);
+ if( isFinal ){
+ if( !pStr->bStatic ) sqlite3RCStrUnref(pStr->zBuf);
+ }else{
+ jsonStringTrimOneChar(pStr);
+ }
+ return;
}else if( isFinal ){
sqlite3_result_text(ctx, pStr->zBuf, (int)pStr->nUsed,
- pStr->bStatic ? SQLITE_TRANSIENT : sqlite3_free);
+ pStr->bStatic ? SQLITE_TRANSIENT :
+ sqlite3RCStrUnref);
pStr->bStatic = 1;
}else{
sqlite3_result_text(ctx, pStr->zBuf, (int)pStr->nUsed, SQLITE_TRANSIENT);
- pStr->nUsed--;
+ jsonStringTrimOneChar(pStr);
}
}else{
sqlite3_result_text(ctx, "[]", 2, SQLITE_STATIC);
@@ -202423,7 +207673,7 @@ static void jsonGroupInverse(
pStr = (JsonString*)sqlite3_aggregate_context(ctx, 0);
#ifdef NEVER
/* pStr is always non-NULL since jsonArrayStep() or jsonObjectStep() will
- ** always have been called to initalize it */
+ ** always have been called to initialize it */
if( NEVER(!pStr) ) return;
#endif
z = pStr->zBuf;
@@ -202467,34 +207717,46 @@ static void jsonObjectStep(
pStr = (JsonString*)sqlite3_aggregate_context(ctx, sizeof(*pStr));
if( pStr ){
if( pStr->zBuf==0 ){
- jsonInit(pStr, ctx);
+ jsonStringInit(pStr, ctx);
jsonAppendChar(pStr, '{');
}else if( pStr->nUsed>1 ){
jsonAppendChar(pStr, ',');
}
pStr->pCtx = ctx;
z = (const char*)sqlite3_value_text(argv[0]);
- n = (u32)sqlite3_value_bytes(argv[0]);
+ n = sqlite3Strlen30(z);
jsonAppendString(pStr, z, n);
jsonAppendChar(pStr, ':');
- jsonAppendValue(pStr, argv[1]);
+ jsonAppendSqlValue(pStr, argv[1]);
}
}
static void jsonObjectCompute(sqlite3_context *ctx, int isFinal){
JsonString *pStr;
pStr = (JsonString*)sqlite3_aggregate_context(ctx, 0);
if( pStr ){
+ int flags;
jsonAppendChar(pStr, '}');
- if( pStr->bErr ){
- if( pStr->bErr==1 ) sqlite3_result_error_nomem(ctx);
- assert( pStr->bStatic );
+ pStr->pCtx = ctx;
+ flags = SQLITE_PTR_TO_INT(sqlite3_user_data(ctx));
+ if( pStr->eErr ){
+ jsonReturnString(pStr, 0, 0);
+ return;
+ }else if( flags & JSON_BLOB ){
+ jsonReturnStringAsBlob(pStr);
+ if( isFinal ){
+ if( !pStr->bStatic ) sqlite3RCStrUnref(pStr->zBuf);
+ }else{
+ jsonStringTrimOneChar(pStr);
+ }
+ return;
}else if( isFinal ){
sqlite3_result_text(ctx, pStr->zBuf, (int)pStr->nUsed,
- pStr->bStatic ? SQLITE_TRANSIENT : sqlite3_free);
+ pStr->bStatic ? SQLITE_TRANSIENT :
+ sqlite3RCStrUnref);
pStr->bStatic = 1;
}else{
sqlite3_result_text(ctx, pStr->zBuf, (int)pStr->nUsed, SQLITE_TRANSIENT);
- pStr->nUsed--;
+ jsonStringTrimOneChar(pStr);
}
}else{
sqlite3_result_text(ctx, "{}", 2, SQLITE_STATIC);
@@ -202514,19 +207776,37 @@ static void jsonObjectFinal(sqlite3_context *ctx){
/****************************************************************************
** The json_each virtual table
****************************************************************************/
+typedef struct JsonParent JsonParent;
+struct JsonParent {
+ u32 iHead; /* Start of object or array */
+ u32 iValue; /* Start of the value */
+ u32 iEnd; /* First byte past the end */
+ u32 nPath; /* Length of path */
+ i64 iKey; /* Key for JSONB_ARRAY */
+};
+
typedef struct JsonEachCursor JsonEachCursor;
struct JsonEachCursor {
sqlite3_vtab_cursor base; /* Base class - must be first */
u32 iRowid; /* The rowid */
- u32 iBegin; /* The first node of the scan */
- u32 i; /* Index in sParse.aNode[] of current row */
+ u32 i; /* Index in sParse.aBlob[] of current row */
u32 iEnd; /* EOF when i equals or exceeds this value */
- u8 eType; /* Type of top-level element */
+ u32 nRoot; /* Size of the root path in bytes */
+ u8 eType; /* Type of the container for element i */
u8 bRecursive; /* True for json_tree(). False for json_each() */
- char *zJson; /* Input JSON */
- char *zRoot; /* Path by which to filter zJson */
+ u32 nParent; /* Current nesting depth */
+ u32 nParentAlloc; /* Space allocated for aParent[] */
+ JsonParent *aParent; /* Parent elements of i */
+ sqlite3 *db; /* Database connection */
+ JsonString path; /* Current path */
JsonParse sParse; /* Parse of the input JSON */
};
+typedef struct JsonEachConnection JsonEachConnection;
+struct JsonEachConnection {
+ sqlite3_vtab base; /* Base class - must be first */
+ sqlite3 *db; /* Database connection */
+};
+
/* Constructor for the json_each virtual table */
static int jsonEachConnect(
@@ -202536,7 +207816,7 @@ static int jsonEachConnect(
sqlite3_vtab **ppVtab,
char **pzErr
){
- sqlite3_vtab *pNew;
+ JsonEachConnection *pNew;
int rc;
/* Column numbers */
@@ -202562,28 +207842,32 @@ static int jsonEachConnect(
"CREATE TABLE x(key,value,type,atom,id,parent,fullkey,path,"
"json HIDDEN,root HIDDEN)");
if( rc==SQLITE_OK ){
- pNew = *ppVtab = sqlite3_malloc( sizeof(*pNew) );
+ pNew = (JsonEachConnection*)sqlite3DbMallocZero(db, sizeof(*pNew));
+ *ppVtab = (sqlite3_vtab*)pNew;
if( pNew==0 ) return SQLITE_NOMEM;
- memset(pNew, 0, sizeof(*pNew));
sqlite3_vtab_config(db, SQLITE_VTAB_INNOCUOUS);
+ pNew->db = db;
}
return rc;
}
/* destructor for json_each virtual table */
static int jsonEachDisconnect(sqlite3_vtab *pVtab){
- sqlite3_free(pVtab);
+ JsonEachConnection *p = (JsonEachConnection*)pVtab;
+ sqlite3DbFree(p->db, pVtab);
return SQLITE_OK;
}
/* constructor for a JsonEachCursor object for json_each(). */
static int jsonEachOpenEach(sqlite3_vtab *p, sqlite3_vtab_cursor **ppCursor){
+ JsonEachConnection *pVtab = (JsonEachConnection*)p;
JsonEachCursor *pCur;
UNUSED_PARAMETER(p);
- pCur = sqlite3_malloc( sizeof(*pCur) );
+ pCur = sqlite3DbMallocZero(pVtab->db, sizeof(*pCur));
if( pCur==0 ) return SQLITE_NOMEM;
- memset(pCur, 0, sizeof(*pCur));
+ pCur->db = pVtab->db;
+ jsonStringZero(&pCur->path);
*ppCursor = &pCur->base;
return SQLITE_OK;
}
@@ -202601,22 +207885,24 @@ static int jsonEachOpenTree(sqlite3_vtab *p, sqlite3_vtab_cursor **ppCursor){
/* Reset a JsonEachCursor back to its original state. Free any memory
** held. */
static void jsonEachCursorReset(JsonEachCursor *p){
- sqlite3_free(p->zJson);
- sqlite3_free(p->zRoot);
jsonParseReset(&p->sParse);
+ jsonStringReset(&p->path);
+ sqlite3DbFree(p->db, p->aParent);
p->iRowid = 0;
p->i = 0;
+ p->aParent = 0;
+ p->nParent = 0;
+ p->nParentAlloc = 0;
p->iEnd = 0;
p->eType = 0;
- p->zJson = 0;
- p->zRoot = 0;
}
/* Destructor for a jsonEachCursor object */
static int jsonEachClose(sqlite3_vtab_cursor *cur){
JsonEachCursor *p = (JsonEachCursor*)cur;
jsonEachCursorReset(p);
- sqlite3_free(cur);
+
+ sqlite3DbFree(p->db, cur);
return SQLITE_OK;
}
@@ -202627,200 +207913,230 @@ static int jsonEachEof(sqlite3_vtab_cursor *cur){
return p->i >= p->iEnd;
}
-/* Advance the cursor to the next element for json_tree() */
-static int jsonEachNext(sqlite3_vtab_cursor *cur){
- JsonEachCursor *p = (JsonEachCursor*)cur;
- if( p->bRecursive ){
- if( p->sParse.aNode[p->i].jnFlags & JNODE_LABEL ) p->i++;
- p->i++;
- p->iRowid++;
- if( p->i<p->iEnd ){
- u32 iUp = p->sParse.aUp[p->i];
- JsonNode *pUp = &p->sParse.aNode[iUp];
- p->eType = pUp->eType;
- if( pUp->eType==JSON_ARRAY ){
- assert( pUp->eU==0 || pUp->eU==3 );
- testcase( pUp->eU==3 );
- VVA( pUp->eU = 3 );
- if( iUp==p->i-1 ){
- pUp->u.iKey = 0;
- }else{
- pUp->u.iKey++;
+/*
+** If the cursor is currently pointing at the label of a object entry,
+** then return the index of the value. For all other cases, return the
+** current pointer position, which is the value.
+*/
+static int jsonSkipLabel(JsonEachCursor *p){
+ if( p->eType==JSONB_OBJECT ){
+ u32 sz = 0;
+ u32 n = jsonbPayloadSize(&p->sParse, p->i, &sz);
+ return p->i + n + sz;
+ }else{
+ return p->i;
+ }
+}
+
+/*
+** Append the path name for the current element.
+*/
+static void jsonAppendPathName(JsonEachCursor *p){
+ assert( p->nParent>0 );
+ assert( p->eType==JSONB_ARRAY || p->eType==JSONB_OBJECT );
+ if( p->eType==JSONB_ARRAY ){
+ jsonPrintf(30, &p->path, "[%lld]", p->aParent[p->nParent-1].iKey);
+ }else{
+ u32 n, sz = 0, k, i;
+ const char *z;
+ int needQuote = 0;
+ n = jsonbPayloadSize(&p->sParse, p->i, &sz);
+ k = p->i + n;
+ z = (const char*)&p->sParse.aBlob[k];
+ if( sz==0 || !sqlite3Isalpha(z[0]) ){
+ needQuote = 1;
+ }else{
+ for(i=0; i<sz; i++){
+ if( !sqlite3Isalnum(z[i]) ){
+ needQuote = 1;
+ break;
}
}
}
- }else{
- switch( p->eType ){
- case JSON_ARRAY: {
- p->i += jsonNodeSize(&p->sParse.aNode[p->i]);
- p->iRowid++;
- break;
- }
- case JSON_OBJECT: {
- p->i += 1 + jsonNodeSize(&p->sParse.aNode[p->i+1]);
- p->iRowid++;
- break;
- }
- default: {
- p->i = p->iEnd;
- break;
- }
+ if( needQuote ){
+ jsonPrintf(sz+4,&p->path,".\"%.*s\"", sz, z);
+ }else{
+ jsonPrintf(sz+2,&p->path,".%.*s", sz, z);
}
}
- return SQLITE_OK;
}
-/* Append an object label to the JSON Path being constructed
-** in pStr.
-*/
-static void jsonAppendObjectPathElement(
- JsonString *pStr,
- JsonNode *pNode
-){
- int jj, nn;
- const char *z;
- assert( pNode->eType==JSON_STRING );
- assert( pNode->jnFlags & JNODE_LABEL );
- assert( pNode->eU==1 );
- z = pNode->u.zJContent;
- nn = pNode->n;
- if( (pNode->jnFlags & JNODE_RAW)==0 ){
- assert( nn>=2 );
- assert( z[0]=='"' || z[0]=='\'' );
- assert( z[nn-1]=='"' || z[0]=='\'' );
- if( nn>2 && sqlite3Isalpha(z[1]) ){
- for(jj=2; jj<nn-1 && sqlite3Isalnum(z[jj]); jj++){}
- if( jj==nn-1 ){
- z++;
- nn -= 2;
+/* Advance the cursor to the next element for json_tree() */
+static int jsonEachNext(sqlite3_vtab_cursor *cur){
+ JsonEachCursor *p = (JsonEachCursor*)cur;
+ int rc = SQLITE_OK;
+ if( p->bRecursive ){
+ u8 x;
+ u8 levelChange = 0;
+ u32 n, sz = 0;
+ u32 i = jsonSkipLabel(p);
+ x = p->sParse.aBlob[i] & 0x0f;
+ n = jsonbPayloadSize(&p->sParse, i, &sz);
+ if( x==JSONB_OBJECT || x==JSONB_ARRAY ){
+ JsonParent *pParent;
+ if( p->nParent>=p->nParentAlloc ){
+ JsonParent *pNew;
+ u64 nNew;
+ nNew = p->nParentAlloc*2 + 3;
+ pNew = sqlite3DbRealloc(p->db, p->aParent, sizeof(JsonParent)*nNew);
+ if( pNew==0 ) return SQLITE_NOMEM;
+ p->nParentAlloc = (u32)nNew;
+ p->aParent = pNew;
+ }
+ levelChange = 1;
+ pParent = &p->aParent[p->nParent];
+ pParent->iHead = p->i;
+ pParent->iValue = i;
+ pParent->iEnd = i + n + sz;
+ pParent->iKey = -1;
+ pParent->nPath = (u32)p->path.nUsed;
+ if( p->eType && p->nParent ){
+ jsonAppendPathName(p);
+ if( p->path.eErr ) rc = SQLITE_NOMEM;
+ }
+ p->nParent++;
+ p->i = i + n;
+ }else{
+ p->i = i + n + sz;
+ }
+ while( p->nParent>0 && p->i >= p->aParent[p->nParent-1].iEnd ){
+ p->nParent--;
+ p->path.nUsed = p->aParent[p->nParent].nPath;
+ levelChange = 1;
+ }
+ if( levelChange ){
+ if( p->nParent>0 ){
+ JsonParent *pParent = &p->aParent[p->nParent-1];
+ u32 iVal = pParent->iValue;
+ p->eType = p->sParse.aBlob[iVal] & 0x0f;
+ }else{
+ p->eType = 0;
}
}
+ }else{
+ u32 n, sz = 0;
+ u32 i = jsonSkipLabel(p);
+ n = jsonbPayloadSize(&p->sParse, i, &sz);
+ p->i = i + n + sz;
+ }
+ if( p->eType==JSONB_ARRAY && p->nParent ){
+ p->aParent[p->nParent-1].iKey++;
}
- jsonPrintf(nn+2, pStr, ".%.*s", nn, z);
+ p->iRowid++;
+ return rc;
}
-/* Append the name of the path for element i to pStr
+/* Length of the path for rowid==0 in bRecursive mode.
*/
-static void jsonEachComputePath(
- JsonEachCursor *p, /* The cursor */
- JsonString *pStr, /* Write the path here */
- u32 i /* Path to this element */
-){
- JsonNode *pNode, *pUp;
- u32 iUp;
- if( i==0 ){
- jsonAppendChar(pStr, '$');
- return;
- }
- iUp = p->sParse.aUp[i];
- jsonEachComputePath(p, pStr, iUp);
- pNode = &p->sParse.aNode[i];
- pUp = &p->sParse.aNode[iUp];
- if( pUp->eType==JSON_ARRAY ){
- assert( pUp->eU==3 || (pUp->eU==0 && pUp->u.iKey==0) );
- testcase( pUp->eU==0 );
- jsonPrintf(30, pStr, "[%d]", pUp->u.iKey);
- }else{
- assert( pUp->eType==JSON_OBJECT );
- if( (pNode->jnFlags & JNODE_LABEL)==0 ) pNode--;
- jsonAppendObjectPathElement(pStr, pNode);
+static int jsonEachPathLength(JsonEachCursor *p){
+ u32 n = p->path.nUsed;
+ char *z = p->path.zBuf;
+ if( p->iRowid==0 && p->bRecursive && n>=2 ){
+ while( n>1 ){
+ n--;
+ if( z[n]=='[' || z[n]=='.' ){
+ u32 x, sz = 0;
+ char cSaved = z[n];
+ z[n] = 0;
+ assert( p->sParse.eEdit==0 );
+ x = jsonLookupStep(&p->sParse, 0, z+1, 0);
+ z[n] = cSaved;
+ if( JSON_LOOKUP_ISERROR(x) ) continue;
+ if( x + jsonbPayloadSize(&p->sParse, x, &sz) == p->i ) break;
+ }
+ }
}
+ return n;
}
/* Return the value of a column */
static int jsonEachColumn(
sqlite3_vtab_cursor *cur, /* The cursor */
sqlite3_context *ctx, /* First argument to sqlite3_result_...() */
- int i /* Which column to return */
+ int iColumn /* Which column to return */
){
JsonEachCursor *p = (JsonEachCursor*)cur;
- JsonNode *pThis = &p->sParse.aNode[p->i];
- switch( i ){
+ switch( iColumn ){
case JEACH_KEY: {
- if( p->i==0 ) break;
- if( p->eType==JSON_OBJECT ){
- jsonReturn(pThis, ctx, 0);
- }else if( p->eType==JSON_ARRAY ){
- u32 iKey;
- if( p->bRecursive ){
- if( p->iRowid==0 ) break;
- assert( p->sParse.aNode[p->sParse.aUp[p->i]].eU==3 );
- iKey = p->sParse.aNode[p->sParse.aUp[p->i]].u.iKey;
+ if( p->nParent==0 ){
+ u32 n, j;
+ if( p->nRoot==1 ) break;
+ j = jsonEachPathLength(p);
+ n = p->nRoot - j;
+ if( n==0 ){
+ break;
+ }else if( p->path.zBuf[j]=='[' ){
+ i64 x;
+ sqlite3Atoi64(&p->path.zBuf[j+1], &x, n-1, SQLITE_UTF8);
+ sqlite3_result_int64(ctx, x);
+ }else if( p->path.zBuf[j+1]=='"' ){
+ sqlite3_result_text(ctx, &p->path.zBuf[j+2], n-3, SQLITE_TRANSIENT);
}else{
- iKey = p->iRowid;
+ sqlite3_result_text(ctx, &p->path.zBuf[j+1], n-1, SQLITE_TRANSIENT);
}
- sqlite3_result_int64(ctx, (sqlite3_int64)iKey);
+ break;
+ }
+ if( p->eType==JSONB_OBJECT ){
+ jsonReturnFromBlob(&p->sParse, p->i, ctx, 1);
+ }else{
+ assert( p->eType==JSONB_ARRAY );
+ sqlite3_result_int64(ctx, p->aParent[p->nParent-1].iKey);
}
break;
}
case JEACH_VALUE: {
- if( pThis->jnFlags & JNODE_LABEL ) pThis++;
- jsonReturn(pThis, ctx, 0);
+ u32 i = jsonSkipLabel(p);
+ jsonReturnFromBlob(&p->sParse, i, ctx, 1);
break;
}
case JEACH_TYPE: {
- if( pThis->jnFlags & JNODE_LABEL ) pThis++;
- sqlite3_result_text(ctx, jsonType[pThis->eType], -1, SQLITE_STATIC);
+ u32 i = jsonSkipLabel(p);
+ u8 eType = p->sParse.aBlob[i] & 0x0f;
+ sqlite3_result_text(ctx, jsonbType[eType], -1, SQLITE_STATIC);
break;
}
case JEACH_ATOM: {
- if( pThis->jnFlags & JNODE_LABEL ) pThis++;
- if( pThis->eType>=JSON_ARRAY ) break;
- jsonReturn(pThis, ctx, 0);
+ u32 i = jsonSkipLabel(p);
+ if( (p->sParse.aBlob[i] & 0x0f)<JSONB_ARRAY ){
+ jsonReturnFromBlob(&p->sParse, i, ctx, 1);
+ }
break;
}
case JEACH_ID: {
- sqlite3_result_int64(ctx,
- (sqlite3_int64)p->i + ((pThis->jnFlags & JNODE_LABEL)!=0));
+ sqlite3_result_int64(ctx, (sqlite3_int64)p->i);
break;
}
case JEACH_PARENT: {
- if( p->i>p->iBegin && p->bRecursive ){
- sqlite3_result_int64(ctx, (sqlite3_int64)p->sParse.aUp[p->i]);
+ if( p->nParent>0 && p->bRecursive ){
+ sqlite3_result_int64(ctx, p->aParent[p->nParent-1].iHead);
}
break;
}
case JEACH_FULLKEY: {
- JsonString x;
- jsonInit(&x, ctx);
- if( p->bRecursive ){
- jsonEachComputePath(p, &x, p->i);
- }else{
- if( p->zRoot ){
- jsonAppendRaw(&x, p->zRoot, (int)strlen(p->zRoot));
- }else{
- jsonAppendChar(&x, '$');
- }
- if( p->eType==JSON_ARRAY ){
- jsonPrintf(30, &x, "[%d]", p->iRowid);
- }else if( p->eType==JSON_OBJECT ){
- jsonAppendObjectPathElement(&x, pThis);
- }
- }
- jsonResult(&x);
+ u64 nBase = p->path.nUsed;
+ if( p->nParent ) jsonAppendPathName(p);
+ sqlite3_result_text64(ctx, p->path.zBuf, p->path.nUsed,
+ SQLITE_TRANSIENT, SQLITE_UTF8);
+ p->path.nUsed = nBase;
break;
}
case JEACH_PATH: {
- if( p->bRecursive ){
- JsonString x;
- jsonInit(&x, ctx);
- jsonEachComputePath(p, &x, p->sParse.aUp[p->i]);
- jsonResult(&x);
- break;
- }
- /* For json_each() path and root are the same so fall through
- ** into the root case */
- /* no break */ deliberate_fall_through
+ u32 n = jsonEachPathLength(p);
+ sqlite3_result_text64(ctx, p->path.zBuf, n,
+ SQLITE_TRANSIENT, SQLITE_UTF8);
+ break;
}
default: {
- const char *zRoot = p->zRoot;
- if( zRoot==0 ) zRoot = "$";
- sqlite3_result_text(ctx, zRoot, -1, SQLITE_STATIC);
+ sqlite3_result_text(ctx, p->path.zBuf, p->nRoot, SQLITE_STATIC);
break;
}
case JEACH_JSON: {
- assert( i==JEACH_JSON );
- sqlite3_result_text(ctx, p->sParse.zJson, -1, SQLITE_STATIC);
+ if( p->sParse.zJson==0 ){
+ sqlite3_result_blob(ctx, p->sParse.aBlob, p->sParse.nBlob,
+ SQLITE_STATIC);
+ }else{
+ sqlite3_result_text(ctx, p->sParse.zJson, -1, SQLITE_STATIC);
+ }
break;
}
}
@@ -202911,78 +208227,97 @@ static int jsonEachFilter(
int argc, sqlite3_value **argv
){
JsonEachCursor *p = (JsonEachCursor*)cur;
- const char *z;
const char *zRoot = 0;
- sqlite3_int64 n;
+ u32 i, n, sz;
UNUSED_PARAMETER(idxStr);
UNUSED_PARAMETER(argc);
jsonEachCursorReset(p);
if( idxNum==0 ) return SQLITE_OK;
- z = (const char*)sqlite3_value_text(argv[0]);
- if( z==0 ) return SQLITE_OK;
- n = sqlite3_value_bytes(argv[0]);
- p->zJson = sqlite3_malloc64( n+1 );
- if( p->zJson==0 ) return SQLITE_NOMEM;
- memcpy(p->zJson, z, (size_t)n+1);
- if( jsonParse(&p->sParse, 0, p->zJson) ){
- int rc = SQLITE_NOMEM;
- if( p->sParse.oom==0 ){
- sqlite3_free(cur->pVtab->zErrMsg);
- cur->pVtab->zErrMsg = sqlite3_mprintf("malformed JSON");
- if( cur->pVtab->zErrMsg ) rc = SQLITE_ERROR;
+ memset(&p->sParse, 0, sizeof(p->sParse));
+ p->sParse.nJPRef = 1;
+ p->sParse.db = p->db;
+ if( jsonFuncArgMightBeBinary(argv[0]) ){
+ p->sParse.nBlob = sqlite3_value_bytes(argv[0]);
+ p->sParse.aBlob = (u8*)sqlite3_value_blob(argv[0]);
+ }else{
+ p->sParse.zJson = (char*)sqlite3_value_text(argv[0]);
+ p->sParse.nJson = sqlite3_value_bytes(argv[0]);
+ if( p->sParse.zJson==0 ){
+ p->i = p->iEnd = 0;
+ return SQLITE_OK;
}
- jsonEachCursorReset(p);
- return rc;
- }else if( p->bRecursive && jsonParseFindParents(&p->sParse) ){
- jsonEachCursorReset(p);
- return SQLITE_NOMEM;
- }else{
- JsonNode *pNode = 0;
- if( idxNum==3 ){
- const char *zErr = 0;
- zRoot = (const char*)sqlite3_value_text(argv[1]);
- if( zRoot==0 ) return SQLITE_OK;
- n = sqlite3_value_bytes(argv[1]);
- p->zRoot = sqlite3_malloc64( n+1 );
- if( p->zRoot==0 ) return SQLITE_NOMEM;
- memcpy(p->zRoot, zRoot, (size_t)n+1);
- if( zRoot[0]!='$' ){
- zErr = zRoot;
- }else{
- pNode = jsonLookupStep(&p->sParse, 0, p->zRoot+1, 0, &zErr);
+ if( jsonConvertTextToBlob(&p->sParse, 0) ){
+ if( p->sParse.oom ){
+ return SQLITE_NOMEM;
}
- if( zErr ){
+ goto json_each_malformed_input;
+ }
+ }
+ if( idxNum==3 ){
+ zRoot = (const char*)sqlite3_value_text(argv[1]);
+ if( zRoot==0 ) return SQLITE_OK;
+ if( zRoot[0]!='$' ){
+ sqlite3_free(cur->pVtab->zErrMsg);
+ cur->pVtab->zErrMsg = jsonBadPathError(0, zRoot);
+ jsonEachCursorReset(p);
+ return cur->pVtab->zErrMsg ? SQLITE_ERROR : SQLITE_NOMEM;
+ }
+ p->nRoot = sqlite3Strlen30(zRoot);
+ if( zRoot[1]==0 ){
+ i = p->i = 0;
+ p->eType = 0;
+ }else{
+ i = jsonLookupStep(&p->sParse, 0, zRoot+1, 0);
+ if( JSON_LOOKUP_ISERROR(i) ){
+ if( i==JSON_LOOKUP_NOTFOUND ){
+ p->i = 0;
+ p->eType = 0;
+ p->iEnd = 0;
+ return SQLITE_OK;
+ }
sqlite3_free(cur->pVtab->zErrMsg);
- cur->pVtab->zErrMsg = jsonPathSyntaxError(zErr);
+ cur->pVtab->zErrMsg = jsonBadPathError(0, zRoot);
jsonEachCursorReset(p);
return cur->pVtab->zErrMsg ? SQLITE_ERROR : SQLITE_NOMEM;
- }else if( pNode==0 ){
- return SQLITE_OK;
}
- }else{
- pNode = p->sParse.aNode;
- }
- p->iBegin = p->i = (int)(pNode - p->sParse.aNode);
- p->eType = pNode->eType;
- if( p->eType>=JSON_ARRAY ){
- assert( pNode->eU==0 );
- VVA( pNode->eU = 3 );
- pNode->u.iKey = 0;
- p->iEnd = p->i + pNode->n + 1;
- if( p->bRecursive ){
- p->eType = p->sParse.aNode[p->sParse.aUp[p->i]].eType;
- if( p->i>0 && (p->sParse.aNode[p->i-1].jnFlags & JNODE_LABEL)!=0 ){
- p->i--;
- }
+ if( p->sParse.iLabel ){
+ p->i = p->sParse.iLabel;
+ p->eType = JSONB_OBJECT;
}else{
- p->i++;
- }
- }else{
- p->iEnd = p->i+1;
- }
+ p->i = i;
+ p->eType = JSONB_ARRAY;
+ }
+ }
+ jsonAppendRaw(&p->path, zRoot, p->nRoot);
+ }else{
+ i = p->i = 0;
+ p->eType = 0;
+ p->nRoot = 1;
+ jsonAppendRaw(&p->path, "$", 1);
+ }
+ p->nParent = 0;
+ n = jsonbPayloadSize(&p->sParse, i, &sz);
+ p->iEnd = i+n+sz;
+ if( (p->sParse.aBlob[i] & 0x0f)>=JSONB_ARRAY && !p->bRecursive ){
+ p->i = i + n;
+ p->eType = p->sParse.aBlob[i] & 0x0f;
+ p->aParent = sqlite3DbMallocZero(p->db, sizeof(JsonParent));
+ if( p->aParent==0 ) return SQLITE_NOMEM;
+ p->nParent = 1;
+ p->nParentAlloc = 1;
+ p->aParent[0].iKey = 0;
+ p->aParent[0].iEnd = p->iEnd;
+ p->aParent[0].iHead = p->i;
+ p->aParent[0].iValue = i;
}
return SQLITE_OK;
+
+json_each_malformed_input:
+ sqlite3_free(cur->pVtab->zErrMsg);
+ cur->pVtab->zErrMsg = sqlite3_mprintf("malformed JSON");
+ jsonEachCursorReset(p);
+ return cur->pVtab->zErrMsg ? SQLITE_ERROR : SQLITE_NOMEM;
}
/* The methods of the json_each virtual table */
@@ -203010,7 +208345,8 @@ static sqlite3_module jsonEachModule = {
0, /* xSavepoint */
0, /* xRelease */
0, /* xRollbackTo */
- 0 /* xShadowName */
+ 0, /* xShadowName */
+ 0 /* xIntegrity */
};
/* The methods of the json_tree virtual table. */
@@ -203038,7 +208374,8 @@ static sqlite3_module jsonTreeModule = {
0, /* xSavepoint */
0, /* xRelease */
0, /* xRollbackTo */
- 0 /* xShadowName */
+ 0, /* xShadowName */
+ 0 /* xIntegrity */
};
#endif /* SQLITE_OMIT_VIRTUALTABLE */
#endif /* !defined(SQLITE_OMIT_JSON) */
@@ -203049,34 +208386,57 @@ static sqlite3_module jsonTreeModule = {
SQLITE_PRIVATE void sqlite3RegisterJsonFunctions(void){
#ifndef SQLITE_OMIT_JSON
static FuncDef aJsonFunc[] = {
- JFUNCTION(json, 1, 0, jsonRemoveFunc),
- JFUNCTION(json_array, -1, 0, jsonArrayFunc),
- JFUNCTION(json_array_length, 1, 0, jsonArrayLengthFunc),
- JFUNCTION(json_array_length, 2, 0, jsonArrayLengthFunc),
- JFUNCTION(json_error_position,1, 0, jsonErrorFunc),
- JFUNCTION(json_extract, -1, 0, jsonExtractFunc),
- JFUNCTION(->, 2, JSON_JSON, jsonExtractFunc),
- JFUNCTION(->>, 2, JSON_SQL, jsonExtractFunc),
- JFUNCTION(json_insert, -1, 0, jsonSetFunc),
- JFUNCTION(json_object, -1, 0, jsonObjectFunc),
- JFUNCTION(json_patch, 2, 0, jsonPatchFunc),
- JFUNCTION(json_quote, 1, 0, jsonQuoteFunc),
- JFUNCTION(json_remove, -1, 0, jsonRemoveFunc),
- JFUNCTION(json_replace, -1, 0, jsonReplaceFunc),
- JFUNCTION(json_set, -1, JSON_ISSET, jsonSetFunc),
- JFUNCTION(json_type, 1, 0, jsonTypeFunc),
- JFUNCTION(json_type, 2, 0, jsonTypeFunc),
- JFUNCTION(json_valid, 1, 0, jsonValidFunc),
+ /* sqlite3_result_subtype() ----, ,--- sqlite3_value_subtype() */
+ /* | | */
+ /* Uses cache ------, | | ,---- Returns JSONB */
+ /* | | | | */
+ /* Number of arguments ---, | | | | ,--- Flags */
+ /* | | | | | | */
+ JFUNCTION(json, 1,1,1, 0,0,0, jsonRemoveFunc),
+ JFUNCTION(jsonb, 1,1,0, 0,1,0, jsonRemoveFunc),
+ JFUNCTION(json_array, -1,0,1, 1,0,0, jsonArrayFunc),
+ JFUNCTION(jsonb_array, -1,0,1, 1,1,0, jsonArrayFunc),
+ JFUNCTION(json_array_length, 1,1,0, 0,0,0, jsonArrayLengthFunc),
+ JFUNCTION(json_array_length, 2,1,0, 0,0,0, jsonArrayLengthFunc),
+ JFUNCTION(json_error_position,1,1,0, 0,0,0, jsonErrorFunc),
+ JFUNCTION(json_extract, -1,1,1, 0,0,0, jsonExtractFunc),
+ JFUNCTION(jsonb_extract, -1,1,0, 0,1,0, jsonExtractFunc),
+ JFUNCTION(->, 2,1,1, 0,0,JSON_JSON, jsonExtractFunc),
+ JFUNCTION(->>, 2,1,0, 0,0,JSON_SQL, jsonExtractFunc),
+ JFUNCTION(json_insert, -1,1,1, 1,0,0, jsonSetFunc),
+ JFUNCTION(jsonb_insert, -1,1,0, 1,1,0, jsonSetFunc),
+ JFUNCTION(json_object, -1,0,1, 1,0,0, jsonObjectFunc),
+ JFUNCTION(jsonb_object, -1,0,1, 1,1,0, jsonObjectFunc),
+ JFUNCTION(json_patch, 2,1,1, 0,0,0, jsonPatchFunc),
+ JFUNCTION(jsonb_patch, 2,1,0, 0,1,0, jsonPatchFunc),
+ JFUNCTION(json_quote, 1,0,1, 1,0,0, jsonQuoteFunc),
+ JFUNCTION(json_remove, -1,1,1, 0,0,0, jsonRemoveFunc),
+ JFUNCTION(jsonb_remove, -1,1,0, 0,1,0, jsonRemoveFunc),
+ JFUNCTION(json_replace, -1,1,1, 1,0,0, jsonReplaceFunc),
+ JFUNCTION(jsonb_replace, -1,1,0, 1,1,0, jsonReplaceFunc),
+ JFUNCTION(json_set, -1,1,1, 1,0,JSON_ISSET, jsonSetFunc),
+ JFUNCTION(jsonb_set, -1,1,0, 1,1,JSON_ISSET, jsonSetFunc),
+ JFUNCTION(json_type, 1,1,0, 0,0,0, jsonTypeFunc),
+ JFUNCTION(json_type, 2,1,0, 0,0,0, jsonTypeFunc),
+ JFUNCTION(json_valid, 1,1,0, 0,0,0, jsonValidFunc),
+ JFUNCTION(json_valid, 2,1,0, 0,0,0, jsonValidFunc),
#if SQLITE_DEBUG
- JFUNCTION(json_parse, 1, 0, jsonParseFunc),
- JFUNCTION(json_test1, 1, 0, jsonTest1Func),
+ JFUNCTION(json_parse, 1,1,0, 0,0,0, jsonParseFunc),
#endif
WAGGREGATE(json_group_array, 1, 0, 0,
jsonArrayStep, jsonArrayFinal, jsonArrayValue, jsonGroupInverse,
- SQLITE_SUBTYPE|SQLITE_UTF8|SQLITE_DETERMINISTIC),
+ SQLITE_SUBTYPE|SQLITE_RESULT_SUBTYPE|SQLITE_UTF8|
+ SQLITE_DETERMINISTIC),
+ WAGGREGATE(jsonb_group_array, 1, JSON_BLOB, 0,
+ jsonArrayStep, jsonArrayFinal, jsonArrayValue, jsonGroupInverse,
+ SQLITE_SUBTYPE|SQLITE_RESULT_SUBTYPE|SQLITE_UTF8|SQLITE_DETERMINISTIC),
WAGGREGATE(json_group_object, 2, 0, 0,
jsonObjectStep, jsonObjectFinal, jsonObjectValue, jsonGroupInverse,
- SQLITE_SUBTYPE|SQLITE_UTF8|SQLITE_DETERMINISTIC)
+ SQLITE_SUBTYPE|SQLITE_RESULT_SUBTYPE|SQLITE_UTF8|SQLITE_DETERMINISTIC),
+ WAGGREGATE(jsonb_group_object,2, JSON_BLOB, 0,
+ jsonObjectStep, jsonObjectFinal, jsonObjectValue, jsonGroupInverse,
+ SQLITE_SUBTYPE|SQLITE_RESULT_SUBTYPE|SQLITE_UTF8|
+ SQLITE_DETERMINISTIC)
};
sqlite3InsertBuiltinFuncs(aJsonFunc, ArraySize(aJsonFunc));
#endif
@@ -203203,6 +208563,11 @@ typedef unsigned int u32;
#endif
#endif /* !defined(SQLITE_AMALGAMATION) */
+/* Macro to check for 4-byte alignment. Only used inside of assert() */
+#ifdef SQLITE_DEBUG
+# define FOUR_BYTE_ALIGNED(X) ((((char*)(X) - (char*)0) & 3)==0)
+#endif
+
/* #include <string.h> */
/* #include <stdio.h> */
/* #include <assert.h> */
@@ -203268,6 +208633,7 @@ struct Rtree {
int iDepth; /* Current depth of the r-tree structure */
char *zDb; /* Name of database containing r-tree table */
char *zName; /* Name of r-tree table */
+ char *zNodeName; /* Name of the %_node table */
u32 nBusy; /* Current number of users of this structure */
i64 nRowEst; /* Estimated number of rows in this table */
u32 nCursor; /* Number of open cursors */
@@ -203280,7 +208646,6 @@ struct Rtree {
** headed by the node (leaf nodes have RtreeNode.iNode==0).
*/
RtreeNode *pDeleted;
- int iReinsertHeight; /* Height of sub-trees Reinsert() has run on */
/* Blob I/O on xxx_node */
sqlite3_blob *pNodeBlob;
@@ -203577,15 +208942,20 @@ struct RtreeMatchArg {
** -DSQLITE_RUNTIME_BYTEORDER=1 is set, then byte-order is determined
** at run-time.
*/
-#ifndef SQLITE_BYTEORDER
-# if defined(i386) || defined(__i386__) || defined(_M_IX86) || \
+#ifndef SQLITE_BYTEORDER /* Replicate changes at tag-20230904a */
+# if defined(__BYTE_ORDER__) && __BYTE_ORDER__==__ORDER_BIG_ENDIAN__
+# define SQLITE_BYTEORDER 4321
+# elif defined(__BYTE_ORDER__) && __BYTE_ORDER__==__ORDER_LITTLE_ENDIAN__
+# define SQLITE_BYTEORDER 1234
+# elif defined(__BIG_ENDIAN__) && __BIG_ENDIAN__==1
+# define SQLITE_BYTEORDER 4321
+# elif defined(i386) || defined(__i386__) || defined(_M_IX86) || \
defined(__x86_64) || defined(__x86_64__) || defined(_M_X64) || \
defined(_M_AMD64) || defined(_M_ARM) || defined(__x86) || \
defined(__ARMEL__) || defined(__AARCH64EL__) || defined(_M_ARM64)
-# define SQLITE_BYTEORDER 1234
-# elif defined(sparc) || defined(__ppc__) || \
- defined(__ARMEB__) || defined(__AARCH64EB__)
-# define SQLITE_BYTEORDER 4321
+# define SQLITE_BYTEORDER 1234
+# elif defined(sparc) || defined(__ARMEB__) || defined(__AARCH64EB__)
+# define SQLITE_BYTEORDER 4321
# else
# define SQLITE_BYTEORDER 0
# endif
@@ -203609,7 +208979,7 @@ static int readInt16(u8 *p){
return (p[0]<<8) + p[1];
}
static void readCoord(u8 *p, RtreeCoord *pCoord){
- assert( (((sqlite3_uint64)p)&3)==0 ); /* p is always 4-byte aligned */
+ assert( FOUR_BYTE_ALIGNED(p) );
#if SQLITE_BYTEORDER==1234 && MSVC_VERSION>=1300
pCoord->u = _byteswap_ulong(*(u32*)p);
#elif SQLITE_BYTEORDER==1234 && GCC_VERSION>=4003000
@@ -203663,7 +209033,7 @@ static void writeInt16(u8 *p, int i){
}
static int writeCoord(u8 *p, RtreeCoord *pCoord){
u32 i;
- assert( (((sqlite3_uint64)p)&3)==0 ); /* p is always 4-byte aligned */
+ assert( FOUR_BYTE_ALIGNED(p) );
assert( sizeof(RtreeCoord)==4 );
assert( sizeof(u32)==4 );
#if SQLITE_BYTEORDER==1234 && GCC_VERSION>=4003000
@@ -203814,7 +209184,7 @@ static int nodeAcquire(
** increase its reference count and return it.
*/
if( (pNode = nodeHashLookup(pRtree, iNode))!=0 ){
- if( pParent && pParent!=pNode->pParent ){
+ if( pParent && ALWAYS(pParent!=pNode->pParent) ){
RTREE_IS_CORRUPT(pRtree);
return SQLITE_CORRUPT_VTAB;
}
@@ -203834,11 +209204,9 @@ static int nodeAcquire(
}
}
if( pRtree->pNodeBlob==0 ){
- char *zTab = sqlite3_mprintf("%s_node", pRtree->zName);
- if( zTab==0 ) return SQLITE_NOMEM;
- rc = sqlite3_blob_open(pRtree->db, pRtree->zDb, zTab, "data", iNode, 0,
+ rc = sqlite3_blob_open(pRtree->db, pRtree->zDb, pRtree->zNodeName,
+ "data", iNode, 0,
&pRtree->pNodeBlob);
- sqlite3_free(zTab);
}
if( rc ){
nodeBlobReset(pRtree);
@@ -204391,7 +209759,7 @@ static void rtreeNonleafConstraint(
assert(p->op==RTREE_LE || p->op==RTREE_LT || p->op==RTREE_GE
|| p->op==RTREE_GT || p->op==RTREE_EQ || p->op==RTREE_TRUE
|| p->op==RTREE_FALSE );
- assert( (((sqlite3_uint64)pCellData)&3)==0 ); /* 4-byte aligned */
+ assert( FOUR_BYTE_ALIGNED(pCellData) );
switch( p->op ){
case RTREE_TRUE: return; /* Always satisfied */
case RTREE_FALSE: break; /* Never satisfied */
@@ -204444,7 +209812,7 @@ static void rtreeLeafConstraint(
|| p->op==RTREE_GT || p->op==RTREE_EQ || p->op==RTREE_TRUE
|| p->op==RTREE_FALSE );
pCellData += 8 + p->iCoord*4;
- assert( (((sqlite3_uint64)pCellData)&3)==0 ); /* 4-byte aligned */
+ assert( FOUR_BYTE_ALIGNED(pCellData) );
RTREE_DECODE_COORD(eInt, pCellData, xN);
switch( p->op ){
case RTREE_TRUE: return; /* Always satisfied */
@@ -205014,7 +210382,20 @@ static int rtreeFilter(
p->pInfo->nCoord = pRtree->nDim2;
p->pInfo->anQueue = pCsr->anQueue;
p->pInfo->mxLevel = pRtree->iDepth + 1;
- }else if( eType==SQLITE_INTEGER || eType==SQLITE_FLOAT ){
+ }else if( eType==SQLITE_INTEGER ){
+ sqlite3_int64 iVal = sqlite3_value_int64(argv[ii]);
+#ifdef SQLITE_RTREE_INT_ONLY
+ p->u.rValue = iVal;
+#else
+ p->u.rValue = (double)iVal;
+ if( iVal>=((sqlite3_int64)1)<<48
+ || iVal<=-(((sqlite3_int64)1)<<48)
+ ){
+ if( p->op==RTREE_LT ) p->op = RTREE_LE;
+ if( p->op==RTREE_GT ) p->op = RTREE_GE;
+ }
+#endif
+ }else if( eType==SQLITE_FLOAT ){
#ifdef SQLITE_RTREE_INT_ONLY
p->u.rValue = sqlite3_value_int64(argv[ii]);
#else
@@ -205145,11 +210526,12 @@ static int rtreeBestIndex(sqlite3_vtab *tab, sqlite3_index_info *pIdxInfo){
|| p->op==SQLITE_INDEX_CONSTRAINT_MATCH)
){
u8 op;
+ u8 doOmit = 1;
switch( p->op ){
- case SQLITE_INDEX_CONSTRAINT_EQ: op = RTREE_EQ; break;
- case SQLITE_INDEX_CONSTRAINT_GT: op = RTREE_GT; break;
+ case SQLITE_INDEX_CONSTRAINT_EQ: op = RTREE_EQ; doOmit = 0; break;
+ case SQLITE_INDEX_CONSTRAINT_GT: op = RTREE_GT; doOmit = 0; break;
case SQLITE_INDEX_CONSTRAINT_LE: op = RTREE_LE; break;
- case SQLITE_INDEX_CONSTRAINT_LT: op = RTREE_LT; break;
+ case SQLITE_INDEX_CONSTRAINT_LT: op = RTREE_LT; doOmit = 0; break;
case SQLITE_INDEX_CONSTRAINT_GE: op = RTREE_GE; break;
case SQLITE_INDEX_CONSTRAINT_MATCH: op = RTREE_MATCH; break;
default: op = 0; break;
@@ -205158,15 +210540,19 @@ static int rtreeBestIndex(sqlite3_vtab *tab, sqlite3_index_info *pIdxInfo){
zIdxStr[iIdx++] = op;
zIdxStr[iIdx++] = (char)(p->iColumn - 1 + '0');
pIdxInfo->aConstraintUsage[ii].argvIndex = (iIdx/2);
- pIdxInfo->aConstraintUsage[ii].omit = 1;
+ pIdxInfo->aConstraintUsage[ii].omit = doOmit;
}
}
}
pIdxInfo->idxNum = 2;
pIdxInfo->needToFreeIdxStr = 1;
- if( iIdx>0 && 0==(pIdxInfo->idxStr = sqlite3_mprintf("%s", zIdxStr)) ){
- return SQLITE_NOMEM;
+ if( iIdx>0 ){
+ pIdxInfo->idxStr = sqlite3_malloc( iIdx+1 );
+ if( pIdxInfo->idxStr==0 ){
+ return SQLITE_NOMEM;
+ }
+ memcpy(pIdxInfo->idxStr, zIdxStr, iIdx+1);
}
nRow = pRtree->nRowEst >> (iIdx/2);
@@ -205245,31 +210631,22 @@ static void cellUnion(Rtree *pRtree, RtreeCell *p1, RtreeCell *p2){
*/
static int cellContains(Rtree *pRtree, RtreeCell *p1, RtreeCell *p2){
int ii;
- int isInt = (pRtree->eCoordType==RTREE_COORD_INT32);
- for(ii=0; ii<pRtree->nDim2; ii+=2){
- RtreeCoord *a1 = &p1->aCoord[ii];
- RtreeCoord *a2 = &p2->aCoord[ii];
- if( (!isInt && (a2[0].f<a1[0].f || a2[1].f>a1[1].f))
- || ( isInt && (a2[0].i<a1[0].i || a2[1].i>a1[1].i))
- ){
- return 0;
+ if( pRtree->eCoordType==RTREE_COORD_INT32 ){
+ for(ii=0; ii<pRtree->nDim2; ii+=2){
+ RtreeCoord *a1 = &p1->aCoord[ii];
+ RtreeCoord *a2 = &p2->aCoord[ii];
+ if( a2[0].i<a1[0].i || a2[1].i>a1[1].i ) return 0;
+ }
+ }else{
+ for(ii=0; ii<pRtree->nDim2; ii+=2){
+ RtreeCoord *a1 = &p1->aCoord[ii];
+ RtreeCoord *a2 = &p2->aCoord[ii];
+ if( a2[0].f<a1[0].f || a2[1].f>a1[1].f ) return 0;
}
}
return 1;
}
-/*
-** Return the amount cell p would grow by if it were unioned with pCell.
-*/
-static RtreeDValue cellGrowth(Rtree *pRtree, RtreeCell *p, RtreeCell *pCell){
- RtreeDValue area;
- RtreeCell cell;
- memcpy(&cell, p, sizeof(RtreeCell));
- area = cellArea(pRtree, &cell);
- cellUnion(pRtree, &cell, pCell);
- return (cellArea(pRtree, &cell)-area);
-}
-
static RtreeDValue cellOverlap(
Rtree *pRtree,
RtreeCell *p,
@@ -205316,38 +210693,52 @@ static int ChooseLeaf(
for(ii=0; rc==SQLITE_OK && ii<(pRtree->iDepth-iHeight); ii++){
int iCell;
sqlite3_int64 iBest = 0;
-
+ int bFound = 0;
RtreeDValue fMinGrowth = RTREE_ZERO;
RtreeDValue fMinArea = RTREE_ZERO;
-
int nCell = NCELL(pNode);
- RtreeCell cell;
RtreeNode *pChild = 0;
- RtreeCell *aCell = 0;
-
- /* Select the child node which will be enlarged the least if pCell
- ** is inserted into it. Resolve ties by choosing the entry with
- ** the smallest area.
+ /* First check to see if there is are any cells in pNode that completely
+ ** contains pCell. If two or more cells in pNode completely contain pCell
+ ** then pick the smallest.
*/
for(iCell=0; iCell<nCell; iCell++){
- int bBest = 0;
- RtreeDValue growth;
- RtreeDValue area;
+ RtreeCell cell;
nodeGetCell(pRtree, pNode, iCell, &cell);
- growth = cellGrowth(pRtree, &cell, pCell);
- area = cellArea(pRtree, &cell);
- if( iCell==0||growth<fMinGrowth||(growth==fMinGrowth && area<fMinArea) ){
- bBest = 1;
+ if( cellContains(pRtree, &cell, pCell) ){
+ RtreeDValue area = cellArea(pRtree, &cell);
+ if( bFound==0 || area<fMinArea ){
+ iBest = cell.iRowid;
+ fMinArea = area;
+ bFound = 1;
+ }
}
- if( bBest ){
- fMinGrowth = growth;
- fMinArea = area;
- iBest = cell.iRowid;
+ }
+ if( !bFound ){
+ /* No cells of pNode will completely contain pCell. So pick the
+ ** cell of pNode that grows by the least amount when pCell is added.
+ ** Break ties by selecting the smaller cell.
+ */
+ for(iCell=0; iCell<nCell; iCell++){
+ RtreeCell cell;
+ RtreeDValue growth;
+ RtreeDValue area;
+ nodeGetCell(pRtree, pNode, iCell, &cell);
+ area = cellArea(pRtree, &cell);
+ cellUnion(pRtree, &cell, pCell);
+ growth = cellArea(pRtree, &cell)-area;
+ if( iCell==0
+ || growth<fMinGrowth
+ || (growth==fMinGrowth && area<fMinArea)
+ ){
+ fMinGrowth = growth;
+ fMinArea = area;
+ iBest = cell.iRowid;
+ }
}
}
- sqlite3_free(aCell);
rc = nodeAcquire(pRtree, iBest, pNode, &pChild);
nodeRelease(pRtree, pNode);
pNode = pChild;
@@ -205420,77 +210811,6 @@ static int parentWrite(Rtree *pRtree, sqlite3_int64 iNode, sqlite3_int64 iPar){
static int rtreeInsertCell(Rtree *, RtreeNode *, RtreeCell *, int);
-/*
-** Arguments aIdx, aDistance and aSpare all point to arrays of size
-** nIdx. The aIdx array contains the set of integers from 0 to
-** (nIdx-1) in no particular order. This function sorts the values
-** in aIdx according to the indexed values in aDistance. For
-** example, assuming the inputs:
-**
-** aIdx = { 0, 1, 2, 3 }
-** aDistance = { 5.0, 2.0, 7.0, 6.0 }
-**
-** this function sets the aIdx array to contain:
-**
-** aIdx = { 0, 1, 2, 3 }
-**
-** The aSpare array is used as temporary working space by the
-** sorting algorithm.
-*/
-static void SortByDistance(
- int *aIdx,
- int nIdx,
- RtreeDValue *aDistance,
- int *aSpare
-){
- if( nIdx>1 ){
- int iLeft = 0;
- int iRight = 0;
-
- int nLeft = nIdx/2;
- int nRight = nIdx-nLeft;
- int *aLeft = aIdx;
- int *aRight = &aIdx[nLeft];
-
- SortByDistance(aLeft, nLeft, aDistance, aSpare);
- SortByDistance(aRight, nRight, aDistance, aSpare);
-
- memcpy(aSpare, aLeft, sizeof(int)*nLeft);
- aLeft = aSpare;
-
- while( iLeft<nLeft || iRight<nRight ){
- if( iLeft==nLeft ){
- aIdx[iLeft+iRight] = aRight[iRight];
- iRight++;
- }else if( iRight==nRight ){
- aIdx[iLeft+iRight] = aLeft[iLeft];
- iLeft++;
- }else{
- RtreeDValue fLeft = aDistance[aLeft[iLeft]];
- RtreeDValue fRight = aDistance[aRight[iRight]];
- if( fLeft<fRight ){
- aIdx[iLeft+iRight] = aLeft[iLeft];
- iLeft++;
- }else{
- aIdx[iLeft+iRight] = aRight[iRight];
- iRight++;
- }
- }
- }
-
-#if 0
- /* Check that the sort worked */
- {
- int jj;
- for(jj=1; jj<nIdx; jj++){
- RtreeDValue left = aDistance[aIdx[jj-1]];
- RtreeDValue right = aDistance[aIdx[jj]];
- assert( left<=right );
- }
- }
-#endif
- }
-}
/*
** Arguments aIdx, aCell and aSpare all point to arrays of size
@@ -205975,107 +211295,6 @@ static int deleteCell(Rtree *pRtree, RtreeNode *pNode, int iCell, int iHeight){
return rc;
}
-static int Reinsert(
- Rtree *pRtree,
- RtreeNode *pNode,
- RtreeCell *pCell,
- int iHeight
-){
- int *aOrder;
- int *aSpare;
- RtreeCell *aCell;
- RtreeDValue *aDistance;
- int nCell;
- RtreeDValue aCenterCoord[RTREE_MAX_DIMENSIONS];
- int iDim;
- int ii;
- int rc = SQLITE_OK;
- int n;
-
- memset(aCenterCoord, 0, sizeof(RtreeDValue)*RTREE_MAX_DIMENSIONS);
-
- nCell = NCELL(pNode)+1;
- n = (nCell+1)&(~1);
-
- /* Allocate the buffers used by this operation. The allocation is
- ** relinquished before this function returns.
- */
- aCell = (RtreeCell *)sqlite3_malloc64(n * (
- sizeof(RtreeCell) + /* aCell array */
- sizeof(int) + /* aOrder array */
- sizeof(int) + /* aSpare array */
- sizeof(RtreeDValue) /* aDistance array */
- ));
- if( !aCell ){
- return SQLITE_NOMEM;
- }
- aOrder = (int *)&aCell[n];
- aSpare = (int *)&aOrder[n];
- aDistance = (RtreeDValue *)&aSpare[n];
-
- for(ii=0; ii<nCell; ii++){
- if( ii==(nCell-1) ){
- memcpy(&aCell[ii], pCell, sizeof(RtreeCell));
- }else{
- nodeGetCell(pRtree, pNode, ii, &aCell[ii]);
- }
- aOrder[ii] = ii;
- for(iDim=0; iDim<pRtree->nDim; iDim++){
- aCenterCoord[iDim] += DCOORD(aCell[ii].aCoord[iDim*2]);
- aCenterCoord[iDim] += DCOORD(aCell[ii].aCoord[iDim*2+1]);
- }
- }
- for(iDim=0; iDim<pRtree->nDim; iDim++){
- aCenterCoord[iDim] = (aCenterCoord[iDim]/(nCell*(RtreeDValue)2));
- }
-
- for(ii=0; ii<nCell; ii++){
- aDistance[ii] = RTREE_ZERO;
- for(iDim=0; iDim<pRtree->nDim; iDim++){
- RtreeDValue coord = (DCOORD(aCell[ii].aCoord[iDim*2+1]) -
- DCOORD(aCell[ii].aCoord[iDim*2]));
- aDistance[ii] += (coord-aCenterCoord[iDim])*(coord-aCenterCoord[iDim]);
- }
- }
-
- SortByDistance(aOrder, nCell, aDistance, aSpare);
- nodeZero(pRtree, pNode);
-
- for(ii=0; rc==SQLITE_OK && ii<(nCell-(RTREE_MINCELLS(pRtree)+1)); ii++){
- RtreeCell *p = &aCell[aOrder[ii]];
- nodeInsertCell(pRtree, pNode, p);
- if( p->iRowid==pCell->iRowid ){
- if( iHeight==0 ){
- rc = rowidWrite(pRtree, p->iRowid, pNode->iNode);
- }else{
- rc = parentWrite(pRtree, p->iRowid, pNode->iNode);
- }
- }
- }
- if( rc==SQLITE_OK ){
- rc = fixBoundingBox(pRtree, pNode);
- }
- for(; rc==SQLITE_OK && ii<nCell; ii++){
- /* Find a node to store this cell in. pNode->iNode currently contains
- ** the height of the sub-tree headed by the cell.
- */
- RtreeNode *pInsert;
- RtreeCell *p = &aCell[aOrder[ii]];
- rc = ChooseLeaf(pRtree, p, iHeight, &pInsert);
- if( rc==SQLITE_OK ){
- int rc2;
- rc = rtreeInsertCell(pRtree, pInsert, p, iHeight);
- rc2 = nodeRelease(pRtree, pInsert);
- if( rc==SQLITE_OK ){
- rc = rc2;
- }
- }
- }
-
- sqlite3_free(aCell);
- return rc;
-}
-
/*
** Insert cell pCell into node pNode. Node pNode is the head of a
** subtree iHeight high (leaf nodes have iHeight==0).
@@ -206096,12 +211315,7 @@ static int rtreeInsertCell(
}
}
if( nodeInsertCell(pRtree, pNode, pCell) ){
- if( iHeight<=pRtree->iReinsertHeight || pNode->iNode==1){
- rc = SplitNode(pRtree, pNode, pCell, iHeight);
- }else{
- pRtree->iReinsertHeight = iHeight;
- rc = Reinsert(pRtree, pNode, pCell, iHeight);
- }
+ rc = SplitNode(pRtree, pNode, pCell, iHeight);
}else{
rc = AdjustTree(pRtree, pNode, pCell);
if( ALWAYS(rc==SQLITE_OK) ){
@@ -206444,7 +211658,6 @@ static int rtreeUpdate(
}
if( rc==SQLITE_OK ){
int rc2;
- pRtree->iReinsertHeight = -1;
rc = rtreeInsertCell(pRtree, pLeaf, &cell, 0);
rc2 = nodeRelease(pRtree, pLeaf);
if( rc==SQLITE_OK ){
@@ -206585,8 +211798,11 @@ static int rtreeShadowName(const char *zName){
return 0;
}
+/* Forward declaration */
+static int rtreeIntegrity(sqlite3_vtab*, const char*, const char*, int, char**);
+
static sqlite3_module rtreeModule = {
- 3, /* iVersion */
+ 4, /* iVersion */
rtreeCreate, /* xCreate - create a table */
rtreeConnect, /* xConnect - connect to an existing table */
rtreeBestIndex, /* xBestIndex - Determine search strategy */
@@ -206609,7 +211825,8 @@ static sqlite3_module rtreeModule = {
rtreeSavepoint, /* xSavepoint */
0, /* xRelease */
0, /* xRollbackTo */
- rtreeShadowName /* xShadowName */
+ rtreeShadowName, /* xShadowName */
+ rtreeIntegrity /* xIntegrity */
};
static int rtreeSqlInit(
@@ -206702,7 +211919,7 @@ static int rtreeSqlInit(
}
sqlite3_free(zSql);
}
- if( pRtree->nAux ){
+ if( pRtree->nAux && rc!=SQLITE_NOMEM ){
pRtree->zReadAuxSql = sqlite3_mprintf(
"SELECT * FROM \"%w\".\"%w_rowid\" WHERE rowid=?1",
zDb, zPrefix);
@@ -206865,22 +212082,27 @@ static int rtreeInit(
}
sqlite3_vtab_config(db, SQLITE_VTAB_CONSTRAINT_SUPPORT, 1);
+ sqlite3_vtab_config(db, SQLITE_VTAB_INNOCUOUS);
+
/* Allocate the sqlite3_vtab structure */
nDb = (int)strlen(argv[1]);
nName = (int)strlen(argv[2]);
- pRtree = (Rtree *)sqlite3_malloc64(sizeof(Rtree)+nDb+nName+2);
+ pRtree = (Rtree *)sqlite3_malloc64(sizeof(Rtree)+nDb+nName*2+8);
if( !pRtree ){
return SQLITE_NOMEM;
}
- memset(pRtree, 0, sizeof(Rtree)+nDb+nName+2);
+ memset(pRtree, 0, sizeof(Rtree)+nDb+nName*2+8);
pRtree->nBusy = 1;
pRtree->base.pModule = &rtreeModule;
pRtree->zDb = (char *)&pRtree[1];
pRtree->zName = &pRtree->zDb[nDb+1];
+ pRtree->zNodeName = &pRtree->zName[nName+1];
pRtree->eCoordType = (u8)eCoordType;
memcpy(pRtree->zDb, argv[1], nDb);
memcpy(pRtree->zName, argv[2], nName);
+ memcpy(pRtree->zNodeName, argv[2], nName);
+ memcpy(&pRtree->zNodeName[nName], "_node", 6);
/* Create/Connect to the underlying relational database schema. If
@@ -207377,7 +212599,6 @@ static int rtreeCheckTable(
){
RtreeCheck check; /* Common context for various routines */
sqlite3_stmt *pStmt = 0; /* Used to find column count of rtree table */
- int bEnd = 0; /* True if transaction should be closed */
int nAux = 0; /* Number of extra columns. */
/* Initialize the context object */
@@ -207386,24 +212607,14 @@ static int rtreeCheckTable(
check.zDb = zDb;
check.zTab = zTab;
- /* If there is not already an open transaction, open one now. This is
- ** to ensure that the queries run as part of this integrity-check operate
- ** on a consistent snapshot. */
- if( sqlite3_get_autocommit(db) ){
- check.rc = sqlite3_exec(db, "BEGIN", 0, 0, 0);
- bEnd = 1;
- }
-
/* Find the number of auxiliary columns */
- if( check.rc==SQLITE_OK ){
- pStmt = rtreeCheckPrepare(&check, "SELECT * FROM %Q.'%q_rowid'", zDb, zTab);
- if( pStmt ){
- nAux = sqlite3_column_count(pStmt) - 2;
- sqlite3_finalize(pStmt);
- }else
- if( check.rc!=SQLITE_NOMEM ){
- check.rc = SQLITE_OK;
- }
+ pStmt = rtreeCheckPrepare(&check, "SELECT * FROM %Q.'%q_rowid'", zDb, zTab);
+ if( pStmt ){
+ nAux = sqlite3_column_count(pStmt) - 2;
+ sqlite3_finalize(pStmt);
+ }else
+ if( check.rc!=SQLITE_NOMEM ){
+ check.rc = SQLITE_OK;
}
/* Find number of dimensions in the rtree table. */
@@ -207434,16 +212645,36 @@ static int rtreeCheckTable(
sqlite3_finalize(check.aCheckMapping[0]);
sqlite3_finalize(check.aCheckMapping[1]);
- /* If one was opened, close the transaction */
- if( bEnd ){
- int rc = sqlite3_exec(db, "END", 0, 0, 0);
- if( check.rc==SQLITE_OK ) check.rc = rc;
- }
*pzReport = check.zReport;
return check.rc;
}
/*
+** Implementation of the xIntegrity method for Rtree.
+*/
+static int rtreeIntegrity(
+ sqlite3_vtab *pVtab, /* The virtual table to check */
+ const char *zSchema, /* Schema in which the virtual table lives */
+ const char *zName, /* Name of the virtual table */
+ int isQuick, /* True for a quick_check */
+ char **pzErr /* Write results here */
+){
+ Rtree *pRtree = (Rtree*)pVtab;
+ int rc;
+ assert( pzErr!=0 && *pzErr==0 );
+ UNUSED_PARAMETER(zSchema);
+ UNUSED_PARAMETER(zName);
+ UNUSED_PARAMETER(isQuick);
+ rc = rtreeCheckTable(pRtree->db, pRtree->zDb, pRtree->zName, pzErr);
+ if( rc==SQLITE_OK && *pzErr ){
+ *pzErr = sqlite3_mprintf("In RTree %s.%s:\n%z",
+ pRtree->zDb, pRtree->zName, *pzErr);
+ if( (*pzErr)==0 ) rc = SQLITE_NOMEM;
+ }
+ return rc;
+}
+
+/*
** Usage:
**
** rtreecheck(<rtree-table>);
@@ -208764,24 +213995,28 @@ static int geopolyInit(
(void)pAux;
sqlite3_vtab_config(db, SQLITE_VTAB_CONSTRAINT_SUPPORT, 1);
+ sqlite3_vtab_config(db, SQLITE_VTAB_INNOCUOUS);
/* Allocate the sqlite3_vtab structure */
nDb = strlen(argv[1]);
nName = strlen(argv[2]);
- pRtree = (Rtree *)sqlite3_malloc64(sizeof(Rtree)+nDb+nName+2);
+ pRtree = (Rtree *)sqlite3_malloc64(sizeof(Rtree)+nDb+nName*2+8);
if( !pRtree ){
return SQLITE_NOMEM;
}
- memset(pRtree, 0, sizeof(Rtree)+nDb+nName+2);
+ memset(pRtree, 0, sizeof(Rtree)+nDb+nName*2+8);
pRtree->nBusy = 1;
pRtree->base.pModule = &rtreeModule;
pRtree->zDb = (char *)&pRtree[1];
pRtree->zName = &pRtree->zDb[nDb+1];
+ pRtree->zNodeName = &pRtree->zName[nName+1];
pRtree->eCoordType = RTREE_COORD_REAL32;
pRtree->nDim = 2;
pRtree->nDim2 = 4;
memcpy(pRtree->zDb, argv[1], nDb);
memcpy(pRtree->zName, argv[2], nName);
+ memcpy(pRtree->zNodeName, argv[2], nName);
+ memcpy(&pRtree->zNodeName[nName], "_node", 6);
/* Create/Connect to the underlying relational database schema. If
@@ -209195,7 +214430,6 @@ static int geopolyUpdate(
}
if( rc==SQLITE_OK ){
int rc2;
- pRtree->iReinsertHeight = -1;
rc = rtreeInsertCell(pRtree, pLeaf, &cell, 0);
rc2 = nodeRelease(pRtree, pLeaf);
if( rc==SQLITE_OK ){
@@ -209292,7 +214526,8 @@ static sqlite3_module geopolyModule = {
rtreeSavepoint, /* xSavepoint */
0, /* xRelease */
0, /* xRollbackTo */
- rtreeShadowName /* xShadowName */
+ rtreeShadowName, /* xShadowName */
+ rtreeIntegrity /* xIntegrity */
};
static int sqlite3_geopoly_init(sqlite3 *db){
@@ -217306,7 +222541,8 @@ SQLITE_PRIVATE int sqlite3DbstatRegister(sqlite3 *db){
0, /* xSavepoint */
0, /* xRelease */
0, /* xRollbackTo */
- 0 /* xShadowName */
+ 0, /* xShadowName */
+ 0 /* xIntegrity */
};
return sqlite3_create_module(db, "dbstat", &dbstat_module, 0);
}
@@ -217743,7 +222979,8 @@ SQLITE_PRIVATE int sqlite3DbpageRegister(sqlite3 *db){
0, /* xSavepoint */
0, /* xRelease */
0, /* xRollbackTo */
- 0 /* xShadowName */
+ 0, /* xShadowName */
+ 0 /* xIntegrity */
};
return sqlite3_create_module(db, "sqlite_dbpage", &dbpage_module, 0);
}
@@ -217874,6 +223111,18 @@ struct sqlite3_changeset_iter {
** The data associated with each hash-table entry is a structure containing
** a subset of the initial values that the modified row contained at the
** start of the session. Or no initial values if the row was inserted.
+**
+** pDfltStmt:
+** This is only used by the sqlite3changegroup_xxx() APIs, not by
+** regular sqlite3_session objects. It is a SELECT statement that
+** selects the default value for each table column. For example,
+** if the table is
+**
+** CREATE TABLE xx(a DEFAULT 1, b, c DEFAULT 'abc')
+**
+** then this variable is the compiled version of:
+**
+** SELECT 1, NULL, 'abc'
*/
struct SessionTable {
SessionTable *pNext;
@@ -217882,10 +223131,12 @@ struct SessionTable {
int bStat1; /* True if this is sqlite_stat1 */
int bRowid; /* True if this table uses rowid for PK */
const char **azCol; /* Column names */
+ const char **azDflt; /* Default value expressions */
u8 *abPK; /* Array of primary key flags */
int nEntry; /* Total number of entries in hash table */
int nChange; /* Size of apChange[] array */
SessionChange **apChange; /* Hash table buckets */
+ sqlite3_stmt *pDfltStmt;
};
/*
@@ -218054,6 +223305,7 @@ struct SessionTable {
struct SessionChange {
u8 op; /* One of UPDATE, DELETE, INSERT */
u8 bIndirect; /* True if this change is "indirect" */
+ u16 nRecordField; /* Number of fields in aRecord[] */
int nMaxSize; /* Max size of eventual changeset record */
int nRecord; /* Number of bytes in buffer aRecord[] */
u8 *aRecord; /* Buffer containing old.* record */
@@ -218079,7 +223331,7 @@ static int sessionVarintLen(int iVal){
** Read a varint value from aBuf[] into *piVal. Return the number of
** bytes read.
*/
-static int sessionVarintGet(u8 *aBuf, int *piVal){
+static int sessionVarintGet(const u8 *aBuf, int *piVal){
return getVarint32(aBuf, *piVal);
}
@@ -218342,9 +223594,11 @@ static int sessionPreupdateHash(
** Return the number of bytes of space occupied by the value (including
** the type byte).
*/
-static int sessionSerialLen(u8 *a){
- int e = *a;
+static int sessionSerialLen(const u8 *a){
+ int e;
int n;
+ assert( a!=0 );
+ e = *a;
if( e==0 || e==0xFF ) return 1;
if( e==SQLITE_NULL ) return 1;
if( e==SQLITE_INTEGER || e==SQLITE_FLOAT ) return 9;
@@ -218646,6 +223900,7 @@ static int sessionPreupdateEqual(
rc = pSession->hook.xOld(pSession->hook.pCtx, iCol, &pVal);
}
assert( rc==SQLITE_OK );
+ (void)rc; /* Suppress warning about unused variable */
if( sqlite3_value_type(pVal)!=eType ) return 0;
/* A SessionChange object never has a NULL value in a PK column */
@@ -218748,13 +224003,14 @@ static int sessionGrowHash(
**
** For example, if the table is declared as:
**
-** CREATE TABLE tbl1(w, x, y, z, PRIMARY KEY(w, z));
+** CREATE TABLE tbl1(w, x DEFAULT 'abc', y, z, PRIMARY KEY(w, z));
**
-** Then the four output variables are populated as follows:
+** Then the five output variables are populated as follows:
**
** *pnCol = 4
** *pzTab = "tbl1"
** *pazCol = {"w", "x", "y", "z"}
+** *pazDflt = {NULL, 'abc', NULL, NULL}
** *pabPK = {1, 0, 0, 1}
**
** All returned buffers are part of the same single allocation, which must
@@ -218768,6 +224024,7 @@ static int sessionTableInfo(
int *pnCol, /* OUT: number of columns */
const char **pzTab, /* OUT: Copy of zThis */
const char ***pazCol, /* OUT: Array of column names for table */
+ const char ***pazDflt, /* OUT: Array of default value expressions */
u8 **pabPK, /* OUT: Array of booleans - true for PK col */
int *pbRowid /* OUT: True if only PK is a rowid */
){
@@ -218780,11 +224037,18 @@ static int sessionTableInfo(
int i;
u8 *pAlloc = 0;
char **azCol = 0;
+ char **azDflt = 0;
u8 *abPK = 0;
int bRowid = 0; /* Set to true to use rowid as PK */
assert( pazCol && pabPK );
+ *pazCol = 0;
+ *pabPK = 0;
+ *pnCol = 0;
+ if( pzTab ) *pzTab = 0;
+ if( pazDflt ) *pazDflt = 0;
+
nThis = sqlite3Strlen30(zThis);
if( nThis==12 && 0==sqlite3_stricmp("sqlite_stat1", zThis) ){
rc = sqlite3_table_column_metadata(db, zDb, zThis, 0, 0, 0, 0, 0, 0);
@@ -218798,39 +224062,28 @@ static int sessionTableInfo(
}else if( rc==SQLITE_ERROR ){
zPragma = sqlite3_mprintf("");
}else{
- *pazCol = 0;
- *pabPK = 0;
- *pnCol = 0;
- if( pzTab ) *pzTab = 0;
return rc;
}
}else{
zPragma = sqlite3_mprintf("PRAGMA '%q'.table_info('%q')", zDb, zThis);
}
if( !zPragma ){
- *pazCol = 0;
- *pabPK = 0;
- *pnCol = 0;
- if( pzTab ) *pzTab = 0;
return SQLITE_NOMEM;
}
rc = sqlite3_prepare_v2(db, zPragma, -1, &pStmt, 0);
sqlite3_free(zPragma);
if( rc!=SQLITE_OK ){
- *pazCol = 0;
- *pabPK = 0;
- *pnCol = 0;
- if( pzTab ) *pzTab = 0;
return rc;
}
nByte = nThis + 1;
bRowid = (pbRowid!=0);
while( SQLITE_ROW==sqlite3_step(pStmt) ){
- nByte += sqlite3_column_bytes(pStmt, 1);
+ nByte += sqlite3_column_bytes(pStmt, 1); /* name */
+ nByte += sqlite3_column_bytes(pStmt, 4); /* dflt_value */
nDbCol++;
- if( sqlite3_column_int(pStmt, 5) ) bRowid = 0;
+ if( sqlite3_column_int(pStmt, 5) ) bRowid = 0; /* pk */
}
if( nDbCol==0 ) bRowid = 0;
nDbCol += bRowid;
@@ -218838,15 +224091,18 @@ static int sessionTableInfo(
rc = sqlite3_reset(pStmt);
if( rc==SQLITE_OK ){
- nByte += nDbCol * (sizeof(const char *) + sizeof(u8) + 1);
+ nByte += nDbCol * (sizeof(const char *)*2 + sizeof(u8) + 1 + 1);
pAlloc = sessionMalloc64(pSession, nByte);
if( pAlloc==0 ){
rc = SQLITE_NOMEM;
+ }else{
+ memset(pAlloc, 0, nByte);
}
}
if( rc==SQLITE_OK ){
azCol = (char **)pAlloc;
- pAlloc = (u8 *)&azCol[nDbCol];
+ azDflt = (char**)&azCol[nDbCol];
+ pAlloc = (u8 *)&azDflt[nDbCol];
abPK = (u8 *)pAlloc;
pAlloc = &abPK[nDbCol];
if( pzTab ){
@@ -218866,11 +224122,21 @@ static int sessionTableInfo(
}
while( SQLITE_ROW==sqlite3_step(pStmt) ){
int nName = sqlite3_column_bytes(pStmt, 1);
+ int nDflt = sqlite3_column_bytes(pStmt, 4);
const unsigned char *zName = sqlite3_column_text(pStmt, 1);
+ const unsigned char *zDflt = sqlite3_column_text(pStmt, 4);
+
if( zName==0 ) break;
memcpy(pAlloc, zName, nName+1);
azCol[i] = (char *)pAlloc;
pAlloc += nName+1;
+ if( zDflt ){
+ memcpy(pAlloc, zDflt, nDflt+1);
+ azDflt[i] = (char *)pAlloc;
+ pAlloc += nDflt+1;
+ }else{
+ azDflt[i] = 0;
+ }
abPK[i] = sqlite3_column_int(pStmt, 5);
i++;
}
@@ -218881,14 +224147,11 @@ static int sessionTableInfo(
** free any allocation made. An error code will be returned in this case.
*/
if( rc==SQLITE_OK ){
- *pazCol = (const char **)azCol;
+ *pazCol = (const char**)azCol;
+ if( pazDflt ) *pazDflt = (const char**)azDflt;
*pabPK = abPK;
*pnCol = nDbCol;
}else{
- *pazCol = 0;
- *pabPK = 0;
- *pnCol = 0;
- if( pzTab ) *pzTab = 0;
sessionFree(pSession, azCol);
}
if( pbRowid ) *pbRowid = bRowid;
@@ -218897,10 +224160,9 @@ static int sessionTableInfo(
}
/*
-** This function is only called from within a pre-update handler for a
-** write to table pTab, part of session pSession. If this is the first
-** write to this table, initalize the SessionTable.nCol, azCol[] and
-** abPK[] arrays accordingly.
+** This function is called to initialize the SessionTable.nCol, azCol[]
+** abPK[] and azDflt[] members of SessionTable object pTab. If these
+** fields are already initilialized, this function is a no-op.
**
** If an error occurs, an error code is stored in sqlite3_session.rc and
** non-zero returned. Or, if no error occurs but the table has no primary
@@ -218908,15 +224170,22 @@ static int sessionTableInfo(
** indicate that updates on this table should be ignored. SessionTable.abPK
** is set to NULL in this case.
*/
-static int sessionInitTable(sqlite3_session *pSession, SessionTable *pTab){
+static int sessionInitTable(
+ sqlite3_session *pSession, /* Optional session handle */
+ SessionTable *pTab, /* Table object to initialize */
+ sqlite3 *db, /* Database handle to read schema from */
+ const char *zDb /* Name of db - "main", "temp" etc. */
+){
+ int rc = SQLITE_OK;
+
if( pTab->nCol==0 ){
u8 *abPK;
assert( pTab->azCol==0 || pTab->abPK==0 );
- pSession->rc = sessionTableInfo(pSession, pSession->db, pSession->zDb,
- pTab->zName, &pTab->nCol, 0, &pTab->azCol, &abPK,
- (pSession->bImplicitPK ? &pTab->bRowid : 0)
+ rc = sessionTableInfo(pSession, db, zDb,
+ pTab->zName, &pTab->nCol, 0, &pTab->azCol, &pTab->azDflt, &abPK,
+ ((pSession==0 || pSession->bImplicitPK) ? &pTab->bRowid : 0)
);
- if( pSession->rc==SQLITE_OK ){
+ if( rc==SQLITE_OK ){
int i;
for(i=0; i<pTab->nCol; i++){
if( abPK[i] ){
@@ -218928,14 +224197,321 @@ static int sessionInitTable(sqlite3_session *pSession, SessionTable *pTab){
pTab->bStat1 = 1;
}
- if( pSession->bEnableSize ){
+ if( pSession && pSession->bEnableSize ){
pSession->nMaxChangesetSize += (
1 + sessionVarintLen(pTab->nCol) + pTab->nCol + strlen(pTab->zName)+1
);
}
}
}
- return (pSession->rc || pTab->abPK==0);
+
+ if( pSession ){
+ pSession->rc = rc;
+ return (rc || pTab->abPK==0);
+ }
+ return rc;
+}
+
+/*
+** Re-initialize table object pTab.
+*/
+static int sessionReinitTable(sqlite3_session *pSession, SessionTable *pTab){
+ int nCol = 0;
+ const char **azCol = 0;
+ const char **azDflt = 0;
+ u8 *abPK = 0;
+ int bRowid = 0;
+
+ assert( pSession->rc==SQLITE_OK );
+
+ pSession->rc = sessionTableInfo(pSession, pSession->db, pSession->zDb,
+ pTab->zName, &nCol, 0, &azCol, &azDflt, &abPK,
+ (pSession->bImplicitPK ? &bRowid : 0)
+ );
+ if( pSession->rc==SQLITE_OK ){
+ if( pTab->nCol>nCol || pTab->bRowid!=bRowid ){
+ pSession->rc = SQLITE_SCHEMA;
+ }else{
+ int ii;
+ int nOldCol = pTab->nCol;
+ for(ii=0; ii<nCol; ii++){
+ if( ii<pTab->nCol ){
+ if( pTab->abPK[ii]!=abPK[ii] ){
+ pSession->rc = SQLITE_SCHEMA;
+ }
+ }else if( abPK[ii] ){
+ pSession->rc = SQLITE_SCHEMA;
+ }
+ }
+
+ if( pSession->rc==SQLITE_OK ){
+ const char **a = pTab->azCol;
+ pTab->azCol = azCol;
+ pTab->nCol = nCol;
+ pTab->azDflt = azDflt;
+ pTab->abPK = abPK;
+ azCol = a;
+ }
+ if( pSession->bEnableSize ){
+ pSession->nMaxChangesetSize += (nCol - nOldCol);
+ pSession->nMaxChangesetSize += sessionVarintLen(nCol);
+ pSession->nMaxChangesetSize -= sessionVarintLen(nOldCol);
+ }
+ }
+ }
+
+ sqlite3_free((char*)azCol);
+ return pSession->rc;
+}
+
+/*
+** Session-change object (*pp) contains an old.* record with fewer than
+** nCol fields. This function updates it with the default values for
+** the missing fields.
+*/
+static void sessionUpdateOneChange(
+ sqlite3_session *pSession, /* For memory accounting */
+ int *pRc, /* IN/OUT: Error code */
+ SessionChange **pp, /* IN/OUT: Change object to update */
+ int nCol, /* Number of columns now in table */
+ sqlite3_stmt *pDflt /* SELECT <default-values...> */
+){
+ SessionChange *pOld = *pp;
+
+ while( pOld->nRecordField<nCol ){
+ SessionChange *pNew = 0;
+ int nByte = 0;
+ int nIncr = 0;
+ int iField = pOld->nRecordField;
+ int eType = sqlite3_column_type(pDflt, iField);
+ switch( eType ){
+ case SQLITE_NULL:
+ nIncr = 1;
+ break;
+ case SQLITE_INTEGER:
+ case SQLITE_FLOAT:
+ nIncr = 9;
+ break;
+ default: {
+ int n = sqlite3_column_bytes(pDflt, iField);
+ nIncr = 1 + sessionVarintLen(n) + n;
+ assert( eType==SQLITE_TEXT || eType==SQLITE_BLOB );
+ break;
+ }
+ }
+
+ nByte = nIncr + (sizeof(SessionChange) + pOld->nRecord);
+ pNew = sessionMalloc64(pSession, nByte);
+ if( pNew==0 ){
+ *pRc = SQLITE_NOMEM;
+ return;
+ }else{
+ memcpy(pNew, pOld, sizeof(SessionChange));
+ pNew->aRecord = (u8*)&pNew[1];
+ memcpy(pNew->aRecord, pOld->aRecord, pOld->nRecord);
+ pNew->aRecord[pNew->nRecord++] = (u8)eType;
+ switch( eType ){
+ case SQLITE_INTEGER: {
+ i64 iVal = sqlite3_column_int64(pDflt, iField);
+ sessionPutI64(&pNew->aRecord[pNew->nRecord], iVal);
+ pNew->nRecord += 8;
+ break;
+ }
+
+ case SQLITE_FLOAT: {
+ double rVal = sqlite3_column_double(pDflt, iField);
+ i64 iVal = 0;
+ memcpy(&iVal, &rVal, sizeof(rVal));
+ sessionPutI64(&pNew->aRecord[pNew->nRecord], iVal);
+ pNew->nRecord += 8;
+ break;
+ }
+
+ case SQLITE_TEXT: {
+ int n = sqlite3_column_bytes(pDflt, iField);
+ const char *z = (const char*)sqlite3_column_text(pDflt, iField);
+ pNew->nRecord += sessionVarintPut(&pNew->aRecord[pNew->nRecord], n);
+ memcpy(&pNew->aRecord[pNew->nRecord], z, n);
+ pNew->nRecord += n;
+ break;
+ }
+
+ case SQLITE_BLOB: {
+ int n = sqlite3_column_bytes(pDflt, iField);
+ const u8 *z = (const u8*)sqlite3_column_blob(pDflt, iField);
+ pNew->nRecord += sessionVarintPut(&pNew->aRecord[pNew->nRecord], n);
+ memcpy(&pNew->aRecord[pNew->nRecord], z, n);
+ pNew->nRecord += n;
+ break;
+ }
+
+ default:
+ assert( eType==SQLITE_NULL );
+ break;
+ }
+
+ sessionFree(pSession, pOld);
+ *pp = pOld = pNew;
+ pNew->nRecordField++;
+ pNew->nMaxSize += nIncr;
+ if( pSession ){
+ pSession->nMaxChangesetSize += nIncr;
+ }
+ }
+ }
+}
+
+/*
+** Ensure that there is room in the buffer to append nByte bytes of data.
+** If not, use sqlite3_realloc() to grow the buffer so that there is.
+**
+** If successful, return zero. Otherwise, if an OOM condition is encountered,
+** set *pRc to SQLITE_NOMEM and return non-zero.
+*/
+static int sessionBufferGrow(SessionBuffer *p, i64 nByte, int *pRc){
+#define SESSION_MAX_BUFFER_SZ (0x7FFFFF00 - 1)
+ i64 nReq = p->nBuf + nByte;
+ if( *pRc==SQLITE_OK && nReq>p->nAlloc ){
+ u8 *aNew;
+ i64 nNew = p->nAlloc ? p->nAlloc : 128;
+
+ do {
+ nNew = nNew*2;
+ }while( nNew<nReq );
+
+ /* The value of SESSION_MAX_BUFFER_SZ is copied from the implementation
+ ** of sqlite3_realloc64(). Allocations greater than this size in bytes
+ ** always fail. It is used here to ensure that this routine can always
+ ** allocate up to this limit - instead of up to the largest power of
+ ** two smaller than the limit. */
+ if( nNew>SESSION_MAX_BUFFER_SZ ){
+ nNew = SESSION_MAX_BUFFER_SZ;
+ if( nNew<nReq ){
+ *pRc = SQLITE_NOMEM;
+ return 1;
+ }
+ }
+
+ aNew = (u8 *)sqlite3_realloc64(p->aBuf, nNew);
+ if( 0==aNew ){
+ *pRc = SQLITE_NOMEM;
+ }else{
+ p->aBuf = aNew;
+ p->nAlloc = nNew;
+ }
+ }
+ return (*pRc!=SQLITE_OK);
+}
+
+
+/*
+** This function is a no-op if *pRc is other than SQLITE_OK when it is
+** called. Otherwise, append a string to the buffer. All bytes in the string
+** up to (but not including) the nul-terminator are written to the buffer.
+**
+** If an OOM condition is encountered, set *pRc to SQLITE_NOMEM before
+** returning.
+*/
+static void sessionAppendStr(
+ SessionBuffer *p,
+ const char *zStr,
+ int *pRc
+){
+ int nStr = sqlite3Strlen30(zStr);
+ if( 0==sessionBufferGrow(p, nStr+1, pRc) ){
+ memcpy(&p->aBuf[p->nBuf], zStr, nStr);
+ p->nBuf += nStr;
+ p->aBuf[p->nBuf] = 0x00;
+ }
+}
+
+/*
+** Format a string using printf() style formatting and then append it to the
+** buffer using sessionAppendString().
+*/
+static void sessionAppendPrintf(
+ SessionBuffer *p, /* Buffer to append to */
+ int *pRc,
+ const char *zFmt,
+ ...
+){
+ if( *pRc==SQLITE_OK ){
+ char *zApp = 0;
+ va_list ap;
+ va_start(ap, zFmt);
+ zApp = sqlite3_vmprintf(zFmt, ap);
+ if( zApp==0 ){
+ *pRc = SQLITE_NOMEM;
+ }else{
+ sessionAppendStr(p, zApp, pRc);
+ }
+ va_end(ap);
+ sqlite3_free(zApp);
+ }
+}
+
+/*
+** Prepare a statement against database handle db that SELECTs a single
+** row containing the default values for each column in table pTab. For
+** example, if pTab is declared as:
+**
+** CREATE TABLE pTab(a PRIMARY KEY, b DEFAULT 123, c DEFAULT 'abcd');
+**
+** Then this function prepares and returns the SQL statement:
+**
+** SELECT NULL, 123, 'abcd';
+*/
+static int sessionPrepareDfltStmt(
+ sqlite3 *db, /* Database handle */
+ SessionTable *pTab, /* Table to prepare statement for */
+ sqlite3_stmt **ppStmt /* OUT: Statement handle */
+){
+ SessionBuffer sql = {0,0,0};
+ int rc = SQLITE_OK;
+ const char *zSep = " ";
+ int ii = 0;
+
+ *ppStmt = 0;
+ sessionAppendPrintf(&sql, &rc, "SELECT");
+ for(ii=0; ii<pTab->nCol; ii++){
+ const char *zDflt = pTab->azDflt[ii] ? pTab->azDflt[ii] : "NULL";
+ sessionAppendPrintf(&sql, &rc, "%s%s", zSep, zDflt);
+ zSep = ", ";
+ }
+ if( rc==SQLITE_OK ){
+ rc = sqlite3_prepare_v2(db, (const char*)sql.aBuf, -1, ppStmt, 0);
+ }
+ sqlite3_free(sql.aBuf);
+
+ return rc;
+}
+
+/*
+** Table pTab has one or more existing change-records with old.* records
+** with fewer than pTab->nCol columns. This function updates all such
+** change-records with the default values for the missing columns.
+*/
+static int sessionUpdateChanges(sqlite3_session *pSession, SessionTable *pTab){
+ sqlite3_stmt *pStmt = 0;
+ int rc = pSession->rc;
+
+ rc = sessionPrepareDfltStmt(pSession->db, pTab, &pStmt);
+ if( rc==SQLITE_OK && SQLITE_ROW==sqlite3_step(pStmt) ){
+ int ii = 0;
+ SessionChange **pp = 0;
+ for(ii=0; ii<pTab->nChange; ii++){
+ for(pp=&pTab->apChange[ii]; *pp; pp=&((*pp)->pNext)){
+ if( (*pp)->nRecordField!=pTab->nCol ){
+ sessionUpdateOneChange(pSession, &rc, pp, pTab->nCol, pStmt);
+ }
+ }
+ }
+ }
+
+ pSession->rc = rc;
+ rc = sqlite3_finalize(pStmt);
+ if( pSession->rc==SQLITE_OK ) pSession->rc = rc;
+ return pSession->rc;
}
/*
@@ -219098,16 +224674,22 @@ static void sessionPreupdateOneChange(
int iHash;
int bNull = 0;
int rc = SQLITE_OK;
+ int nExpect = 0;
SessionStat1Ctx stat1 = {{0,0,0,0,0},0};
if( pSession->rc ) return;
/* Load table details if required */
- if( sessionInitTable(pSession, pTab) ) return;
+ if( sessionInitTable(pSession, pTab, pSession->db, pSession->zDb) ) return;
/* Check the number of columns in this xPreUpdate call matches the
** number of columns in the table. */
- if( (pTab->nCol-pTab->bRowid)!=pSession->hook.xCount(pSession->hook.pCtx) ){
+ nExpect = pSession->hook.xCount(pSession->hook.pCtx);
+ if( (pTab->nCol-pTab->bRowid)<nExpect ){
+ if( sessionReinitTable(pSession, pTab) ) return;
+ if( sessionUpdateChanges(pSession, pTab) ) return;
+ }
+ if( (pTab->nCol-pTab->bRowid)!=nExpect ){
pSession->rc = SQLITE_SCHEMA;
return;
}
@@ -219184,7 +224766,7 @@ static void sessionPreupdateOneChange(
}
/* Allocate the change object */
- pC = (SessionChange *)sessionMalloc64(pSession, nByte);
+ pC = (SessionChange*)sessionMalloc64(pSession, nByte);
if( !pC ){
rc = SQLITE_NOMEM;
goto error_out;
@@ -219217,6 +224799,7 @@ static void sessionPreupdateOneChange(
if( pSession->bIndirect || pSession->hook.xDepth(pSession->hook.pCtx) ){
pC->bIndirect = 1;
}
+ pC->nRecordField = pTab->nCol;
pC->nRecord = nByte;
pC->op = op;
pC->pNext = pTab->apChange[iHash];
@@ -219596,7 +225179,7 @@ SQLITE_API int sqlite3session_diff(
/* Locate and if necessary initialize the target table object */
rc = sessionFindTable(pSession, zTbl, &pTo);
if( pTo==0 ) goto diff_out;
- if( sessionInitTable(pSession, pTo) ){
+ if( sessionInitTable(pSession, pTo, pSession->db, pSession->zDb) ){
rc = pSession->rc;
goto diff_out;
}
@@ -219609,7 +225192,7 @@ SQLITE_API int sqlite3session_diff(
int bRowid = 0;
u8 *abPK;
const char **azCol = 0;
- rc = sessionTableInfo(0, db, zFrom, zTbl, &nCol, 0, &azCol, &abPK,
+ rc = sessionTableInfo(0, db, zFrom, zTbl, &nCol, 0, &azCol, 0, &abPK,
pSession->bImplicitPK ? &bRowid : 0
);
if( rc==SQLITE_OK ){
@@ -219724,6 +225307,7 @@ static void sessionDeleteTable(sqlite3_session *pSession, SessionTable *pList){
sessionFree(pSession, p);
}
}
+ sqlite3_finalize(pTab->pDfltStmt);
sessionFree(pSession, (char*)pTab->azCol); /* cast works around VC++ bug */
sessionFree(pSession, pTab->apChange);
sessionFree(pSession, pTab);
@@ -219756,9 +225340,7 @@ SQLITE_API void sqlite3session_delete(sqlite3_session *pSession){
** associated hash-tables. */
sessionDeleteTable(pSession, pSession->pTable);
- /* Assert that all allocations have been freed and then free the
- ** session object itself. */
- assert( pSession->nMalloc==0 );
+ /* Free the session object. */
sqlite3_free(pSession);
}
@@ -219830,48 +225412,6 @@ SQLITE_API int sqlite3session_attach(
}
/*
-** Ensure that there is room in the buffer to append nByte bytes of data.
-** If not, use sqlite3_realloc() to grow the buffer so that there is.
-**
-** If successful, return zero. Otherwise, if an OOM condition is encountered,
-** set *pRc to SQLITE_NOMEM and return non-zero.
-*/
-static int sessionBufferGrow(SessionBuffer *p, i64 nByte, int *pRc){
-#define SESSION_MAX_BUFFER_SZ (0x7FFFFF00 - 1)
- i64 nReq = p->nBuf + nByte;
- if( *pRc==SQLITE_OK && nReq>p->nAlloc ){
- u8 *aNew;
- i64 nNew = p->nAlloc ? p->nAlloc : 128;
-
- do {
- nNew = nNew*2;
- }while( nNew<nReq );
-
- /* The value of SESSION_MAX_BUFFER_SZ is copied from the implementation
- ** of sqlite3_realloc64(). Allocations greater than this size in bytes
- ** always fail. It is used here to ensure that this routine can always
- ** allocate up to this limit - instead of up to the largest power of
- ** two smaller than the limit. */
- if( nNew>SESSION_MAX_BUFFER_SZ ){
- nNew = SESSION_MAX_BUFFER_SZ;
- if( nNew<nReq ){
- *pRc = SQLITE_NOMEM;
- return 1;
- }
- }
-
- aNew = (u8 *)sqlite3_realloc64(p->aBuf, nNew);
- if( 0==aNew ){
- *pRc = SQLITE_NOMEM;
- }else{
- p->aBuf = aNew;
- p->nAlloc = nNew;
- }
- }
- return (*pRc!=SQLITE_OK);
-}
-
-/*
** Append the value passed as the second argument to the buffer passed
** as the first.
**
@@ -219941,27 +225481,6 @@ static void sessionAppendBlob(
/*
** This function is a no-op if *pRc is other than SQLITE_OK when it is
-** called. Otherwise, append a string to the buffer. All bytes in the string
-** up to (but not including) the nul-terminator are written to the buffer.
-**
-** If an OOM condition is encountered, set *pRc to SQLITE_NOMEM before
-** returning.
-*/
-static void sessionAppendStr(
- SessionBuffer *p,
- const char *zStr,
- int *pRc
-){
- int nStr = sqlite3Strlen30(zStr);
- if( 0==sessionBufferGrow(p, nStr+1, pRc) ){
- memcpy(&p->aBuf[p->nBuf], zStr, nStr);
- p->nBuf += nStr;
- p->aBuf[p->nBuf] = 0x00;
- }
-}
-
-/*
-** This function is a no-op if *pRc is other than SQLITE_OK when it is
** called. Otherwise, append the string representation of integer iVal
** to the buffer. No nul-terminator is written.
**
@@ -219978,27 +225497,6 @@ static void sessionAppendInteger(
sessionAppendStr(p, aBuf, pRc);
}
-static void sessionAppendPrintf(
- SessionBuffer *p, /* Buffer to append to */
- int *pRc,
- const char *zFmt,
- ...
-){
- if( *pRc==SQLITE_OK ){
- char *zApp = 0;
- va_list ap;
- va_start(ap, zFmt);
- zApp = sqlite3_vmprintf(zFmt, ap);
- if( zApp==0 ){
- *pRc = SQLITE_NOMEM;
- }else{
- sessionAppendStr(p, zApp, pRc);
- }
- va_end(ap);
- sqlite3_free(zApp);
- }
-}
-
/*
** This function is a no-op if *pRc is other than SQLITE_OK when it is
** called. Otherwise, append the string zStr enclosed in quotes (") and
@@ -220489,26 +225987,16 @@ static int sessionGenerateChangeset(
for(pTab=pSession->pTable; rc==SQLITE_OK && pTab; pTab=pTab->pNext){
if( pTab->nEntry ){
const char *zName = pTab->zName;
- int nCol = 0; /* Number of columns in table */
- u8 *abPK = 0; /* Primary key array */
- const char **azCol = 0; /* Table columns */
int i; /* Used to iterate through hash buckets */
sqlite3_stmt *pSel = 0; /* SELECT statement to query table pTab */
int nRewind = buf.nBuf; /* Initial size of write buffer */
int nNoop; /* Size of buffer after writing tbl header */
- int bRowid = 0;
+ int nOldCol = pTab->nCol;
/* Check the table schema is still Ok. */
- rc = sessionTableInfo(
- 0, db, pSession->zDb, zName, &nCol, 0, &azCol, &abPK,
- (pSession->bImplicitPK ? &bRowid : 0)
- );
- if( rc==SQLITE_OK && (
- pTab->nCol!=nCol
- || pTab->bRowid!=bRowid
- || memcmp(abPK, pTab->abPK, nCol)
- )){
- rc = SQLITE_SCHEMA;
+ rc = sessionReinitTable(pSession, pTab);
+ if( rc==SQLITE_OK && pTab->nCol!=nOldCol ){
+ rc = sessionUpdateChanges(pSession, pTab);
}
/* Write a table header */
@@ -220516,8 +226004,8 @@ static int sessionGenerateChangeset(
/* Build and compile a statement to execute: */
if( rc==SQLITE_OK ){
- rc = sessionSelectStmt(
- db, 0, pSession->zDb, zName, bRowid, nCol, azCol, abPK, &pSel
+ rc = sessionSelectStmt(db, 0, pSession->zDb,
+ zName, pTab->bRowid, pTab->nCol, pTab->azCol, pTab->abPK, &pSel
);
}
@@ -220526,22 +226014,22 @@ static int sessionGenerateChangeset(
SessionChange *p; /* Used to iterate through changes */
for(p=pTab->apChange[i]; rc==SQLITE_OK && p; p=p->pNext){
- rc = sessionSelectBind(pSel, nCol, abPK, p);
+ rc = sessionSelectBind(pSel, pTab->nCol, pTab->abPK, p);
if( rc!=SQLITE_OK ) continue;
if( sqlite3_step(pSel)==SQLITE_ROW ){
if( p->op==SQLITE_INSERT ){
int iCol;
sessionAppendByte(&buf, SQLITE_INSERT, &rc);
sessionAppendByte(&buf, p->bIndirect, &rc);
- for(iCol=0; iCol<nCol; iCol++){
+ for(iCol=0; iCol<pTab->nCol; iCol++){
sessionAppendCol(&buf, pSel, iCol, &rc);
}
}else{
- assert( abPK!=0 ); /* Because sessionSelectStmt() returned ok */
- rc = sessionAppendUpdate(&buf, bPatchset, pSel, p, abPK);
+ assert( pTab->abPK!=0 );
+ rc = sessionAppendUpdate(&buf, bPatchset, pSel, p, pTab->abPK);
}
}else if( p->op!=SQLITE_INSERT ){
- rc = sessionAppendDelete(&buf, bPatchset, p, nCol, abPK);
+ rc = sessionAppendDelete(&buf, bPatchset, p, pTab->nCol,pTab->abPK);
}
if( rc==SQLITE_OK ){
rc = sqlite3_reset(pSel);
@@ -220566,7 +226054,6 @@ static int sessionGenerateChangeset(
if( buf.nBuf==nNoop ){
buf.nBuf = nRewind;
}
- sqlite3_free((char*)azCol); /* cast works around VC++ bug */
}
}
@@ -220990,15 +226477,19 @@ static int sessionReadRecord(
}
}
if( eType==SQLITE_INTEGER || eType==SQLITE_FLOAT ){
- sqlite3_int64 v = sessionGetI64(aVal);
- if( eType==SQLITE_INTEGER ){
- sqlite3VdbeMemSetInt64(apOut[i], v);
+ if( (pIn->nData-pIn->iNext)<8 ){
+ rc = SQLITE_CORRUPT_BKPT;
}else{
- double d;
- memcpy(&d, &v, 8);
- sqlite3VdbeMemSetDouble(apOut[i], d);
+ sqlite3_int64 v = sessionGetI64(aVal);
+ if( eType==SQLITE_INTEGER ){
+ sqlite3VdbeMemSetInt64(apOut[i], v);
+ }else{
+ double d;
+ memcpy(&d, &v, 8);
+ sqlite3VdbeMemSetDouble(apOut[i], d);
+ }
+ pIn->iNext += 8;
}
- pIn->iNext += 8;
}
}
}
@@ -222691,7 +228182,7 @@ static int sessionChangesetApply(
sqlite3changeset_pk(pIter, &abPK, 0);
rc = sessionTableInfo(0, db, "main", zNew,
- &sApply.nCol, &zTab, &sApply.azCol, &sApply.abPK, &sApply.bRowid
+ &sApply.nCol, &zTab, &sApply.azCol, 0, &sApply.abPK, &sApply.bRowid
);
if( rc!=SQLITE_OK ) break;
for(i=0; i<sApply.nCol; i++){
@@ -222823,11 +228314,24 @@ SQLITE_API int sqlite3changeset_apply_v2(
sqlite3_changeset_iter *pIter; /* Iterator to skip through changeset */
int bInv = !!(flags & SQLITE_CHANGESETAPPLY_INVERT);
int rc = sessionChangesetStart(&pIter, 0, 0, nChangeset, pChangeset, bInv, 1);
+ u64 savedFlag = db->flags & SQLITE_FkNoAction;
+
+ if( flags & SQLITE_CHANGESETAPPLY_FKNOACTION ){
+ db->flags |= ((u64)SQLITE_FkNoAction);
+ db->aDb[0].pSchema->schema_cookie -= 32;
+ }
+
if( rc==SQLITE_OK ){
rc = sessionChangesetApply(
db, pIter, xFilter, xConflict, pCtx, ppRebase, pnRebase, flags
);
}
+
+ if( (flags & SQLITE_CHANGESETAPPLY_FKNOACTION) && savedFlag==0 ){
+ assert( db->flags & SQLITE_FkNoAction );
+ db->flags &= ~((u64)SQLITE_FkNoAction);
+ db->aDb[0].pSchema->schema_cookie -= 32;
+ }
return rc;
}
@@ -222915,6 +228419,9 @@ struct sqlite3_changegroup {
int rc; /* Error code */
int bPatch; /* True to accumulate patchsets */
SessionTable *pList; /* List of tables in current patch */
+
+ sqlite3 *db; /* Configured by changegroup_schema() */
+ char *zDb; /* Configured by changegroup_schema() */
};
/*
@@ -222935,6 +228442,7 @@ static int sessionChangeMerge(
){
SessionChange *pNew = 0;
int rc = SQLITE_OK;
+ assert( aRec!=0 );
if( !pExist ){
pNew = (SessionChange *)sqlite3_malloc64(sizeof(SessionChange) + nRec);
@@ -223101,6 +228609,114 @@ static int sessionChangeMerge(
}
/*
+** Check if a changeset entry with nCol columns and the PK array passed
+** as the final argument to this function is compatible with SessionTable
+** pTab. If so, return 1. Otherwise, if they are incompatible in some way,
+** return 0.
+*/
+static int sessionChangesetCheckCompat(
+ SessionTable *pTab,
+ int nCol,
+ u8 *abPK
+){
+ if( pTab->azCol && nCol<pTab->nCol ){
+ int ii;
+ for(ii=0; ii<pTab->nCol; ii++){
+ u8 bPK = (ii < nCol) ? abPK[ii] : 0;
+ if( pTab->abPK[ii]!=bPK ) return 0;
+ }
+ return 1;
+ }
+ return (pTab->nCol==nCol && 0==memcmp(abPK, pTab->abPK, nCol));
+}
+
+static int sessionChangesetExtendRecord(
+ sqlite3_changegroup *pGrp,
+ SessionTable *pTab,
+ int nCol,
+ int op,
+ const u8 *aRec,
+ int nRec,
+ SessionBuffer *pOut
+){
+ int rc = SQLITE_OK;
+ int ii = 0;
+
+ assert( pTab->azCol );
+ assert( nCol<pTab->nCol );
+
+ pOut->nBuf = 0;
+ if( op==SQLITE_INSERT || (op==SQLITE_DELETE && pGrp->bPatch==0) ){
+ /* Append the missing default column values to the record. */
+ sessionAppendBlob(pOut, aRec, nRec, &rc);
+ if( rc==SQLITE_OK && pTab->pDfltStmt==0 ){
+ rc = sessionPrepareDfltStmt(pGrp->db, pTab, &pTab->pDfltStmt);
+ }
+ for(ii=nCol; rc==SQLITE_OK && ii<pTab->nCol; ii++){
+ int eType = sqlite3_column_type(pTab->pDfltStmt, ii);
+ sessionAppendByte(pOut, eType, &rc);
+ switch( eType ){
+ case SQLITE_FLOAT:
+ case SQLITE_INTEGER: {
+ i64 iVal;
+ if( eType==SQLITE_INTEGER ){
+ iVal = sqlite3_column_int64(pTab->pDfltStmt, ii);
+ }else{
+ double rVal = sqlite3_column_int64(pTab->pDfltStmt, ii);
+ memcpy(&iVal, &rVal, sizeof(i64));
+ }
+ if( SQLITE_OK==sessionBufferGrow(pOut, 8, &rc) ){
+ sessionPutI64(&pOut->aBuf[pOut->nBuf], iVal);
+ }
+ break;
+ }
+
+ case SQLITE_BLOB:
+ case SQLITE_TEXT: {
+ int n = sqlite3_column_bytes(pTab->pDfltStmt, ii);
+ sessionAppendVarint(pOut, n, &rc);
+ if( eType==SQLITE_TEXT ){
+ const u8 *z = (const u8*)sqlite3_column_text(pTab->pDfltStmt, ii);
+ sessionAppendBlob(pOut, z, n, &rc);
+ }else{
+ const u8 *z = (const u8*)sqlite3_column_blob(pTab->pDfltStmt, ii);
+ sessionAppendBlob(pOut, z, n, &rc);
+ }
+ break;
+ }
+
+ default:
+ assert( eType==SQLITE_NULL );
+ break;
+ }
+ }
+ }else if( op==SQLITE_UPDATE ){
+ /* Append missing "undefined" entries to the old.* record. And, if this
+ ** is an UPDATE, to the new.* record as well. */
+ int iOff = 0;
+ if( pGrp->bPatch==0 ){
+ for(ii=0; ii<nCol; ii++){
+ iOff += sessionSerialLen(&aRec[iOff]);
+ }
+ sessionAppendBlob(pOut, aRec, iOff, &rc);
+ for(ii=0; ii<(pTab->nCol-nCol); ii++){
+ sessionAppendByte(pOut, 0x00, &rc);
+ }
+ }
+
+ sessionAppendBlob(pOut, &aRec[iOff], nRec-iOff, &rc);
+ for(ii=0; ii<(pTab->nCol-nCol); ii++){
+ sessionAppendByte(pOut, 0x00, &rc);
+ }
+ }else{
+ assert( op==SQLITE_DELETE && pGrp->bPatch );
+ sessionAppendBlob(pOut, aRec, nRec, &rc);
+ }
+
+ return rc;
+}
+
+/*
** Add all changes in the changeset traversed by the iterator passed as
** the first argument to the changegroup hash tables.
*/
@@ -223113,6 +228729,7 @@ static int sessionChangesetToHash(
int nRec;
int rc = SQLITE_OK;
SessionTable *pTab = 0;
+ SessionBuffer rec = {0, 0, 0};
while( SQLITE_ROW==sessionChangesetNext(pIter, &aRec, &nRec, 0) ){
const char *zNew;
@@ -223124,6 +228741,9 @@ static int sessionChangesetToHash(
SessionChange *pExist = 0;
SessionChange **pp;
+ /* Ensure that only changesets, or only patchsets, but not a mixture
+ ** of both, are being combined. It is an error to try to combine a
+ ** changeset and a patchset. */
if( pGrp->pList==0 ){
pGrp->bPatch = pIter->bPatchset;
}else if( pIter->bPatchset!=pGrp->bPatch ){
@@ -223156,18 +228776,38 @@ static int sessionChangesetToHash(
pTab->zName = (char*)&pTab->abPK[nCol];
memcpy(pTab->zName, zNew, nNew+1);
+ if( pGrp->db ){
+ pTab->nCol = 0;
+ rc = sessionInitTable(0, pTab, pGrp->db, pGrp->zDb);
+ if( rc ){
+ assert( pTab->azCol==0 );
+ sqlite3_free(pTab);
+ break;
+ }
+ }
+
/* The new object must be linked on to the end of the list, not
** simply added to the start of it. This is to ensure that the
** tables within the output of sqlite3changegroup_output() are in
** the right order. */
for(ppTab=&pGrp->pList; *ppTab; ppTab=&(*ppTab)->pNext);
*ppTab = pTab;
- }else if( pTab->nCol!=nCol || memcmp(pTab->abPK, abPK, nCol) ){
+ }
+
+ if( !sessionChangesetCheckCompat(pTab, nCol, abPK) ){
rc = SQLITE_SCHEMA;
break;
}
}
+ if( nCol<pTab->nCol ){
+ assert( pGrp->db );
+ rc = sessionChangesetExtendRecord(pGrp, pTab, nCol, op, aRec, nRec, &rec);
+ if( rc ) break;
+ aRec = rec.aBuf;
+ nRec = rec.nBuf;
+ }
+
if( sessionGrowHash(0, pIter->bPatchset, pTab) ){
rc = SQLITE_NOMEM;
break;
@@ -223205,6 +228845,7 @@ static int sessionChangesetToHash(
}
}
+ sqlite3_free(rec.aBuf);
if( rc==SQLITE_OK ) rc = pIter->rc;
return rc;
}
@@ -223292,6 +228933,31 @@ SQLITE_API int sqlite3changegroup_new(sqlite3_changegroup **pp){
}
/*
+** Provide a database schema to the changegroup object.
+*/
+SQLITE_API int sqlite3changegroup_schema(
+ sqlite3_changegroup *pGrp,
+ sqlite3 *db,
+ const char *zDb
+){
+ int rc = SQLITE_OK;
+
+ if( pGrp->pList || pGrp->db ){
+ /* Cannot add a schema after one or more calls to sqlite3changegroup_add(),
+ ** or after sqlite3changegroup_schema() has already been called. */
+ rc = SQLITE_MISUSE;
+ }else{
+ pGrp->zDb = sqlite3_mprintf("%s", zDb);
+ if( pGrp->zDb==0 ){
+ rc = SQLITE_NOMEM;
+ }else{
+ pGrp->db = db;
+ }
+ }
+ return rc;
+}
+
+/*
** Add the changeset currently stored in buffer pData, size nData bytes,
** to changeset-group p.
*/
@@ -223354,6 +229020,7 @@ SQLITE_API int sqlite3changegroup_output_strm(
*/
SQLITE_API void sqlite3changegroup_delete(sqlite3_changegroup *pGrp){
if( pGrp ){
+ sqlite3_free(pGrp->zDb);
sessionDeleteTable(0, pGrp->pList);
sqlite3_free(pGrp);
}
@@ -223886,8 +229553,11 @@ struct Fts5PhraseIter {
** created with the "columnsize=0" option.
**
** xColumnText:
-** This function attempts to retrieve the text of column iCol of the
-** current document. If successful, (*pz) is set to point to a buffer
+** If parameter iCol is less than zero, or greater than or equal to the
+** number of columns in the table, SQLITE_RANGE is returned.
+**
+** Otherwise, this function attempts to retrieve the text of column iCol of
+** the current document. If successful, (*pz) is set to point to a buffer
** containing the text in utf-8 encoding, (*pn) is set to the size in bytes
** (not characters) of the buffer and SQLITE_OK is returned. Otherwise,
** if an error occurs, an SQLite error code is returned and the final values
@@ -223897,8 +229567,10 @@ struct Fts5PhraseIter {
** Returns the number of phrases in the current query expression.
**
** xPhraseSize:
-** Returns the number of tokens in phrase iPhrase of the query. Phrases
-** are numbered starting from zero.
+** If parameter iCol is less than zero, or greater than or equal to the
+** number of phrases in the current query, as returned by xPhraseCount,
+** 0 is returned. Otherwise, this function returns the number of tokens in
+** phrase iPhrase of the query. Phrases are numbered starting from zero.
**
** xInstCount:
** Set *pnInst to the total number of occurrences of all phrases within
@@ -223914,12 +229586,13 @@ struct Fts5PhraseIter {
** Query for the details of phrase match iIdx within the current row.
** Phrase matches are numbered starting from zero, so the iIdx argument
** should be greater than or equal to zero and smaller than the value
-** output by xInstCount().
+** output by xInstCount(). If iIdx is less than zero or greater than
+** or equal to the value returned by xInstCount(), SQLITE_RANGE is returned.
**
-** Usually, output parameter *piPhrase is set to the phrase number, *piCol
+** Otherwise, output parameter *piPhrase is set to the phrase number, *piCol
** to the column in which it occurs and *piOff the token offset of the
-** first token of the phrase. Returns SQLITE_OK if successful, or an error
-** code (i.e. SQLITE_NOMEM) if an error occurs.
+** first token of the phrase. SQLITE_OK is returned if successful, or an
+** error code (i.e. SQLITE_NOMEM) if an error occurs.
**
** This API can be quite slow if used with an FTS5 table created with the
** "detail=none" or "detail=column" option.
@@ -223945,6 +229618,10 @@ struct Fts5PhraseIter {
** Invoking Api.xUserData() returns a copy of the pointer passed as
** the third argument to pUserData.
**
+** If parameter iPhrase is less than zero, or greater than or equal to
+** the number of phrases in the query, as returned by xPhraseCount(),
+** this function returns SQLITE_RANGE.
+**
** If the callback function returns any value other than SQLITE_OK, the
** query is abandoned and the xQueryPhrase function returns immediately.
** If the returned value is SQLITE_DONE, xQueryPhrase returns SQLITE_OK.
@@ -224059,6 +229736,39 @@ struct Fts5PhraseIter {
**
** xPhraseNextColumn()
** See xPhraseFirstColumn above.
+**
+** xQueryToken(pFts5, iPhrase, iToken, ppToken, pnToken)
+** This is used to access token iToken of phrase iPhrase of the current
+** query. Before returning, output parameter *ppToken is set to point
+** to a buffer containing the requested token, and *pnToken to the
+** size of this buffer in bytes.
+**
+** If iPhrase or iToken are less than zero, or if iPhrase is greater than
+** or equal to the number of phrases in the query as reported by
+** xPhraseCount(), or if iToken is equal to or greater than the number of
+** tokens in the phrase, SQLITE_RANGE is returned and *ppToken and *pnToken
+ are both zeroed.
+**
+** The output text is not a copy of the query text that specified the
+** token. It is the output of the tokenizer module. For tokendata=1
+** tables, this includes any embedded 0x00 and trailing data.
+**
+** xInstToken(pFts5, iIdx, iToken, ppToken, pnToken)
+** This is used to access token iToken of phrase hit iIdx within the
+** current row. If iIdx is less than zero or greater than or equal to the
+** value returned by xInstCount(), SQLITE_RANGE is returned. Otherwise,
+** output variable (*ppToken) is set to point to a buffer containing the
+** matching document token, and (*pnToken) to the size of that buffer in
+** bytes. This API is not available if the specified token matches a
+** prefix query term. In that case both output variables are always set
+** to 0.
+**
+** The output text is not a copy of the document text that was tokenized.
+** It is the output of the tokenizer module. For tokendata=1 tables, this
+** includes any embedded 0x00 and trailing data.
+**
+** This API can be quite slow if used with an FTS5 table created with the
+** "detail=none" or "detail=column" option.
*/
struct Fts5ExtensionApi {
int iVersion; /* Currently always set to 3 */
@@ -224096,6 +229806,13 @@ struct Fts5ExtensionApi {
int (*xPhraseFirstColumn)(Fts5Context*, int iPhrase, Fts5PhraseIter*, int*);
void (*xPhraseNextColumn)(Fts5Context*, Fts5PhraseIter*, int *piCol);
+
+ /* Below this point are iVersion>=3 only */
+ int (*xQueryToken)(Fts5Context*,
+ int iPhrase, int iToken,
+ const char **ppToken, int *pnToken
+ );
+ int (*xInstToken)(Fts5Context*, int iIdx, int iToken, const char**, int*);
};
/*
@@ -224290,8 +230007,8 @@ struct Fts5ExtensionApi {
** as separate queries of the FTS index are required for each synonym.
**
** When using methods (2) or (3), it is important that the tokenizer only
-** provide synonyms when tokenizing document text (method (2)) or query
-** text (method (3)), not both. Doing so will not cause any errors, but is
+** provide synonyms when tokenizing document text (method (3)) or query
+** text (method (2)), not both. Doing so will not cause any errors, but is
** inefficient.
*/
typedef struct Fts5Tokenizer Fts5Tokenizer;
@@ -224339,7 +230056,7 @@ struct fts5_api {
int (*xCreateTokenizer)(
fts5_api *pApi,
const char *zName,
- void *pContext,
+ void *pUserData,
fts5_tokenizer *pTokenizer,
void (*xDestroy)(void*)
);
@@ -224348,7 +230065,7 @@ struct fts5_api {
int (*xFindTokenizer)(
fts5_api *pApi,
const char *zName,
- void **ppContext,
+ void **ppUserData,
fts5_tokenizer *pTokenizer
);
@@ -224356,7 +230073,7 @@ struct fts5_api {
int (*xCreateFunction)(
fts5_api *pApi,
const char *zName,
- void *pContext,
+ void *pUserData,
fts5_extension_function xFunction,
void (*xDestroy)(void*)
);
@@ -224528,6 +230245,10 @@ typedef struct Fts5Config Fts5Config;
** attempt to merge together. A value of 1 sets the object to use the
** compile time default. Zero disables auto-merge altogether.
**
+** bContentlessDelete:
+** True if the contentless_delete option was present in the CREATE
+** VIRTUAL TABLE statement.
+**
** zContent:
**
** zContentRowid:
@@ -224562,9 +230283,11 @@ struct Fts5Config {
int nPrefix; /* Number of prefix indexes */
int *aPrefix; /* Sizes in bytes of nPrefix prefix indexes */
int eContent; /* An FTS5_CONTENT value */
+ int bContentlessDelete; /* "contentless_delete=" option (dflt==0) */
char *zContent; /* content table */
char *zContentRowid; /* "content_rowid=" option value */
int bColumnsize; /* "columnsize=" option value (dflt==1) */
+ int bTokendata; /* "tokendata=" option value (dflt==0) */
int eDetail; /* FTS5_DETAIL_XXX value */
char *zContentExprlist;
Fts5Tokenizer *pTok;
@@ -224583,6 +230306,7 @@ struct Fts5Config {
char *zRank; /* Name of rank function */
char *zRankArgs; /* Arguments to rank function */
int bSecureDelete; /* 'secure-delete' */
+ int nDeleteMerge; /* 'deletemerge' */
/* If non-NULL, points to sqlite3_vtab.base.zErrmsg. Often NULL. */
char **pzErrmsg;
@@ -224752,17 +230476,19 @@ struct Fts5IndexIter {
/*
** Values used as part of the flags argument passed to IndexQuery().
*/
-#define FTS5INDEX_QUERY_PREFIX 0x0001 /* Prefix query */
-#define FTS5INDEX_QUERY_DESC 0x0002 /* Docs in descending rowid order */
-#define FTS5INDEX_QUERY_TEST_NOIDX 0x0004 /* Do not use prefix index */
-#define FTS5INDEX_QUERY_SCAN 0x0008 /* Scan query (fts5vocab) */
+#define FTS5INDEX_QUERY_PREFIX 0x0001 /* Prefix query */
+#define FTS5INDEX_QUERY_DESC 0x0002 /* Docs in descending rowid order */
+#define FTS5INDEX_QUERY_TEST_NOIDX 0x0004 /* Do not use prefix index */
+#define FTS5INDEX_QUERY_SCAN 0x0008 /* Scan query (fts5vocab) */
/* The following are used internally by the fts5_index.c module. They are
** defined here only to make it easier to avoid clashes with the flags
** above. */
-#define FTS5INDEX_QUERY_SKIPEMPTY 0x0010
-#define FTS5INDEX_QUERY_NOOUTPUT 0x0020
-#define FTS5INDEX_QUERY_SKIPHASH 0x0040
+#define FTS5INDEX_QUERY_SKIPEMPTY 0x0010
+#define FTS5INDEX_QUERY_NOOUTPUT 0x0020
+#define FTS5INDEX_QUERY_SKIPHASH 0x0040
+#define FTS5INDEX_QUERY_NOTOKENDATA 0x0080
+#define FTS5INDEX_QUERY_SCANONETERM 0x0100
/*
** Create/destroy an Fts5Index object.
@@ -224831,6 +230557,10 @@ static void *sqlite3Fts5StructureRef(Fts5Index*);
static void sqlite3Fts5StructureRelease(void*);
static int sqlite3Fts5StructureTest(Fts5Index*, void*);
+/*
+** Used by xInstToken():
+*/
+static int sqlite3Fts5IterToken(Fts5IndexIter*, i64, int, int, const char**, int*);
/*
** Insert or remove data to or from the index. Each time a document is
@@ -224905,6 +230635,16 @@ static int sqlite3Fts5IndexReset(Fts5Index *p);
static int sqlite3Fts5IndexLoadConfig(Fts5Index *p);
+static int sqlite3Fts5IndexGetOrigin(Fts5Index *p, i64 *piOrigin);
+static int sqlite3Fts5IndexContentlessDelete(Fts5Index *p, i64 iOrigin, i64 iRowid);
+
+static void sqlite3Fts5IndexIterClearTokendata(Fts5IndexIter*);
+
+/* Used to populate hash tables for xInstToken in detail=none/column mode. */
+static int sqlite3Fts5IndexIterWriteTokendata(
+ Fts5IndexIter*, const char*, int, i64 iRowid, int iCol, int iOff
+);
+
/*
** End of interface to code in fts5_index.c.
**************************************************************************/
@@ -224989,6 +230729,11 @@ static int sqlite3Fts5HashWrite(
*/
static void sqlite3Fts5HashClear(Fts5Hash*);
+/*
+** Return true if the hash is empty, false otherwise.
+*/
+static int sqlite3Fts5HashIsEmpty(Fts5Hash*);
+
static int sqlite3Fts5HashQuery(
Fts5Hash*, /* Hash table to query */
int nPre,
@@ -225005,11 +230750,13 @@ static void sqlite3Fts5HashScanNext(Fts5Hash*);
static int sqlite3Fts5HashScanEof(Fts5Hash*);
static void sqlite3Fts5HashScanEntry(Fts5Hash *,
const char **pzTerm, /* OUT: term (nul-terminated) */
+ int *pnTerm, /* OUT: Size of term in bytes */
const u8 **ppDoclist, /* OUT: pointer to doclist */
int *pnDoclist /* OUT: size of doclist in bytes */
);
+
/*
** End of interface to code in fts5_hash.c.
**************************************************************************/
@@ -225130,6 +230877,10 @@ static int sqlite3Fts5ExprClonePhrase(Fts5Expr*, int, Fts5Expr**);
static int sqlite3Fts5ExprPhraseCollist(Fts5Expr *, int, const u8 **, int *);
+static int sqlite3Fts5ExprQueryToken(Fts5Expr*, int, int, const char**, int*);
+static int sqlite3Fts5ExprInstToken(Fts5Expr*, i64, int, int, int, int, const char**, int*);
+static void sqlite3Fts5ExprClearTokens(Fts5Expr*);
+
/*******************************************
** The fts5_expr.c API above this point is used by the other hand-written
** C code in this module. The interfaces below this point are called by
@@ -225253,7 +231004,8 @@ static void sqlite3Fts5UnicodeAscii(u8*, u8*);
#define FTS5_STAR 15
/* This file is automatically generated by Lemon from input grammar
-** source file "fts5parse.y". */
+** source file "fts5parse.y".
+*/
/*
** 2000-05-29
**
@@ -226843,15 +232595,19 @@ static int fts5CInstIterInit(
*/
typedef struct HighlightContext HighlightContext;
struct HighlightContext {
- CInstIter iter; /* Coalesced Instance Iterator */
- int iPos; /* Current token offset in zIn[] */
+ /* Constant parameters to fts5HighlightCb() */
int iRangeStart; /* First token to include */
int iRangeEnd; /* If non-zero, last token to include */
const char *zOpen; /* Opening highlight */
const char *zClose; /* Closing highlight */
const char *zIn; /* Input text */
int nIn; /* Size of input text in bytes */
- int iOff; /* Current offset within zIn[] */
+
+ /* Variables modified by fts5HighlightCb() */
+ CInstIter iter; /* Coalesced Instance Iterator */
+ int iPos; /* Current token offset in zIn[] */
+ int iOff; /* Have copied up to this offset in zIn[] */
+ int bOpen; /* True if highlight is open */
char *zOut; /* Output value */
};
@@ -226884,8 +232640,8 @@ static int fts5HighlightCb(
int tflags, /* Mask of FTS5_TOKEN_* flags */
const char *pToken, /* Buffer containing token */
int nToken, /* Size of token in bytes */
- int iStartOff, /* Start offset of token */
- int iEndOff /* End offset of token */
+ int iStartOff, /* Start byte offset of token */
+ int iEndOff /* End byte offset of token */
){
HighlightContext *p = (HighlightContext*)pContext;
int rc = SQLITE_OK;
@@ -226901,30 +232657,55 @@ static int fts5HighlightCb(
if( p->iRangeStart && iPos==p->iRangeStart ) p->iOff = iStartOff;
}
- if( iPos==p->iter.iStart ){
+ /* If the parenthesis is open, and this token is not part of the current
+ ** phrase, and the starting byte offset of this token is past the point
+ ** that has currently been copied into the output buffer, close the
+ ** parenthesis. */
+ if( p->bOpen
+ && (iPos<=p->iter.iStart || p->iter.iStart<0)
+ && iStartOff>p->iOff
+ ){
+ fts5HighlightAppend(&rc, p, p->zClose, -1);
+ p->bOpen = 0;
+ }
+
+ /* If this is the start of a new phrase, and the highlight is not open:
+ **
+ ** * copy text from the input up to the start of the phrase, and
+ ** * open the highlight.
+ */
+ if( iPos==p->iter.iStart && p->bOpen==0 ){
fts5HighlightAppend(&rc, p, &p->zIn[p->iOff], iStartOff - p->iOff);
fts5HighlightAppend(&rc, p, p->zOpen, -1);
p->iOff = iStartOff;
+ p->bOpen = 1;
}
if( iPos==p->iter.iEnd ){
- if( p->iRangeEnd>=0 && p->iter.iStart<p->iRangeStart ){
+ if( p->bOpen==0 ){
+ assert( p->iRangeEnd>=0 );
fts5HighlightAppend(&rc, p, p->zOpen, -1);
+ p->bOpen = 1;
}
fts5HighlightAppend(&rc, p, &p->zIn[p->iOff], iEndOff - p->iOff);
- fts5HighlightAppend(&rc, p, p->zClose, -1);
p->iOff = iEndOff;
+
if( rc==SQLITE_OK ){
rc = fts5CInstIterNext(&p->iter);
}
}
- if( p->iRangeEnd>=0 && iPos==p->iRangeEnd ){
- fts5HighlightAppend(&rc, p, &p->zIn[p->iOff], iEndOff - p->iOff);
- p->iOff = iEndOff;
- if( iPos>=p->iter.iStart && iPos<p->iter.iEnd ){
+ if( iPos==p->iRangeEnd ){
+ if( p->bOpen ){
+ if( p->iter.iStart>=0 && iPos>=p->iter.iStart ){
+ fts5HighlightAppend(&rc, p, &p->zIn[p->iOff], iEndOff - p->iOff);
+ p->iOff = iEndOff;
+ }
fts5HighlightAppend(&rc, p, p->zClose, -1);
+ p->bOpen = 0;
}
+ fts5HighlightAppend(&rc, p, &p->zIn[p->iOff], iEndOff - p->iOff);
+ p->iOff = iEndOff;
}
return rc;
@@ -226956,8 +232737,10 @@ static void fts5HighlightFunction(
ctx.zClose = (const char*)sqlite3_value_text(apVal[2]);
ctx.iRangeEnd = -1;
rc = pApi->xColumnText(pFts, iCol, &ctx.zIn, &ctx.nIn);
-
- if( ctx.zIn ){
+ if( rc==SQLITE_RANGE ){
+ sqlite3_result_text(pCtx, "", -1, SQLITE_STATIC);
+ rc = SQLITE_OK;
+ }else if( ctx.zIn ){
if( rc==SQLITE_OK ){
rc = fts5CInstIterInit(pApi, pFts, iCol, &ctx.iter);
}
@@ -226965,6 +232748,9 @@ static void fts5HighlightFunction(
if( rc==SQLITE_OK ){
rc = pApi->xTokenize(pFts, ctx.zIn, ctx.nIn, (void*)&ctx,fts5HighlightCb);
}
+ if( ctx.bOpen ){
+ fts5HighlightAppend(&rc, &ctx, ctx.zClose, -1);
+ }
fts5HighlightAppend(&rc, &ctx, &ctx.zIn[ctx.iOff], ctx.nIn - ctx.iOff);
if( rc==SQLITE_OK ){
@@ -227243,6 +233029,9 @@ static void fts5SnippetFunction(
if( rc==SQLITE_OK ){
rc = pApi->xTokenize(pFts, ctx.zIn, ctx.nIn, (void*)&ctx,fts5HighlightCb);
}
+ if( ctx.bOpen ){
+ fts5HighlightAppend(&rc, &ctx, ctx.zClose, -1);
+ }
if( ctx.iRangeEnd>=(nColSize-1) ){
fts5HighlightAppend(&rc, &ctx, &ctx.zIn[ctx.iOff], ctx.nIn - ctx.iOff);
}else{
@@ -227518,6 +233307,7 @@ static void sqlite3Fts5BufferAppendBlob(
){
if( nData ){
if( fts5BufferGrow(pRc, pBuf, nData) ) return;
+ assert( pBuf->p!=0 );
memcpy(&pBuf->p[pBuf->n], pData, nData);
pBuf->n += nData;
}
@@ -227619,6 +233409,7 @@ static int sqlite3Fts5PoslistNext64(
i64 *piOff /* IN/OUT: Current offset */
){
int i = *pi;
+ assert( a!=0 || i==0 );
if( i>=n ){
/* EOF */
*piOff = -1;
@@ -227626,6 +233417,7 @@ static int sqlite3Fts5PoslistNext64(
}else{
i64 iOff = *piOff;
u32 iVal;
+ assert( a!=0 );
fts5FastGetVarint32(a, i, iVal);
if( iVal<=1 ){
if( iVal==0 ){
@@ -227881,6 +233673,8 @@ static void sqlite3Fts5TermsetFree(Fts5Termset *p){
#define FTS5_DEFAULT_CRISISMERGE 16
#define FTS5_DEFAULT_HASHSIZE (1024*1024)
+#define FTS5_DEFAULT_DELETE_AUTOMERGE 10 /* default 10% */
+
/* Maximum allowed page size */
#define FTS5_MAX_PAGE_SIZE (64*1024)
@@ -228211,6 +234005,16 @@ static int fts5ConfigParseSpecial(
return rc;
}
+ if( sqlite3_strnicmp("contentless_delete", zCmd, nCmd)==0 ){
+ if( (zArg[0]!='0' && zArg[0]!='1') || zArg[1]!='\0' ){
+ *pzErr = sqlite3_mprintf("malformed contentless_delete=... directive");
+ rc = SQLITE_ERROR;
+ }else{
+ pConfig->bContentlessDelete = (zArg[0]=='1');
+ }
+ return rc;
+ }
+
if( sqlite3_strnicmp("content_rowid", zCmd, nCmd)==0 ){
if( pConfig->zContentRowid ){
*pzErr = sqlite3_mprintf("multiple content_rowid=... directives");
@@ -228245,6 +234049,16 @@ static int fts5ConfigParseSpecial(
return rc;
}
+ if( sqlite3_strnicmp("tokendata", zCmd, nCmd)==0 ){
+ if( (zArg[0]!='0' && zArg[0]!='1') || zArg[1]!='\0' ){
+ *pzErr = sqlite3_mprintf("malformed tokendata=... directive");
+ rc = SQLITE_ERROR;
+ }else{
+ pConfig->bTokendata = (zArg[0]=='1');
+ }
+ return rc;
+ }
+
*pzErr = sqlite3_mprintf("unrecognized option: \"%.*s\"", nCmd, zCmd);
return SQLITE_ERROR;
}
@@ -228455,6 +234269,28 @@ static int sqlite3Fts5ConfigParse(
sqlite3_free(zTwo);
}
+ /* We only allow contentless_delete=1 if the table is indeed contentless. */
+ if( rc==SQLITE_OK
+ && pRet->bContentlessDelete
+ && pRet->eContent!=FTS5_CONTENT_NONE
+ ){
+ *pzErr = sqlite3_mprintf(
+ "contentless_delete=1 requires a contentless table"
+ );
+ rc = SQLITE_ERROR;
+ }
+
+ /* We only allow contentless_delete=1 if columnsize=0 is not present.
+ **
+ ** This restriction may be removed at some point.
+ */
+ if( rc==SQLITE_OK && pRet->bContentlessDelete && pRet->bColumnsize==0 ){
+ *pzErr = sqlite3_mprintf(
+ "contentless_delete=1 is incompatible with columnsize=0"
+ );
+ rc = SQLITE_ERROR;
+ }
+
/* If a tokenizer= option was successfully parsed, the tokenizer has
** already been allocated. Otherwise, allocate an instance of the default
** tokenizer (unicode61) now. */
@@ -228749,6 +234585,18 @@ static int sqlite3Fts5ConfigSetValue(
}
}
+ else if( 0==sqlite3_stricmp(zKey, "deletemerge") ){
+ int nVal = -1;
+ if( SQLITE_INTEGER==sqlite3_value_numeric_type(pVal) ){
+ nVal = sqlite3_value_int(pVal);
+ }else{
+ *pbBadkey = 1;
+ }
+ if( nVal<0 ) nVal = FTS5_DEFAULT_DELETE_AUTOMERGE;
+ if( nVal>100 ) nVal = 0;
+ pConfig->nDeleteMerge = nVal;
+ }
+
else if( 0==sqlite3_stricmp(zKey, "rank") ){
const char *zIn = (const char*)sqlite3_value_text(pVal);
char *zRank;
@@ -228797,6 +234645,7 @@ static int sqlite3Fts5ConfigLoad(Fts5Config *pConfig, int iCookie){
pConfig->nUsermerge = FTS5_DEFAULT_USERMERGE;
pConfig->nCrisisMerge = FTS5_DEFAULT_CRISISMERGE;
pConfig->nHashSize = FTS5_DEFAULT_HASHSIZE;
+ pConfig->nDeleteMerge = FTS5_DEFAULT_DELETE_AUTOMERGE;
zSql = sqlite3Fts5Mprintf(&rc, zSelect, pConfig->zDb, pConfig->zName);
if( zSql ){
@@ -228943,7 +234792,9 @@ struct Fts5ExprNode {
struct Fts5ExprTerm {
u8 bPrefix; /* True for a prefix term */
u8 bFirst; /* True if token must be first in column */
- char *zTerm; /* nul-terminated term */
+ char *pTerm; /* Term data */
+ int nQueryTerm; /* Effective size of term in bytes */
+ int nFullTerm; /* Size of term in bytes incl. tokendata */
Fts5IndexIter *pIter; /* Iterator for this term */
Fts5ExprTerm *pSynonym; /* Pointer to first in list of synonyms */
};
@@ -229810,7 +235661,7 @@ static int fts5ExprNearInitAll(
p->pIter = 0;
}
rc = sqlite3Fts5IndexQuery(
- pExpr->pIndex, p->zTerm, (int)strlen(p->zTerm),
+ pExpr->pIndex, p->pTerm, p->nQueryTerm,
(pTerm->bPrefix ? FTS5INDEX_QUERY_PREFIX : 0) |
(pExpr->bDesc ? FTS5INDEX_QUERY_DESC : 0),
pNear->pColset,
@@ -230447,7 +236298,7 @@ static void fts5ExprPhraseFree(Fts5ExprPhrase *pPhrase){
Fts5ExprTerm *pSyn;
Fts5ExprTerm *pNext;
Fts5ExprTerm *pTerm = &pPhrase->aTerm[i];
- sqlite3_free(pTerm->zTerm);
+ sqlite3_free(pTerm->pTerm);
sqlite3Fts5IterClose(pTerm->pIter);
for(pSyn=pTerm->pSynonym; pSyn; pSyn=pNext){
pNext = pSyn->pSynonym;
@@ -230545,6 +236396,7 @@ static Fts5ExprNearset *sqlite3Fts5ParseNearset(
typedef struct TokenCtx TokenCtx;
struct TokenCtx {
Fts5ExprPhrase *pPhrase;
+ Fts5Config *pConfig;
int rc;
};
@@ -230578,8 +236430,12 @@ static int fts5ParseTokenize(
rc = SQLITE_NOMEM;
}else{
memset(pSyn, 0, (size_t)nByte);
- pSyn->zTerm = ((char*)pSyn) + sizeof(Fts5ExprTerm) + sizeof(Fts5Buffer);
- memcpy(pSyn->zTerm, pToken, nToken);
+ pSyn->pTerm = ((char*)pSyn) + sizeof(Fts5ExprTerm) + sizeof(Fts5Buffer);
+ pSyn->nFullTerm = pSyn->nQueryTerm = nToken;
+ if( pCtx->pConfig->bTokendata ){
+ pSyn->nQueryTerm = (int)strlen(pSyn->pTerm);
+ }
+ memcpy(pSyn->pTerm, pToken, nToken);
pSyn->pSynonym = pPhrase->aTerm[pPhrase->nTerm-1].pSynonym;
pPhrase->aTerm[pPhrase->nTerm-1].pSynonym = pSyn;
}
@@ -230604,7 +236460,11 @@ static int fts5ParseTokenize(
if( rc==SQLITE_OK ){
pTerm = &pPhrase->aTerm[pPhrase->nTerm++];
memset(pTerm, 0, sizeof(Fts5ExprTerm));
- pTerm->zTerm = sqlite3Fts5Strndup(&rc, pToken, nToken);
+ pTerm->pTerm = sqlite3Fts5Strndup(&rc, pToken, nToken);
+ pTerm->nFullTerm = pTerm->nQueryTerm = nToken;
+ if( pCtx->pConfig->bTokendata && rc==SQLITE_OK ){
+ pTerm->nQueryTerm = (int)strlen(pTerm->pTerm);
+ }
}
}
@@ -230671,6 +236531,7 @@ static Fts5ExprPhrase *sqlite3Fts5ParseTerm(
memset(&sCtx, 0, sizeof(TokenCtx));
sCtx.pPhrase = pAppend;
+ sCtx.pConfig = pConfig;
rc = fts5ParseStringFromToken(pToken, &z);
if( rc==SQLITE_OK ){
@@ -230718,12 +236579,15 @@ static int sqlite3Fts5ExprClonePhrase(
Fts5Expr **ppNew
){
int rc = SQLITE_OK; /* Return code */
- Fts5ExprPhrase *pOrig; /* The phrase extracted from pExpr */
+ Fts5ExprPhrase *pOrig = 0; /* The phrase extracted from pExpr */
Fts5Expr *pNew = 0; /* Expression to return via *ppNew */
- TokenCtx sCtx = {0,0}; /* Context object for fts5ParseTokenize */
-
- pOrig = pExpr->apExprPhrase[iPhrase];
- pNew = (Fts5Expr*)sqlite3Fts5MallocZero(&rc, sizeof(Fts5Expr));
+ TokenCtx sCtx = {0,0,0}; /* Context object for fts5ParseTokenize */
+ if( iPhrase<0 || iPhrase>=pExpr->nPhrase ){
+ rc = SQLITE_RANGE;
+ }else{
+ pOrig = pExpr->apExprPhrase[iPhrase];
+ pNew = (Fts5Expr*)sqlite3Fts5MallocZero(&rc, sizeof(Fts5Expr));
+ }
if( rc==SQLITE_OK ){
pNew->apExprPhrase = (Fts5ExprPhrase**)sqlite3Fts5MallocZero(&rc,
sizeof(Fts5ExprPhrase*));
@@ -230736,7 +236600,7 @@ static int sqlite3Fts5ExprClonePhrase(
pNew->pRoot->pNear = (Fts5ExprNearset*)sqlite3Fts5MallocZero(&rc,
sizeof(Fts5ExprNearset) + sizeof(Fts5ExprPhrase*));
}
- if( rc==SQLITE_OK ){
+ if( rc==SQLITE_OK && ALWAYS(pOrig!=0) ){
Fts5Colset *pColsetOrig = pOrig->pNode->pNear->pColset;
if( pColsetOrig ){
sqlite3_int64 nByte;
@@ -230750,26 +236614,27 @@ static int sqlite3Fts5ExprClonePhrase(
}
}
- if( pOrig->nTerm ){
- int i; /* Used to iterate through phrase terms */
- for(i=0; rc==SQLITE_OK && i<pOrig->nTerm; i++){
- int tflags = 0;
- Fts5ExprTerm *p;
- for(p=&pOrig->aTerm[i]; p && rc==SQLITE_OK; p=p->pSynonym){
- const char *zTerm = p->zTerm;
- rc = fts5ParseTokenize((void*)&sCtx, tflags, zTerm, (int)strlen(zTerm),
- 0, 0);
- tflags = FTS5_TOKEN_COLOCATED;
- }
- if( rc==SQLITE_OK ){
- sCtx.pPhrase->aTerm[i].bPrefix = pOrig->aTerm[i].bPrefix;
- sCtx.pPhrase->aTerm[i].bFirst = pOrig->aTerm[i].bFirst;
+ if( rc==SQLITE_OK ){
+ if( pOrig->nTerm ){
+ int i; /* Used to iterate through phrase terms */
+ sCtx.pConfig = pExpr->pConfig;
+ for(i=0; rc==SQLITE_OK && i<pOrig->nTerm; i++){
+ int tflags = 0;
+ Fts5ExprTerm *p;
+ for(p=&pOrig->aTerm[i]; p && rc==SQLITE_OK; p=p->pSynonym){
+ rc = fts5ParseTokenize((void*)&sCtx,tflags,p->pTerm,p->nFullTerm,0,0);
+ tflags = FTS5_TOKEN_COLOCATED;
+ }
+ if( rc==SQLITE_OK ){
+ sCtx.pPhrase->aTerm[i].bPrefix = pOrig->aTerm[i].bPrefix;
+ sCtx.pPhrase->aTerm[i].bFirst = pOrig->aTerm[i].bFirst;
+ }
}
+ }else{
+ /* This happens when parsing a token or quoted phrase that contains
+ ** no token characters at all. (e.g ... MATCH '""'). */
+ sCtx.pPhrase = sqlite3Fts5MallocZero(&rc, sizeof(Fts5ExprPhrase));
}
- }else{
- /* This happens when parsing a token or quoted phrase that contains
- ** no token characters at all. (e.g ... MATCH '""'). */
- sCtx.pPhrase = sqlite3Fts5MallocZero(&rc, sizeof(Fts5ExprPhrase));
}
if( rc==SQLITE_OK && ALWAYS(sCtx.pPhrase) ){
@@ -231139,11 +237004,13 @@ static Fts5ExprNode *fts5ParsePhraseToAnd(
if( parseGrowPhraseArray(pParse) ){
fts5ExprPhraseFree(pPhrase);
}else{
+ Fts5ExprTerm *p = &pNear->apPhrase[0]->aTerm[ii];
+ Fts5ExprTerm *pTo = &pPhrase->aTerm[0];
pParse->apPhrase[pParse->nPhrase++] = pPhrase;
pPhrase->nTerm = 1;
- pPhrase->aTerm[0].zTerm = sqlite3Fts5Strndup(
- &pParse->rc, pNear->apPhrase[0]->aTerm[ii].zTerm, -1
- );
+ pTo->pTerm = sqlite3Fts5Strndup(&pParse->rc, p->pTerm, p->nFullTerm);
+ pTo->nQueryTerm = p->nQueryTerm;
+ pTo->nFullTerm = p->nFullTerm;
pRet->apChild[ii] = sqlite3Fts5ParseNode(pParse, FTS5_STRING,
0, 0, sqlite3Fts5ParseNearset(pParse, 0, pPhrase)
);
@@ -231320,7 +237187,7 @@ static Fts5ExprNode *sqlite3Fts5ParseImplicitAnd(
return pRet;
}
-#ifdef SQLITE_TEST
+#if defined(SQLITE_TEST) || defined(SQLITE_FTS5_DEBUG)
static char *fts5ExprTermPrint(Fts5ExprTerm *pTerm){
sqlite3_int64 nByte = 0;
Fts5ExprTerm *p;
@@ -231328,16 +237195,17 @@ static char *fts5ExprTermPrint(Fts5ExprTerm *pTerm){
/* Determine the maximum amount of space required. */
for(p=pTerm; p; p=p->pSynonym){
- nByte += (int)strlen(pTerm->zTerm) * 2 + 3 + 2;
+ nByte += pTerm->nQueryTerm * 2 + 3 + 2;
}
zQuoted = sqlite3_malloc64(nByte);
if( zQuoted ){
int i = 0;
for(p=pTerm; p; p=p->pSynonym){
- char *zIn = p->zTerm;
+ char *zIn = p->pTerm;
+ char *zEnd = &zIn[p->nQueryTerm];
zQuoted[i++] = '"';
- while( *zIn ){
+ while( zIn<zEnd ){
if( *zIn=='"' ) zQuoted[i++] = '"';
zQuoted[i++] = *zIn++;
}
@@ -231415,8 +237283,10 @@ static char *fts5ExprPrintTcl(
zRet = fts5PrintfAppend(zRet, " {");
for(iTerm=0; zRet && iTerm<pPhrase->nTerm; iTerm++){
- char *zTerm = pPhrase->aTerm[iTerm].zTerm;
- zRet = fts5PrintfAppend(zRet, "%s%s", iTerm==0?"":" ", zTerm);
+ Fts5ExprTerm *p = &pPhrase->aTerm[iTerm];
+ zRet = fts5PrintfAppend(zRet, "%s%.*s", iTerm==0?"":" ",
+ p->nQueryTerm, p->pTerm
+ );
if( pPhrase->aTerm[iTerm].bPrefix ){
zRet = fts5PrintfAppend(zRet, "*");
}
@@ -231426,6 +237296,8 @@ static char *fts5ExprPrintTcl(
if( zRet==0 ) return 0;
}
+ }else if( pExpr->eType==0 ){
+ zRet = sqlite3_mprintf("{}");
}else{
char const *zOp = 0;
int i;
@@ -231687,14 +237559,14 @@ static void fts5ExprFold(
sqlite3_result_int(pCtx, sqlite3Fts5UnicodeFold(iCode, bRemoveDiacritics));
}
}
-#endif /* ifdef SQLITE_TEST */
+#endif /* if SQLITE_TEST || SQLITE_FTS5_DEBUG */
/*
** This is called during initialization to register the fts5_expr() scalar
** UDF with the SQLite handle passed as the only argument.
*/
static int sqlite3Fts5ExprInit(Fts5Global *pGlobal, sqlite3 *db){
-#ifdef SQLITE_TEST
+#if defined(SQLITE_TEST) || defined(SQLITE_FTS5_DEBUG)
struct Fts5ExprFunc {
const char *z;
void (*x)(sqlite3_context*,int,sqlite3_value**);
@@ -231815,6 +237687,17 @@ static int fts5ExprColsetTest(Fts5Colset *pColset, int iCol){
return 0;
}
+/*
+** pToken is a buffer nToken bytes in size that may or may not contain
+** an embedded 0x00 byte. If it does, return the number of bytes in
+** the buffer before the 0x00. If it does not, return nToken.
+*/
+static int fts5QueryTerm(const char *pToken, int nToken){
+ int ii;
+ for(ii=0; ii<nToken && pToken[ii]; ii++){}
+ return ii;
+}
+
static int fts5ExprPopulatePoslistsCb(
void *pCtx, /* Copy of 2nd argument to xTokenize() */
int tflags, /* Mask of FTS5_TOKEN_* flags */
@@ -231826,22 +237709,33 @@ static int fts5ExprPopulatePoslistsCb(
Fts5ExprCtx *p = (Fts5ExprCtx*)pCtx;
Fts5Expr *pExpr = p->pExpr;
int i;
+ int nQuery = nToken;
+ i64 iRowid = pExpr->pRoot->iRowid;
UNUSED_PARAM2(iUnused1, iUnused2);
- if( nToken>FTS5_MAX_TOKEN_SIZE ) nToken = FTS5_MAX_TOKEN_SIZE;
+ if( nQuery>FTS5_MAX_TOKEN_SIZE ) nQuery = FTS5_MAX_TOKEN_SIZE;
+ if( pExpr->pConfig->bTokendata ){
+ nQuery = fts5QueryTerm(pToken, nQuery);
+ }
if( (tflags & FTS5_TOKEN_COLOCATED)==0 ) p->iOff++;
for(i=0; i<pExpr->nPhrase; i++){
- Fts5ExprTerm *pTerm;
+ Fts5ExprTerm *pT;
if( p->aPopulator[i].bOk==0 ) continue;
- for(pTerm=&pExpr->apExprPhrase[i]->aTerm[0]; pTerm; pTerm=pTerm->pSynonym){
- int nTerm = (int)strlen(pTerm->zTerm);
- if( (nTerm==nToken || (nTerm<nToken && pTerm->bPrefix))
- && memcmp(pTerm->zTerm, pToken, nTerm)==0
+ for(pT=&pExpr->apExprPhrase[i]->aTerm[0]; pT; pT=pT->pSynonym){
+ if( (pT->nQueryTerm==nQuery || (pT->nQueryTerm<nQuery && pT->bPrefix))
+ && memcmp(pT->pTerm, pToken, pT->nQueryTerm)==0
){
int rc = sqlite3Fts5PoslistWriterAppend(
&pExpr->apExprPhrase[i]->poslist, &p->aPopulator[i].writer, p->iOff
);
+ if( rc==SQLITE_OK && pExpr->pConfig->bTokendata && !pT->bPrefix ){
+ int iCol = p->iOff>>32;
+ int iTokOff = p->iOff & 0x7FFFFFFF;
+ rc = sqlite3Fts5IndexIterWriteTokendata(
+ pT->pIter, pToken, nToken, iRowid, iCol, iTokOff
+ );
+ }
if( rc ) return rc;
break;
}
@@ -231978,6 +237872,83 @@ static int sqlite3Fts5ExprPhraseCollist(
}
/*
+** Does the work of the fts5_api.xQueryToken() API method.
+*/
+static int sqlite3Fts5ExprQueryToken(
+ Fts5Expr *pExpr,
+ int iPhrase,
+ int iToken,
+ const char **ppOut,
+ int *pnOut
+){
+ Fts5ExprPhrase *pPhrase = 0;
+
+ if( iPhrase<0 || iPhrase>=pExpr->nPhrase ){
+ return SQLITE_RANGE;
+ }
+ pPhrase = pExpr->apExprPhrase[iPhrase];
+ if( iToken<0 || iToken>=pPhrase->nTerm ){
+ return SQLITE_RANGE;
+ }
+
+ *ppOut = pPhrase->aTerm[iToken].pTerm;
+ *pnOut = pPhrase->aTerm[iToken].nFullTerm;
+ return SQLITE_OK;
+}
+
+/*
+** Does the work of the fts5_api.xInstToken() API method.
+*/
+static int sqlite3Fts5ExprInstToken(
+ Fts5Expr *pExpr,
+ i64 iRowid,
+ int iPhrase,
+ int iCol,
+ int iOff,
+ int iToken,
+ const char **ppOut,
+ int *pnOut
+){
+ Fts5ExprPhrase *pPhrase = 0;
+ Fts5ExprTerm *pTerm = 0;
+ int rc = SQLITE_OK;
+
+ if( iPhrase<0 || iPhrase>=pExpr->nPhrase ){
+ return SQLITE_RANGE;
+ }
+ pPhrase = pExpr->apExprPhrase[iPhrase];
+ if( iToken<0 || iToken>=pPhrase->nTerm ){
+ return SQLITE_RANGE;
+ }
+ pTerm = &pPhrase->aTerm[iToken];
+ if( pTerm->bPrefix==0 ){
+ if( pExpr->pConfig->bTokendata ){
+ rc = sqlite3Fts5IterToken(
+ pTerm->pIter, iRowid, iCol, iOff+iToken, ppOut, pnOut
+ );
+ }else{
+ *ppOut = pTerm->pTerm;
+ *pnOut = pTerm->nFullTerm;
+ }
+ }
+ return rc;
+}
+
+/*
+** Clear the token mappings for all Fts5IndexIter objects mannaged by
+** the expression passed as the only argument.
+*/
+static void sqlite3Fts5ExprClearTokens(Fts5Expr *pExpr){
+ int ii;
+ for(ii=0; ii<pExpr->nPhrase; ii++){
+ Fts5ExprTerm *pT;
+ for(pT=&pExpr->apExprPhrase[ii]->aTerm[0]; pT; pT=pT->pSynonym){
+ sqlite3Fts5IndexIterClearTokendata(pT->pIter);
+ }
+ }
+}
+
+/*
** 2014 August 11
**
** The author disclaims copyright to this source code. In place of
@@ -232015,10 +237986,15 @@ struct Fts5Hash {
/*
** Each entry in the hash table is represented by an object of the
-** following type. Each object, its key (a nul-terminated string) and
-** its current data are stored in a single memory allocation. The
-** key immediately follows the object in memory. The position list
-** data immediately follows the key data in memory.
+** following type. Each object, its key, and its current data are stored
+** in a single memory allocation. The key immediately follows the object
+** in memory. The position list data immediately follows the key data
+** in memory.
+**
+** The key is Fts5HashEntry.nKey bytes in size. It consists of a single
+** byte identifying the index (either the main term index or a prefix-index),
+** followed by the term data. For example: "0token". There is no
+** nul-terminator - in this case nKey=6.
**
** The data that follows the key is in a similar, but not identical format
** to the doclist data stored in the database. It is:
@@ -232153,8 +238129,7 @@ static int fts5HashResize(Fts5Hash *pHash){
unsigned int iHash;
Fts5HashEntry *p = apOld[i];
apOld[i] = p->pHashNext;
- iHash = fts5HashKey(nNew, (u8*)fts5EntryKey(p),
- (int)strlen(fts5EntryKey(p)));
+ iHash = fts5HashKey(nNew, (u8*)fts5EntryKey(p), p->nKey);
p->pHashNext = apNew[iHash];
apNew[iHash] = p;
}
@@ -232238,7 +238213,7 @@ static int sqlite3Fts5HashWrite(
for(p=pHash->aSlot[iHash]; p; p=p->pHashNext){
char *zKey = fts5EntryKey(p);
if( zKey[0]==bByte
- && p->nKey==nToken
+ && p->nKey==nToken+1
&& memcmp(&zKey[1], pToken, nToken)==0
){
break;
@@ -232268,9 +238243,9 @@ static int sqlite3Fts5HashWrite(
zKey[0] = bByte;
memcpy(&zKey[1], pToken, nToken);
assert( iHash==fts5HashKey(pHash->nSlot, (u8*)zKey, nToken+1) );
- p->nKey = nToken;
+ p->nKey = nToken+1;
zKey[nToken+1] = '\0';
- p->nData = nToken+1 + 1 + sizeof(Fts5HashEntry);
+ p->nData = nToken+1 + sizeof(Fts5HashEntry);
p->pHashNext = pHash->aSlot[iHash];
pHash->aSlot[iHash] = p;
pHash->nEntry++;
@@ -232387,12 +238362,17 @@ static Fts5HashEntry *fts5HashEntryMerge(
*ppOut = p1;
p1 = 0;
}else{
- int i = 0;
char *zKey1 = fts5EntryKey(p1);
char *zKey2 = fts5EntryKey(p2);
- while( zKey1[i]==zKey2[i] ) i++;
+ int nMin = MIN(p1->nKey, p2->nKey);
- if( ((u8)zKey1[i])>((u8)zKey2[i]) ){
+ int cmp = memcmp(zKey1, zKey2, nMin);
+ if( cmp==0 ){
+ cmp = p1->nKey - p2->nKey;
+ }
+ assert( cmp!=0 );
+
+ if( cmp>0 ){
/* p2 is smaller */
*ppOut = p2;
ppOut = &p2->pScanNext;
@@ -232411,10 +238391,8 @@ static Fts5HashEntry *fts5HashEntryMerge(
}
/*
-** Extract all tokens from hash table iHash and link them into a list
-** in sorted order. The hash table is cleared before returning. It is
-** the responsibility of the caller to free the elements of the returned
-** list.
+** Link all tokens from hash table iHash into a list in sorted order. The
+** tokens are not removed from the hash table.
*/
static int fts5HashEntrySort(
Fts5Hash *pHash,
@@ -232436,7 +238414,7 @@ static int fts5HashEntrySort(
Fts5HashEntry *pIter;
for(pIter=pHash->aSlot[iSlot]; pIter; pIter=pIter->pHashNext){
if( pTerm==0
- || (pIter->nKey+1>=nTerm && 0==memcmp(fts5EntryKey(pIter), pTerm, nTerm))
+ || (pIter->nKey>=nTerm && 0==memcmp(fts5EntryKey(pIter), pTerm, nTerm))
){
Fts5HashEntry *pEntry = pIter;
pEntry->pScanNext = 0;
@@ -232454,7 +238432,6 @@ static int fts5HashEntrySort(
pList = fts5HashEntryMerge(pList, ap[i]);
}
- pHash->nEntry = 0;
sqlite3_free(ap);
*ppSorted = pList;
return SQLITE_OK;
@@ -232476,12 +238453,11 @@ static int sqlite3Fts5HashQuery(
for(p=pHash->aSlot[iHash]; p; p=p->pHashNext){
zKey = fts5EntryKey(p);
- assert( p->nKey+1==(int)strlen(zKey) );
- if( nTerm==p->nKey+1 && memcmp(zKey, pTerm, nTerm)==0 ) break;
+ if( nTerm==p->nKey && memcmp(zKey, pTerm, nTerm)==0 ) break;
}
if( p ){
- int nHashPre = sizeof(Fts5HashEntry) + nTerm + 1;
+ int nHashPre = sizeof(Fts5HashEntry) + nTerm;
int nList = p->nData - nHashPre;
u8 *pRet = (u8*)(*ppOut = sqlite3_malloc64(nPre + nList + 10));
if( pRet ){
@@ -232508,6 +238484,28 @@ static int sqlite3Fts5HashScanInit(
return fts5HashEntrySort(p, pTerm, nTerm, &p->pScan);
}
+#ifdef SQLITE_DEBUG
+static int fts5HashCount(Fts5Hash *pHash){
+ int nEntry = 0;
+ int ii;
+ for(ii=0; ii<pHash->nSlot; ii++){
+ Fts5HashEntry *p = 0;
+ for(p=pHash->aSlot[ii]; p; p=p->pHashNext){
+ nEntry++;
+ }
+ }
+ return nEntry;
+}
+#endif
+
+/*
+** Return true if the hash table is empty, false otherwise.
+*/
+static int sqlite3Fts5HashIsEmpty(Fts5Hash *pHash){
+ assert( pHash->nEntry==fts5HashCount(pHash) );
+ return pHash->nEntry==0;
+}
+
static void sqlite3Fts5HashScanNext(Fts5Hash *p){
assert( !sqlite3Fts5HashScanEof(p) );
p->pScan = p->pScan->pScanNext;
@@ -232520,19 +238518,22 @@ static int sqlite3Fts5HashScanEof(Fts5Hash *p){
static void sqlite3Fts5HashScanEntry(
Fts5Hash *pHash,
const char **pzTerm, /* OUT: term (nul-terminated) */
+ int *pnTerm, /* OUT: Size of term in bytes */
const u8 **ppDoclist, /* OUT: pointer to doclist */
int *pnDoclist /* OUT: size of doclist in bytes */
){
Fts5HashEntry *p;
if( (p = pHash->pScan) ){
char *zKey = fts5EntryKey(p);
- int nTerm = (int)strlen(zKey);
+ int nTerm = p->nKey;
fts5HashAddPoslistSize(pHash, p, 0);
*pzTerm = zKey;
- *ppDoclist = (const u8*)&zKey[nTerm+1];
- *pnDoclist = p->nData - (sizeof(Fts5HashEntry) + nTerm + 1);
+ *pnTerm = nTerm;
+ *ppDoclist = (const u8*)&zKey[nTerm];
+ *pnDoclist = p->nData - (sizeof(Fts5HashEntry) + nTerm);
}else{
*pzTerm = 0;
+ *pnTerm = 0;
*ppDoclist = 0;
*pnDoclist = 0;
}
@@ -232597,13 +238598,31 @@ static void sqlite3Fts5HashScanEntry(
#define FTS5_MAX_LEVEL 64
/*
+** There are two versions of the format used for the structure record:
+**
+** 1. the legacy format, that may be read by all fts5 versions, and
+**
+** 2. the V2 format, which is used by contentless_delete=1 databases.
+**
+** Both begin with a 4-byte "configuration cookie" value. Then, a legacy
+** format structure record contains a varint - the number of levels in
+** the structure. Whereas a V2 structure record contains the constant
+** 4 bytes [0xff 0x00 0x00 0x01]. This is unambiguous as the value of a
+** varint has to be at least 16256 to begin with "0xFF". And the default
+** maximum number of levels is 64.
+**
+** See below for more on structure record formats.
+*/
+#define FTS5_STRUCTURE_V2 "\xFF\x00\x00\x01"
+
+/*
** Details:
**
** The %_data table managed by this module,
**
** CREATE TABLE %_data(id INTEGER PRIMARY KEY, block BLOB);
**
-** , contains the following 5 types of records. See the comments surrounding
+** , contains the following 6 types of records. See the comments surrounding
** the FTS5_*_ROWID macros below for a description of how %_data rowids are
** assigned to each fo them.
**
@@ -232612,12 +238631,12 @@ static void sqlite3Fts5HashScanEntry(
** The set of segments that make up an index - the index structure - are
** recorded in a single record within the %_data table. The record consists
** of a single 32-bit configuration cookie value followed by a list of
-** SQLite varints. If the FTS table features more than one index (because
-** there are one or more prefix indexes), it is guaranteed that all share
-** the same cookie value.
+** SQLite varints.
+**
+** If the structure record is a V2 record, the configuration cookie is
+** followed by the following 4 bytes: [0xFF 0x00 0x00 0x01].
**
-** Immediately following the configuration cookie, the record begins with
-** three varints:
+** Next, the record continues with three varints:
**
** + number of levels,
** + total number of segments on all levels,
@@ -232632,6 +238651,12 @@ static void sqlite3Fts5HashScanEntry(
** + first leaf page number (often 1, always greater than 0)
** + final leaf page number
**
+** Then, for V2 structures only:
+**
+** + lower origin counter value,
+** + upper origin counter value,
+** + the number of tombstone hash pages.
+**
** 2. The Averages Record:
**
** A single record within the %_data table. The data is a list of varints.
@@ -232747,6 +238772,38 @@ static void sqlite3Fts5HashScanEntry(
** * A list of delta-encoded varints - the first rowid on each subsequent
** child page.
**
+** 6. Tombstone Hash Page
+**
+** These records are only ever present in contentless_delete=1 tables.
+** There are zero or more of these associated with each segment. They
+** are used to store the tombstone rowids for rows contained in the
+** associated segments.
+**
+** The set of nHashPg tombstone hash pages associated with a single
+** segment together form a single hash table containing tombstone rowids.
+** To find the page of the hash on which a key might be stored:
+**
+** iPg = (rowid % nHashPg)
+**
+** Then, within page iPg, which has nSlot slots:
+**
+** iSlot = (rowid / nHashPg) % nSlot
+**
+** Each tombstone hash page begins with an 8 byte header:
+**
+** 1-byte: Key-size (the size in bytes of each slot). Either 4 or 8.
+** 1-byte: rowid-0-tombstone flag. This flag is only valid on the
+** first tombstone hash page for each segment (iPg=0). If set,
+** the hash table contains rowid 0. If clear, it does not.
+** Rowid 0 is handled specially.
+** 2-bytes: unused.
+** 4-bytes: Big-endian integer containing number of entries on page.
+**
+** Following this are nSlot 4 or 8 byte slots (depending on the key-size
+** in the first byte of the page header). The number of slots may be
+** determined based on the size of the page record and the key-size:
+**
+** nSlot = (nByte - 8) / key-size
*/
/*
@@ -232780,6 +238837,7 @@ static void sqlite3Fts5HashScanEntry(
#define FTS5_SEGMENT_ROWID(segid, pgno) fts5_dri(segid, 0, 0, pgno)
#define FTS5_DLIDX_ROWID(segid, height, pgno) fts5_dri(segid, 1, height, pgno)
+#define FTS5_TOMBSTONE_ROWID(segid,ipg) fts5_dri(segid+(1<<16), 0, 0, ipg)
#ifdef SQLITE_DEBUG
static int sqlite3Fts5Corrupt() { return SQLITE_CORRUPT_VTAB; }
@@ -232806,6 +238864,9 @@ typedef struct Fts5SegWriter Fts5SegWriter;
typedef struct Fts5Structure Fts5Structure;
typedef struct Fts5StructureLevel Fts5StructureLevel;
typedef struct Fts5StructureSegment Fts5StructureSegment;
+typedef struct Fts5TokenDataIter Fts5TokenDataIter;
+typedef struct Fts5TokenDataMap Fts5TokenDataMap;
+typedef struct Fts5TombstoneArray Fts5TombstoneArray;
struct Fts5Data {
u8 *p; /* Pointer to buffer containing record */
@@ -232815,6 +238876,12 @@ struct Fts5Data {
/*
** One object per %_data table.
+**
+** nContentlessDelete:
+** The number of contentless delete operations since the most recent
+** call to fts5IndexFlush() or fts5IndexDiscardData(). This is tracked
+** so that extra auto-merge work can be done by fts5IndexFlush() to
+** account for the delete operations.
*/
struct Fts5Index {
Fts5Config *pConfig; /* Virtual table configuration */
@@ -232829,9 +238896,12 @@ struct Fts5Index {
int nPendingData; /* Current bytes of pending data */
i64 iWriteRowid; /* Rowid for current doc being written */
int bDelete; /* Current write is a delete */
+ int nContentlessDelete; /* Number of contentless delete ops */
+ int nPendingRow; /* Number of INSERT in hash table */
/* Error state. */
int rc; /* Current error code */
+ int flushRc;
/* State used by the fts5DataXXX() functions. */
sqlite3_blob *pReader; /* RO incr-blob open on %_data table */
@@ -232840,6 +238910,7 @@ struct Fts5Index {
sqlite3_stmt *pIdxWriter; /* "INSERT ... %_idx VALUES(?,?,?,?)" */
sqlite3_stmt *pIdxDeleter; /* "DELETE FROM %_idx WHERE segid=?" */
sqlite3_stmt *pIdxSelect;
+ sqlite3_stmt *pIdxNextSelect;
int nRead; /* Total number of blocks read */
sqlite3_stmt *pDeleteFromIdx;
@@ -232863,11 +238934,23 @@ struct Fts5DoclistIter {
** The contents of the "structure" record for each index are represented
** using an Fts5Structure record in memory. Which uses instances of the
** other Fts5StructureXXX types as components.
+**
+** nOriginCntr:
+** This value is set to non-zero for structure records created for
+** contentlessdelete=1 tables only. In that case it represents the
+** origin value to apply to the next top-level segment created.
*/
struct Fts5StructureSegment {
int iSegid; /* Segment id */
int pgnoFirst; /* First leaf page number in segment */
int pgnoLast; /* Last leaf page number in segment */
+
+ /* contentlessdelete=1 tables only: */
+ u64 iOrigin1;
+ u64 iOrigin2;
+ int nPgTombstone; /* Number of tombstone hash table pages */
+ u64 nEntryTombstone; /* Number of tombstone entries that "count" */
+ u64 nEntry; /* Number of rows in this segment */
};
struct Fts5StructureLevel {
int nMerge; /* Number of segments in incr-merge */
@@ -232877,6 +238960,7 @@ struct Fts5StructureLevel {
struct Fts5Structure {
int nRef; /* Object reference count */
u64 nWriteCounter; /* Total leaves written to level 0 */
+ u64 nOriginCntr; /* Origin value for next top-level segment */
int nSegment; /* Total segments in this structure */
int nLevel; /* Number of levels in this index */
Fts5StructureLevel aLevel[1]; /* Array of nLevel level objects */
@@ -232965,6 +239049,13 @@ struct Fts5CResult {
**
** iTermIdx:
** Index of current term on iTermLeafPgno.
+**
+** apTombstone/nTombstone:
+** These are used for contentless_delete=1 tables only. When the cursor
+** is first allocated, the apTombstone[] array is allocated so that it
+** is large enough for all tombstones hash pages associated with the
+** segment. The pages themselves are loaded lazily from the database as
+** they are required.
*/
struct Fts5SegIter {
Fts5StructureSegment *pSeg; /* Segment to iterate through */
@@ -232973,6 +239064,7 @@ struct Fts5SegIter {
Fts5Data *pLeaf; /* Current leaf data */
Fts5Data *pNextLeaf; /* Leaf page (iLeafPgno+1) */
i64 iLeafOffset; /* Byte offset within current leaf */
+ Fts5TombstoneArray *pTombArray; /* Array of tombstone pages */
/* Next method */
void (*xNext)(Fts5Index*, Fts5SegIter*, int*);
@@ -233000,6 +239092,15 @@ struct Fts5SegIter {
};
/*
+** Array of tombstone pages. Reference counted.
+*/
+struct Fts5TombstoneArray {
+ int nRef; /* Number of pointers to this object */
+ int nTombstone;
+ Fts5Data *apTombstone[1]; /* Array of tombstone pages */
+};
+
+/*
** Argument is a pointer to an Fts5Data structure that contains a
** leaf page.
*/
@@ -233043,9 +239144,16 @@ struct Fts5SegIter {
** poslist:
** Used by sqlite3Fts5IterPoslist() when the poslist needs to be buffered.
** There is no way to tell if this is populated or not.
+**
+** pColset:
+** If not NULL, points to an object containing a set of column indices.
+** Only matches that occur in one of these columns will be returned.
+** The Fts5Iter does not own the Fts5Colset object, and so it is not
+** freed when the iterator is closed - it is owned by the upper layer.
*/
struct Fts5Iter {
Fts5IndexIter base; /* Base class containing output vars */
+ Fts5TokenDataIter *pTokenDataIter;
Fts5Index *pIndex; /* Index that owns this iterator */
Fts5Buffer poslist; /* Buffer containing current poslist */
@@ -233063,7 +239171,6 @@ struct Fts5Iter {
Fts5SegIter aSeg[1]; /* Array of segment iterators */
};
-
/*
** An instance of the following type is used to iterate through the contents
** of a doclist-index record.
@@ -233103,6 +239210,60 @@ static u16 fts5GetU16(const u8 *aIn){
}
/*
+** The only argument points to a buffer at least 8 bytes in size. This
+** function interprets the first 8 bytes of the buffer as a 64-bit big-endian
+** unsigned integer and returns the result.
+*/
+static u64 fts5GetU64(u8 *a){
+ return ((u64)a[0] << 56)
+ + ((u64)a[1] << 48)
+ + ((u64)a[2] << 40)
+ + ((u64)a[3] << 32)
+ + ((u64)a[4] << 24)
+ + ((u64)a[5] << 16)
+ + ((u64)a[6] << 8)
+ + ((u64)a[7] << 0);
+}
+
+/*
+** The only argument points to a buffer at least 4 bytes in size. This
+** function interprets the first 4 bytes of the buffer as a 32-bit big-endian
+** unsigned integer and returns the result.
+*/
+static u32 fts5GetU32(const u8 *a){
+ return ((u32)a[0] << 24)
+ + ((u32)a[1] << 16)
+ + ((u32)a[2] << 8)
+ + ((u32)a[3] << 0);
+}
+
+/*
+** Write iVal, formated as a 64-bit big-endian unsigned integer, to the
+** buffer indicated by the first argument.
+*/
+static void fts5PutU64(u8 *a, u64 iVal){
+ a[0] = ((iVal >> 56) & 0xFF);
+ a[1] = ((iVal >> 48) & 0xFF);
+ a[2] = ((iVal >> 40) & 0xFF);
+ a[3] = ((iVal >> 32) & 0xFF);
+ a[4] = ((iVal >> 24) & 0xFF);
+ a[5] = ((iVal >> 16) & 0xFF);
+ a[6] = ((iVal >> 8) & 0xFF);
+ a[7] = ((iVal >> 0) & 0xFF);
+}
+
+/*
+** Write iVal, formated as a 32-bit big-endian unsigned integer, to the
+** buffer indicated by the first argument.
+*/
+static void fts5PutU32(u8 *a, u32 iVal){
+ a[0] = ((iVal >> 24) & 0xFF);
+ a[1] = ((iVal >> 16) & 0xFF);
+ a[2] = ((iVal >> 8) & 0xFF);
+ a[3] = ((iVal >> 0) & 0xFF);
+}
+
+/*
** Allocate and return a buffer at least nByte bytes in size.
**
** If an OOM error is encountered, return NULL and set the error code in
@@ -233329,10 +239490,17 @@ static void fts5DataDelete(Fts5Index *p, i64 iFirst, i64 iLast){
/*
** Remove all records associated with segment iSegid.
*/
-static void fts5DataRemoveSegment(Fts5Index *p, int iSegid){
+static void fts5DataRemoveSegment(Fts5Index *p, Fts5StructureSegment *pSeg){
+ int iSegid = pSeg->iSegid;
i64 iFirst = FTS5_SEGMENT_ROWID(iSegid, 0);
i64 iLast = FTS5_SEGMENT_ROWID(iSegid+1, 0)-1;
fts5DataDelete(p, iFirst, iLast);
+
+ if( pSeg->nPgTombstone ){
+ i64 iTomb1 = FTS5_TOMBSTONE_ROWID(iSegid, 0);
+ i64 iTomb2 = FTS5_TOMBSTONE_ROWID(iSegid, pSeg->nPgTombstone-1);
+ fts5DataDelete(p, iTomb1, iTomb2);
+ }
if( p->pIdxDeleter==0 ){
Fts5Config *pConfig = p->pConfig;
fts5IndexPrepareStmt(p, &p->pIdxDeleter, sqlite3_mprintf(
@@ -233443,11 +239611,19 @@ static int fts5StructureDecode(
int nSegment = 0;
sqlite3_int64 nByte; /* Bytes of space to allocate at pRet */
Fts5Structure *pRet = 0; /* Structure object to return */
+ int bStructureV2 = 0; /* True for FTS5_STRUCTURE_V2 */
+ u64 nOriginCntr = 0; /* Largest origin value seen so far */
/* Grab the cookie value */
if( piCookie ) *piCookie = sqlite3Fts5Get32(pData);
i = 4;
+ /* Check if this is a V2 structure record. Set bStructureV2 if it is. */
+ if( 0==memcmp(&pData[i], FTS5_STRUCTURE_V2, 4) ){
+ i += 4;
+ bStructureV2 = 1;
+ }
+
/* Read the total number of levels and segments from the start of the
** structure record. */
i += fts5GetVarint32(&pData[i], nLevel);
@@ -233498,6 +239674,14 @@ static int fts5StructureDecode(
i += fts5GetVarint32(&pData[i], pSeg->iSegid);
i += fts5GetVarint32(&pData[i], pSeg->pgnoFirst);
i += fts5GetVarint32(&pData[i], pSeg->pgnoLast);
+ if( bStructureV2 ){
+ i += fts5GetVarint(&pData[i], &pSeg->iOrigin1);
+ i += fts5GetVarint(&pData[i], &pSeg->iOrigin2);
+ i += fts5GetVarint32(&pData[i], pSeg->nPgTombstone);
+ i += fts5GetVarint(&pData[i], &pSeg->nEntryTombstone);
+ i += fts5GetVarint(&pData[i], &pSeg->nEntry);
+ nOriginCntr = MAX(nOriginCntr, pSeg->iOrigin2);
+ }
if( pSeg->pgnoLast<pSeg->pgnoFirst ){
rc = FTS5_CORRUPT;
break;
@@ -233508,6 +239692,9 @@ static int fts5StructureDecode(
}
}
if( nSegment!=0 && rc==SQLITE_OK ) rc = FTS5_CORRUPT;
+ if( bStructureV2 ){
+ pRet->nOriginCntr = nOriginCntr+1;
+ }
if( rc!=SQLITE_OK ){
fts5StructureRelease(pRet);
@@ -233720,6 +239907,7 @@ static void fts5StructureWrite(Fts5Index *p, Fts5Structure *pStruct){
Fts5Buffer buf; /* Buffer to serialize record into */
int iLvl; /* Used to iterate through levels */
int iCookie; /* Cookie value to store */
+ int nHdr = (pStruct->nOriginCntr>0 ? (4+4+9+9+9) : (4+9+9));
assert( pStruct->nSegment==fts5StructureCountSegments(pStruct) );
memset(&buf, 0, sizeof(Fts5Buffer));
@@ -233728,9 +239916,12 @@ static void fts5StructureWrite(Fts5Index *p, Fts5Structure *pStruct){
iCookie = p->pConfig->iCookie;
if( iCookie<0 ) iCookie = 0;
- if( 0==sqlite3Fts5BufferSize(&p->rc, &buf, 4+9+9+9) ){
+ if( 0==sqlite3Fts5BufferSize(&p->rc, &buf, nHdr) ){
sqlite3Fts5Put32(buf.p, iCookie);
buf.n = 4;
+ if( pStruct->nOriginCntr>0 ){
+ fts5BufferSafeAppendBlob(&buf, FTS5_STRUCTURE_V2, 4);
+ }
fts5BufferSafeAppendVarint(&buf, pStruct->nLevel);
fts5BufferSafeAppendVarint(&buf, pStruct->nSegment);
fts5BufferSafeAppendVarint(&buf, (i64)pStruct->nWriteCounter);
@@ -233744,9 +239935,17 @@ static void fts5StructureWrite(Fts5Index *p, Fts5Structure *pStruct){
assert( pLvl->nMerge<=pLvl->nSeg );
for(iSeg=0; iSeg<pLvl->nSeg; iSeg++){
- fts5BufferAppendVarint(&p->rc, &buf, pLvl->aSeg[iSeg].iSegid);
- fts5BufferAppendVarint(&p->rc, &buf, pLvl->aSeg[iSeg].pgnoFirst);
- fts5BufferAppendVarint(&p->rc, &buf, pLvl->aSeg[iSeg].pgnoLast);
+ Fts5StructureSegment *pSeg = &pLvl->aSeg[iSeg];
+ fts5BufferAppendVarint(&p->rc, &buf, pSeg->iSegid);
+ fts5BufferAppendVarint(&p->rc, &buf, pSeg->pgnoFirst);
+ fts5BufferAppendVarint(&p->rc, &buf, pSeg->pgnoLast);
+ if( pStruct->nOriginCntr>0 ){
+ fts5BufferAppendVarint(&p->rc, &buf, pSeg->iOrigin1);
+ fts5BufferAppendVarint(&p->rc, &buf, pSeg->iOrigin2);
+ fts5BufferAppendVarint(&p->rc, &buf, pSeg->nPgTombstone);
+ fts5BufferAppendVarint(&p->rc, &buf, pSeg->nEntryTombstone);
+ fts5BufferAppendVarint(&p->rc, &buf, pSeg->nEntry);
+ }
}
}
@@ -233889,9 +240088,9 @@ static int fts5DlidxLvlNext(Fts5DlidxLvl *pLvl){
}
if( iOff<pData->nn ){
- i64 iVal;
+ u64 iVal;
pLvl->iLeafPgno += (iOff - pLvl->iOff) + 1;
- iOff += fts5GetVarint(&pData->p[iOff], (u64*)&iVal);
+ iOff += fts5GetVarint(&pData->p[iOff], &iVal);
pLvl->iRowid += iVal;
pLvl->iOff = iOff;
}else{
@@ -234270,6 +240469,25 @@ static void fts5SegIterSetNext(Fts5Index *p, Fts5SegIter *pIter){
}
/*
+** Allocate a tombstone hash page array object (pIter->pTombArray) for
+** the iterator passed as the second argument. If an OOM error occurs,
+** leave an error in the Fts5Index object.
+*/
+static void fts5SegIterAllocTombstone(Fts5Index *p, Fts5SegIter *pIter){
+ const int nTomb = pIter->pSeg->nPgTombstone;
+ if( nTomb>0 ){
+ int nByte = nTomb * sizeof(Fts5Data*) + sizeof(Fts5TombstoneArray);
+ Fts5TombstoneArray *pNew;
+ pNew = (Fts5TombstoneArray*)sqlite3Fts5MallocZero(&p->rc, nByte);
+ if( pNew ){
+ pNew->nTombstone = nTomb;
+ pNew->nRef = 1;
+ pIter->pTombArray = pNew;
+ }
+ }
+}
+
+/*
** Initialize the iterator object pIter to iterate through the entries in
** segment pSeg. The iterator is left pointing to the first entry when
** this function returns.
@@ -234310,6 +240528,7 @@ static void fts5SegIterInit(
pIter->iPgidxOff = pIter->pLeaf->szLeaf+1;
fts5SegIterLoadTerm(p, pIter, 0);
fts5SegIterLoadNPos(p, pIter);
+ fts5SegIterAllocTombstone(p, pIter);
}
}
@@ -234520,15 +240739,16 @@ static void fts5SegIterNext_None(
}else{
const u8 *pList = 0;
const char *zTerm = 0;
+ int nTerm = 0;
int nList;
sqlite3Fts5HashScanNext(p->pHash);
- sqlite3Fts5HashScanEntry(p->pHash, &zTerm, &pList, &nList);
+ sqlite3Fts5HashScanEntry(p->pHash, &zTerm, &nTerm, &pList, &nList);
if( pList==0 ) goto next_none_eof;
pIter->pLeaf->p = (u8*)pList;
pIter->pLeaf->nn = nList;
pIter->pLeaf->szLeaf = nList;
pIter->iEndofDoclist = nList;
- sqlite3Fts5BufferSet(&p->rc,&pIter->term, (int)strlen(zTerm), (u8*)zTerm);
+ sqlite3Fts5BufferSet(&p->rc,&pIter->term, nTerm, (u8*)zTerm);
pIter->iLeafOffset = fts5GetVarint(pList, (u64*)&pIter->iRowid);
}
@@ -234594,11 +240814,12 @@ static void fts5SegIterNext(
}else if( pIter->pSeg==0 ){
const u8 *pList = 0;
const char *zTerm = 0;
+ int nTerm = 0;
int nList = 0;
assert( (pIter->flags & FTS5_SEGITER_ONETERM) || pbNewTerm );
if( 0==(pIter->flags & FTS5_SEGITER_ONETERM) ){
sqlite3Fts5HashScanNext(p->pHash);
- sqlite3Fts5HashScanEntry(p->pHash, &zTerm, &pList, &nList);
+ sqlite3Fts5HashScanEntry(p->pHash, &zTerm, &nTerm, &pList, &nList);
}
if( pList==0 ){
fts5DataRelease(pIter->pLeaf);
@@ -234608,8 +240829,7 @@ static void fts5SegIterNext(
pIter->pLeaf->nn = nList;
pIter->pLeaf->szLeaf = nList;
pIter->iEndofDoclist = nList+1;
- sqlite3Fts5BufferSet(&p->rc, &pIter->term, (int)strlen(zTerm),
- (u8*)zTerm);
+ sqlite3Fts5BufferSet(&p->rc, &pIter->term, nTerm, (u8*)zTerm);
pIter->iLeafOffset = fts5GetVarint(pList, (u64*)&pIter->iRowid);
*pbNewTerm = 1;
}
@@ -234995,7 +241215,7 @@ static void fts5SegIterSeekInit(
fts5LeafSeek(p, bGe, pIter, pTerm, nTerm);
}
- if( p->rc==SQLITE_OK && bGe==0 ){
+ if( p->rc==SQLITE_OK && (bGe==0 || (flags & FTS5INDEX_QUERY_SCANONETERM)) ){
pIter->flags |= FTS5_SEGITER_ONETERM;
if( pIter->pLeaf ){
if( flags & FTS5INDEX_QUERY_DESC ){
@@ -235011,6 +241231,9 @@ static void fts5SegIterSeekInit(
}
fts5SegIterSetNext(p, pIter);
+ if( 0==(flags & FTS5INDEX_QUERY_SCANONETERM) ){
+ fts5SegIterAllocTombstone(p, pIter);
+ }
/* Either:
**
@@ -235027,6 +241250,79 @@ static void fts5SegIterSeekInit(
);
}
+
+/*
+** SQL used by fts5SegIterNextInit() to find the page to open.
+*/
+static sqlite3_stmt *fts5IdxNextStmt(Fts5Index *p){
+ if( p->pIdxNextSelect==0 ){
+ Fts5Config *pConfig = p->pConfig;
+ fts5IndexPrepareStmt(p, &p->pIdxNextSelect, sqlite3_mprintf(
+ "SELECT pgno FROM '%q'.'%q_idx' WHERE "
+ "segid=? AND term>? ORDER BY term ASC LIMIT 1",
+ pConfig->zDb, pConfig->zName
+ ));
+
+ }
+ return p->pIdxNextSelect;
+}
+
+/*
+** This is similar to fts5SegIterSeekInit(), except that it initializes
+** the segment iterator to point to the first term following the page
+** with pToken/nToken on it.
+*/
+static void fts5SegIterNextInit(
+ Fts5Index *p,
+ const char *pTerm, int nTerm,
+ Fts5StructureSegment *pSeg, /* Description of segment */
+ Fts5SegIter *pIter /* Object to populate */
+){
+ int iPg = -1; /* Page of segment to open */
+ int bDlidx = 0;
+ sqlite3_stmt *pSel = 0; /* SELECT to find iPg */
+
+ pSel = fts5IdxNextStmt(p);
+ if( pSel ){
+ assert( p->rc==SQLITE_OK );
+ sqlite3_bind_int(pSel, 1, pSeg->iSegid);
+ sqlite3_bind_blob(pSel, 2, pTerm, nTerm, SQLITE_STATIC);
+
+ if( sqlite3_step(pSel)==SQLITE_ROW ){
+ i64 val = sqlite3_column_int64(pSel, 0);
+ iPg = (int)(val>>1);
+ bDlidx = (val & 0x0001);
+ }
+ p->rc = sqlite3_reset(pSel);
+ sqlite3_bind_null(pSel, 2);
+ if( p->rc ) return;
+ }
+
+ memset(pIter, 0, sizeof(*pIter));
+ pIter->pSeg = pSeg;
+ pIter->flags |= FTS5_SEGITER_ONETERM;
+ if( iPg>=0 ){
+ pIter->iLeafPgno = iPg - 1;
+ fts5SegIterNextPage(p, pIter);
+ fts5SegIterSetNext(p, pIter);
+ }
+ if( pIter->pLeaf ){
+ const u8 *a = pIter->pLeaf->p;
+ int iTermOff = 0;
+
+ pIter->iPgidxOff = pIter->pLeaf->szLeaf;
+ pIter->iPgidxOff += fts5GetVarint32(&a[pIter->iPgidxOff], iTermOff);
+ pIter->iLeafOffset = iTermOff;
+ fts5SegIterLoadTerm(p, pIter, 0);
+ fts5SegIterLoadNPos(p, pIter);
+ if( bDlidx ) fts5SegIterLoadDlidx(p, pIter);
+
+ assert( p->rc!=SQLITE_OK ||
+ fts5BufferCompareBlob(&pIter->term, (const u8*)pTerm, nTerm)>0
+ );
+ }
+}
+
/*
** Initialize the object pIter to point to term pTerm/nTerm within the
** in-memory hash table. If there is no such term in the hash-table, the
@@ -235053,14 +241349,21 @@ static void fts5SegIterHashInit(
const u8 *pList = 0;
p->rc = sqlite3Fts5HashScanInit(p->pHash, (const char*)pTerm, nTerm);
- sqlite3Fts5HashScanEntry(p->pHash, (const char**)&z, &pList, &nList);
- n = (z ? (int)strlen((const char*)z) : 0);
+ sqlite3Fts5HashScanEntry(p->pHash, (const char**)&z, &n, &pList, &nList);
if( pList ){
pLeaf = fts5IdxMalloc(p, sizeof(Fts5Data));
if( pLeaf ){
pLeaf->p = (u8*)pList;
}
}
+
+ /* The call to sqlite3Fts5HashScanInit() causes the hash table to
+ ** fill the size field of all existing position lists. This means they
+ ** can no longer be appended to. Since the only scenario in which they
+ ** can be appended to is if the previous operation on this table was
+ ** a DELETE, by clearing the Fts5Index.bDelete flag we can avoid this
+ ** possibility altogether. */
+ p->bDelete = 0;
}else{
p->rc = sqlite3Fts5HashQuery(p->pHash, sizeof(Fts5Data),
(const char*)pTerm, nTerm, (void**)&pLeaf, &nList
@@ -235092,12 +241395,44 @@ static void fts5SegIterHashInit(
}
/*
+** Array ap[] contains n elements. Release each of these elements using
+** fts5DataRelease(). Then free the array itself using sqlite3_free().
+*/
+static void fts5IndexFreeArray(Fts5Data **ap, int n){
+ if( ap ){
+ int ii;
+ for(ii=0; ii<n; ii++){
+ fts5DataRelease(ap[ii]);
+ }
+ sqlite3_free(ap);
+ }
+}
+
+/*
+** Decrement the ref-count of the object passed as the only argument. If it
+** reaches 0, free it and its contents.
+*/
+static void fts5TombstoneArrayDelete(Fts5TombstoneArray *p){
+ if( p ){
+ p->nRef--;
+ if( p->nRef<=0 ){
+ int ii;
+ for(ii=0; ii<p->nTombstone; ii++){
+ fts5DataRelease(p->apTombstone[ii]);
+ }
+ sqlite3_free(p);
+ }
+ }
+}
+
+/*
** Zero the iterator passed as the only argument.
*/
static void fts5SegIterClear(Fts5SegIter *pIter){
fts5BufferFree(&pIter->term);
fts5DataRelease(pIter->pLeaf);
fts5DataRelease(pIter->pNextLeaf);
+ fts5TombstoneArrayDelete(pIter->pTombArray);
fts5DlidxIterFree(pIter->pDlidx);
sqlite3_free(pIter->aRowidOffset);
memset(pIter, 0, sizeof(Fts5SegIter));
@@ -235231,7 +241566,6 @@ static int fts5MultiIterDoCompare(Fts5Iter *pIter, int iOut){
assert_nc( i2!=0 );
pRes->bTermEq = 1;
if( p1->iRowid==p2->iRowid ){
- p1->bDel = p2->bDel;
return i2;
}
res = ((p1->iRowid > p2->iRowid)==pIter->bRev) ? -1 : +1;
@@ -235343,7 +241677,6 @@ static void fts5SegIterNextFrom(
}while( p->rc==SQLITE_OK );
}
-
/*
** Free the iterator object passed as the second argument.
*/
@@ -235436,6 +241769,85 @@ static void fts5MultiIterSetEof(Fts5Iter *pIter){
}
/*
+** The argument to this macro must be an Fts5Data structure containing a
+** tombstone hash page. This macro returns the key-size of the hash-page.
+*/
+#define TOMBSTONE_KEYSIZE(pPg) (pPg->p[0]==4 ? 4 : 8)
+
+#define TOMBSTONE_NSLOT(pPg) \
+ ((pPg->nn > 16) ? ((pPg->nn-8) / TOMBSTONE_KEYSIZE(pPg)) : 1)
+
+/*
+** Query a single tombstone hash table for rowid iRowid. Return true if
+** it is found or false otherwise. The tombstone hash table is one of
+** nHashTable tables.
+*/
+static int fts5IndexTombstoneQuery(
+ Fts5Data *pHash, /* Hash table page to query */
+ int nHashTable, /* Number of pages attached to segment */
+ u64 iRowid /* Rowid to query hash for */
+){
+ const int szKey = TOMBSTONE_KEYSIZE(pHash);
+ const int nSlot = TOMBSTONE_NSLOT(pHash);
+ int iSlot = (iRowid / nHashTable) % nSlot;
+ int nCollide = nSlot;
+
+ if( iRowid==0 ){
+ return pHash->p[1];
+ }else if( szKey==4 ){
+ u32 *aSlot = (u32*)&pHash->p[8];
+ while( aSlot[iSlot] ){
+ if( fts5GetU32((u8*)&aSlot[iSlot])==iRowid ) return 1;
+ if( nCollide--==0 ) break;
+ iSlot = (iSlot+1)%nSlot;
+ }
+ }else{
+ u64 *aSlot = (u64*)&pHash->p[8];
+ while( aSlot[iSlot] ){
+ if( fts5GetU64((u8*)&aSlot[iSlot])==iRowid ) return 1;
+ if( nCollide--==0 ) break;
+ iSlot = (iSlot+1)%nSlot;
+ }
+ }
+
+ return 0;
+}
+
+/*
+** Return true if the iterator passed as the only argument points
+** to an segment entry for which there is a tombstone. Return false
+** if there is no tombstone or if the iterator is already at EOF.
+*/
+static int fts5MultiIterIsDeleted(Fts5Iter *pIter){
+ int iFirst = pIter->aFirst[1].iFirst;
+ Fts5SegIter *pSeg = &pIter->aSeg[iFirst];
+ Fts5TombstoneArray *pArray = pSeg->pTombArray;
+
+ if( pSeg->pLeaf && pArray ){
+ /* Figure out which page the rowid might be present on. */
+ int iPg = ((u64)pSeg->iRowid) % pArray->nTombstone;
+ assert( iPg>=0 );
+
+ /* If tombstone hash page iPg has not yet been loaded from the
+ ** database, load it now. */
+ if( pArray->apTombstone[iPg]==0 ){
+ pArray->apTombstone[iPg] = fts5DataRead(pIter->pIndex,
+ FTS5_TOMBSTONE_ROWID(pSeg->pSeg->iSegid, iPg)
+ );
+ if( pArray->apTombstone[iPg]==0 ) return 0;
+ }
+
+ return fts5IndexTombstoneQuery(
+ pArray->apTombstone[iPg],
+ pArray->nTombstone,
+ pSeg->iRowid
+ );
+ }
+
+ return 0;
+}
+
+/*
** Move the iterator to the next entry.
**
** If an error occurs, an error code is left in Fts5Index.rc. It is not
@@ -235472,7 +241884,9 @@ static void fts5MultiIterNext(
fts5AssertMultiIterSetup(p, pIter);
assert( pSeg==&pIter->aSeg[pIter->aFirst[1].iFirst] && pSeg->pLeaf );
- if( pIter->bSkipEmpty==0 || pSeg->nPos ){
+ if( (pIter->bSkipEmpty==0 || pSeg->nPos)
+ && 0==fts5MultiIterIsDeleted(pIter)
+ ){
pIter->xSetOutputs(pIter, pSeg);
return;
}
@@ -235504,7 +241918,9 @@ static void fts5MultiIterNext2(
}
fts5AssertMultiIterSetup(p, pIter);
- }while( fts5MultiIterIsEmpty(p, pIter) );
+ }while( (fts5MultiIterIsEmpty(p, pIter) || fts5MultiIterIsDeleted(pIter))
+ && (p->rc==SQLITE_OK)
+ );
}
}
@@ -235517,7 +241933,7 @@ static Fts5Iter *fts5MultiIterAlloc(
int nSeg
){
Fts5Iter *pNew;
- int nSlot; /* Power of two >= nSeg */
+ i64 nSlot; /* Power of two >= nSeg */
for(nSlot=2; nSlot<nSeg; nSlot=nSlot*2);
pNew = fts5IdxMalloc(p,
@@ -235962,6 +242378,32 @@ static void fts5IterSetOutputCb(int *pRc, Fts5Iter *pIter){
}
}
+/*
+** All the component segment-iterators of pIter have been set up. This
+** functions finishes setup for iterator pIter itself.
+*/
+static void fts5MultiIterFinishSetup(Fts5Index *p, Fts5Iter *pIter){
+ int iIter;
+ for(iIter=pIter->nSeg-1; iIter>0; iIter--){
+ int iEq;
+ if( (iEq = fts5MultiIterDoCompare(pIter, iIter)) ){
+ Fts5SegIter *pSeg = &pIter->aSeg[iEq];
+ if( p->rc==SQLITE_OK ) pSeg->xNext(p, pSeg, 0);
+ fts5MultiIterAdvanced(p, pIter, iEq, iIter);
+ }
+ }
+ fts5MultiIterSetEof(pIter);
+ fts5AssertMultiIterSetup(p, pIter);
+
+ if( (pIter->bSkipEmpty && fts5MultiIterIsEmpty(p, pIter))
+ || fts5MultiIterIsDeleted(pIter)
+ ){
+ fts5MultiIterNext(p, pIter, 0, 0);
+ }else if( pIter->base.bEof==0 ){
+ Fts5SegIter *pSeg = &pIter->aSeg[pIter->aFirst[1].iFirst];
+ pIter->xSetOutputs(pIter, pSeg);
+ }
+}
/*
** Allocate a new Fts5Iter object.
@@ -236043,29 +242485,12 @@ static void fts5MultiIterNew(
assert( iIter==nSeg );
}
- /* If the above was successful, each component iterators now points
+ /* If the above was successful, each component iterator now points
** to the first entry in its segment. In this case initialize the
** aFirst[] array. Or, if an error has occurred, free the iterator
** object and set the output variable to NULL. */
if( p->rc==SQLITE_OK ){
- for(iIter=pNew->nSeg-1; iIter>0; iIter--){
- int iEq;
- if( (iEq = fts5MultiIterDoCompare(pNew, iIter)) ){
- Fts5SegIter *pSeg = &pNew->aSeg[iEq];
- if( p->rc==SQLITE_OK ) pSeg->xNext(p, pSeg, 0);
- fts5MultiIterAdvanced(p, pNew, iEq, iIter);
- }
- }
- fts5MultiIterSetEof(pNew);
- fts5AssertMultiIterSetup(p, pNew);
-
- if( pNew->bSkipEmpty && fts5MultiIterIsEmpty(p, pNew) ){
- fts5MultiIterNext(p, pNew, 0, 0);
- }else if( pNew->base.bEof==0 ){
- Fts5SegIter *pSeg = &pNew->aSeg[pNew->aFirst[1].iFirst];
- pNew->xSetOutputs(pNew, pSeg);
- }
-
+ fts5MultiIterFinishSetup(p, pNew);
}else{
fts5MultiIterFree(pNew);
*ppOut = 0;
@@ -236090,7 +242515,6 @@ static void fts5MultiIterNew2(
pNew = fts5MultiIterAlloc(p, 2);
if( pNew ){
Fts5SegIter *pIter = &pNew->aSeg[1];
-
pIter->flags = FTS5_SEGITER_ONETERM;
if( pData->szLeaf>0 ){
pIter->pLeaf = pData;
@@ -236237,7 +242661,10 @@ static void fts5IndexDiscardData(Fts5Index *p){
if( p->pHash ){
sqlite3Fts5HashClear(p->pHash);
p->nPendingData = 0;
+ p->nPendingRow = 0;
+ p->flushRc = SQLITE_OK;
}
+ p->nContentlessDelete = 0;
}
/*
@@ -236451,7 +242878,7 @@ static void fts5WriteDlidxAppend(
}
if( pDlidx->bPrevValid ){
- iVal = iRowid - pDlidx->iPrev;
+ iVal = (u64)iRowid - (u64)pDlidx->iPrev;
}else{
i64 iPgno = (i==0 ? pWriter->writer.pgno : pDlidx[-1].pgno);
assert( pDlidx->buf.n==0 );
@@ -236638,7 +243065,7 @@ static void fts5WriteAppendPoslistData(
const u8 *a = aData;
int n = nData;
- assert( p->pConfig->pgsz>0 );
+ assert( p->pConfig->pgsz>0 || p->rc!=SQLITE_OK );
while( p->rc==SQLITE_OK
&& (pPage->buf.n + pPage->pgidx.n + n)>=p->pConfig->pgsz
){
@@ -236874,6 +243301,12 @@ static void fts5IndexMergeLevel(
/* Read input from all segments in the input level */
nInput = pLvl->nSeg;
+
+ /* Set the range of origins that will go into the output segment. */
+ if( pStruct->nOriginCntr>0 ){
+ pSeg->iOrigin1 = pLvl->aSeg[0].iOrigin1;
+ pSeg->iOrigin2 = pLvl->aSeg[pLvl->nSeg-1].iOrigin2;
+ }
}
bOldest = (pLvlOut->nSeg==1 && pStruct->nLevel==iLvl+2);
@@ -236933,8 +243366,11 @@ static void fts5IndexMergeLevel(
int i;
/* Remove the redundant segments from the %_data table */
+ assert( pSeg->nEntry==0 );
for(i=0; i<nInput; i++){
- fts5DataRemoveSegment(p, pLvl->aSeg[i].iSegid);
+ Fts5StructureSegment *pOld = &pLvl->aSeg[i];
+ pSeg->nEntry += (pOld->nEntry - pOld->nEntryTombstone);
+ fts5DataRemoveSegment(p, pOld);
}
/* Remove the redundant segments from the input level */
@@ -236961,6 +243397,43 @@ static void fts5IndexMergeLevel(
}
/*
+** If this is not a contentless_delete=1 table, or if the 'deletemerge'
+** configuration option is set to 0, then this function always returns -1.
+** Otherwise, it searches the structure object passed as the second argument
+** for a level suitable for merging due to having a large number of
+** tombstones in the tombstone hash. If one is found, its index is returned.
+** Otherwise, if there is no suitable level, -1.
+*/
+static int fts5IndexFindDeleteMerge(Fts5Index *p, Fts5Structure *pStruct){
+ Fts5Config *pConfig = p->pConfig;
+ int iRet = -1;
+ if( pConfig->bContentlessDelete && pConfig->nDeleteMerge>0 ){
+ int ii;
+ int nBest = 0;
+
+ for(ii=0; ii<pStruct->nLevel; ii++){
+ Fts5StructureLevel *pLvl = &pStruct->aLevel[ii];
+ i64 nEntry = 0;
+ i64 nTomb = 0;
+ int iSeg;
+ for(iSeg=0; iSeg<pLvl->nSeg; iSeg++){
+ nEntry += pLvl->aSeg[iSeg].nEntry;
+ nTomb += pLvl->aSeg[iSeg].nEntryTombstone;
+ }
+ assert_nc( nEntry>0 || pLvl->nSeg==0 );
+ if( nEntry>0 ){
+ int nPercent = (nTomb * 100) / nEntry;
+ if( nPercent>=pConfig->nDeleteMerge && nPercent>nBest ){
+ iRet = ii;
+ nBest = nPercent;
+ }
+ }
+ }
+ }
+ return iRet;
+}
+
+/*
** Do up to nPg pages of automerge work on the index.
**
** Return true if any changes were actually made, or false otherwise.
@@ -236979,14 +243452,15 @@ static int fts5IndexMerge(
int iBestLvl = 0; /* Level offering the most input segments */
int nBest = 0; /* Number of input segments on best level */
- /* Set iBestLvl to the level to read input segments from. */
+ /* Set iBestLvl to the level to read input segments from. Or to -1 if
+ ** there is no level suitable to merge segments from. */
assert( pStruct->nLevel>0 );
for(iLvl=0; iLvl<pStruct->nLevel; iLvl++){
Fts5StructureLevel *pLvl = &pStruct->aLevel[iLvl];
if( pLvl->nMerge ){
if( pLvl->nMerge>nBest ){
iBestLvl = iLvl;
- nBest = pLvl->nMerge;
+ nBest = nMin;
}
break;
}
@@ -236995,22 +243469,18 @@ static int fts5IndexMerge(
iBestLvl = iLvl;
}
}
-
- /* If nBest is still 0, then the index must be empty. */
-#ifdef SQLITE_DEBUG
- for(iLvl=0; nBest==0 && iLvl<pStruct->nLevel; iLvl++){
- assert( pStruct->aLevel[iLvl].nSeg==0 );
+ if( nBest<nMin ){
+ iBestLvl = fts5IndexFindDeleteMerge(p, pStruct);
}
-#endif
- if( nBest<nMin && pStruct->aLevel[iBestLvl].nMerge==0 ){
- break;
- }
+ if( iBestLvl<0 ) break;
bRet = 1;
fts5IndexMergeLevel(p, &pStruct, iBestLvl, &nRem);
if( p->rc==SQLITE_OK && pStruct->aLevel[iBestLvl].nMerge==0 ){
fts5StructurePromote(p, iBestLvl+1, pStruct);
}
+
+ if( nMin==1 ) nMin = 2;
}
*ppStruct = pStruct;
return bRet;
@@ -237176,7 +243646,7 @@ static void fts5SecureDeleteOverflow(
pLeaf = 0;
}else if( bDetailNone ){
break;
- }else if( iNext>=pLeaf->szLeaf || iNext<4 ){
+ }else if( iNext>=pLeaf->szLeaf || pLeaf->nn<pLeaf->szLeaf || iNext<4 ){
p->rc = FTS5_CORRUPT;
break;
}else{
@@ -237195,9 +243665,13 @@ static void fts5SecureDeleteOverflow(
int i1 = pLeaf->szLeaf;
int i2 = 0;
+ i1 += fts5GetVarint32(&aPg[i1], iFirst);
+ if( iFirst<iNext ){
+ p->rc = FTS5_CORRUPT;
+ break;
+ }
aIdx = sqlite3Fts5MallocZero(&p->rc, (pLeaf->nn-pLeaf->szLeaf)+2);
if( aIdx==0 ) break;
- i1 += fts5GetVarint32(&aPg[i1], iFirst);
i2 = sqlite3Fts5PutVarint(aIdx, iFirst-nShift);
if( i1<pLeaf->nn ){
memcpy(&aIdx[i2], &aPg[i1], pLeaf->nn-i1);
@@ -237242,7 +243716,6 @@ static void fts5DoSecureDelete(
int iPgIdx = pSeg->pLeaf->szLeaf;
u64 iDelta = 0;
- u64 iNextDelta = 0;
int iNextOff = 0;
int iOff = 0;
int nIdx = 0;
@@ -237250,8 +243723,6 @@ static void fts5DoSecureDelete(
int bLastInDoclist = 0;
int iIdx = 0;
int iStart = 0;
- int iKeyOff = 0;
- int iPrevKeyOff = 0;
int iDelKeyOff = 0; /* Offset of deleted key, if any */
nIdx = nPg-iPgIdx;
@@ -237276,10 +243747,21 @@ static void fts5DoSecureDelete(
** This block sets the following variables:
**
** iStart:
+ ** The offset of the first byte of the rowid or delta-rowid
+ ** value for the doclist entry being removed.
+ **
** iDelta:
+ ** The value of the rowid or delta-rowid value for the doclist
+ ** entry being removed.
+ **
+ ** iNextOff:
+ ** The offset of the next entry following the position list
+ ** for the one being removed. If the position list for this
+ ** entry overflows onto the next leaf page, this value will be
+ ** greater than pLeaf->szLeaf.
*/
{
- int iSOP;
+ int iSOP; /* Start-Of-Position-list */
if( pSeg->iLeafPgno==pSeg->iTermLeafPgno ){
iStart = pSeg->iTermLeafOffset;
}else{
@@ -237315,47 +243797,81 @@ static void fts5DoSecureDelete(
}
iOff = iStart;
+
+ /* If the position-list for the entry being removed flows over past
+ ** the end of this page, delete the portion of the position-list on the
+ ** next page and beyond.
+ **
+ ** Set variable bLastInDoclist to true if this entry happens
+ ** to be the last rowid in the doclist for its term. */
if( iNextOff>=iPgIdx ){
int pgno = pSeg->iLeafPgno+1;
fts5SecureDeleteOverflow(p, pSeg->pSeg, pgno, &bLastInDoclist);
iNextOff = iPgIdx;
- }else{
- /* Set bLastInDoclist to true if the entry being removed is the last
- ** in its doclist. */
- for(iIdx=0, iKeyOff=0; iIdx<nIdx; /* no-op */){
- u32 iVal = 0;
- iIdx += fts5GetVarint32(&aIdx[iIdx], iVal);
- iKeyOff += iVal;
- if( iKeyOff==iNextOff ){
- bLastInDoclist = 1;
+ }
+
+ if( pSeg->bDel==0 ){
+ if( iNextOff!=iPgIdx ){
+ /* Loop through the page-footer. If iNextOff (offset of the
+ ** entry following the one we are removing) is equal to the
+ ** offset of a key on this page, then the entry is the last
+ ** in its doclist. */
+ int iKeyOff = 0;
+ for(iIdx=0; iIdx<nIdx; /* no-op */){
+ u32 iVal = 0;
+ iIdx += fts5GetVarint32(&aIdx[iIdx], iVal);
+ iKeyOff += iVal;
+ if( iKeyOff==iNextOff ){
+ bLastInDoclist = 1;
+ }
}
}
- }
- if( fts5GetU16(&aPg[0])==iStart && (bLastInDoclist||iNextOff==iPgIdx) ){
- fts5PutU16(&aPg[0], 0);
+ /* If this is (a) the first rowid on a page and (b) is not followed by
+ ** another position list on the same page, set the "first-rowid" field
+ ** of the header to 0. */
+ if( fts5GetU16(&aPg[0])==iStart && (bLastInDoclist || iNextOff==iPgIdx) ){
+ fts5PutU16(&aPg[0], 0);
+ }
}
- if( bLastInDoclist==0 ){
+ if( pSeg->bDel ){
+ iOff += sqlite3Fts5PutVarint(&aPg[iOff], iDelta);
+ aPg[iOff++] = 0x01;
+ }else if( bLastInDoclist==0 ){
if( iNextOff!=iPgIdx ){
+ u64 iNextDelta = 0;
iNextOff += fts5GetVarint(&aPg[iNextOff], &iNextDelta);
iOff += sqlite3Fts5PutVarint(&aPg[iOff], iDelta + iNextDelta);
}
}else if(
- iStart==pSeg->iTermLeafOffset && pSeg->iLeafPgno==pSeg->iTermLeafPgno
+ pSeg->iLeafPgno==pSeg->iTermLeafPgno
+ && iStart==pSeg->iTermLeafOffset
){
/* The entry being removed was the only position list in its
** doclist. Therefore the term needs to be removed as well. */
int iKey = 0;
- for(iIdx=0, iKeyOff=0; iIdx<nIdx; iKey++){
+ int iKeyOff = 0;
+
+ /* Set iKeyOff to the offset of the term that will be removed - the
+ ** last offset in the footer that is not greater than iStart. */
+ for(iIdx=0; iIdx<nIdx; iKey++){
u32 iVal = 0;
iIdx += fts5GetVarint32(&aIdx[iIdx], iVal);
if( (iKeyOff+iVal)>(u32)iStart ) break;
iKeyOff += iVal;
}
+ assert_nc( iKey>=1 );
+ /* Set iDelKeyOff to the value of the footer entry to remove from
+ ** the page. */
iDelKeyOff = iOff = iKeyOff;
+
if( iNextOff!=iPgIdx ){
+ /* This is the only position-list associated with the term, and there
+ ** is another term following it on this page. So the subsequent term
+ ** needs to be moved to replace the term associated with the entry
+ ** being removed. */
int nPrefix = 0;
int nSuffix = 0;
int nPrefix2 = 0;
@@ -237380,7 +243896,9 @@ static void fts5DoSecureDelete(
iOff += sqlite3Fts5PutVarint(&aPg[iOff], nPrefix);
}
iOff += sqlite3Fts5PutVarint(&aPg[iOff], nSuffix);
- if( nPrefix2>nPrefix ){
+ if( nPrefix2>pSeg->term.n ){
+ p->rc = FTS5_CORRUPT;
+ }else if( nPrefix2>nPrefix ){
memcpy(&aPg[iOff], &pSeg->term.p[nPrefix], nPrefix2-nPrefix);
iOff += (nPrefix2-nPrefix);
}
@@ -237390,80 +243908,88 @@ static void fts5DoSecureDelete(
}
}
}else if( iStart==4 ){
- int iPgno;
-
- assert_nc( pSeg->iLeafPgno>pSeg->iTermLeafPgno );
- /* The entry being removed may be the only position list in
- ** its doclist. */
- for(iPgno=pSeg->iLeafPgno-1; iPgno>pSeg->iTermLeafPgno; iPgno-- ){
- Fts5Data *pPg = fts5DataRead(p, FTS5_SEGMENT_ROWID(iSegid, iPgno));
- int bEmpty = (pPg && pPg->nn==4);
- fts5DataRelease(pPg);
- if( bEmpty==0 ) break;
- }
-
- if( iPgno==pSeg->iTermLeafPgno ){
- i64 iId = FTS5_SEGMENT_ROWID(iSegid, pSeg->iTermLeafPgno);
- Fts5Data *pTerm = fts5DataRead(p, iId);
- if( pTerm && pTerm->szLeaf==pSeg->iTermLeafOffset ){
- u8 *aTermIdx = &pTerm->p[pTerm->szLeaf];
- int nTermIdx = pTerm->nn - pTerm->szLeaf;
- int iTermIdx = 0;
- int iTermOff = 0;
-
- while( 1 ){
- u32 iVal = 0;
- int nByte = fts5GetVarint32(&aTermIdx[iTermIdx], iVal);
- iTermOff += iVal;
- if( (iTermIdx+nByte)>=nTermIdx ) break;
- iTermIdx += nByte;
- }
- nTermIdx = iTermIdx;
+ int iPgno;
+
+ assert_nc( pSeg->iLeafPgno>pSeg->iTermLeafPgno );
+ /* The entry being removed may be the only position list in
+ ** its doclist. */
+ for(iPgno=pSeg->iLeafPgno-1; iPgno>pSeg->iTermLeafPgno; iPgno-- ){
+ Fts5Data *pPg = fts5DataRead(p, FTS5_SEGMENT_ROWID(iSegid, iPgno));
+ int bEmpty = (pPg && pPg->nn==4);
+ fts5DataRelease(pPg);
+ if( bEmpty==0 ) break;
+ }
+
+ if( iPgno==pSeg->iTermLeafPgno ){
+ i64 iId = FTS5_SEGMENT_ROWID(iSegid, pSeg->iTermLeafPgno);
+ Fts5Data *pTerm = fts5DataRead(p, iId);
+ if( pTerm && pTerm->szLeaf==pSeg->iTermLeafOffset ){
+ u8 *aTermIdx = &pTerm->p[pTerm->szLeaf];
+ int nTermIdx = pTerm->nn - pTerm->szLeaf;
+ int iTermIdx = 0;
+ int iTermOff = 0;
+
+ while( 1 ){
+ u32 iVal = 0;
+ int nByte = fts5GetVarint32(&aTermIdx[iTermIdx], iVal);
+ iTermOff += iVal;
+ if( (iTermIdx+nByte)>=nTermIdx ) break;
+ iTermIdx += nByte;
+ }
+ nTermIdx = iTermIdx;
- memmove(&pTerm->p[iTermOff], &pTerm->p[pTerm->szLeaf], nTermIdx);
- fts5PutU16(&pTerm->p[2], iTermOff);
+ memmove(&pTerm->p[iTermOff], &pTerm->p[pTerm->szLeaf], nTermIdx);
+ fts5PutU16(&pTerm->p[2], iTermOff);
- fts5DataWrite(p, iId, pTerm->p, iTermOff+nTermIdx);
- if( nTermIdx==0 ){
- fts5SecureDeleteIdxEntry(p, iSegid, pSeg->iTermLeafPgno);
- }
+ fts5DataWrite(p, iId, pTerm->p, iTermOff+nTermIdx);
+ if( nTermIdx==0 ){
+ fts5SecureDeleteIdxEntry(p, iSegid, pSeg->iTermLeafPgno);
}
- fts5DataRelease(pTerm);
}
+ fts5DataRelease(pTerm);
}
+ }
- if( p->rc==SQLITE_OK ){
- const int nMove = nPg - iNextOff;
- int nShift = 0;
+ /* Assuming no error has occurred, this block does final edits to the
+ ** leaf page before writing it back to disk. Input variables are:
+ **
+ ** nPg: Total initial size of leaf page.
+ ** iPgIdx: Initial offset of page footer.
+ **
+ ** iOff: Offset to move data to
+ ** iNextOff: Offset to move data from
+ */
+ if( p->rc==SQLITE_OK ){
+ const int nMove = nPg - iNextOff; /* Number of bytes to move */
+ int nShift = iNextOff - iOff; /* Distance to move them */
- memmove(&aPg[iOff], &aPg[iNextOff], nMove);
- iPgIdx -= (iNextOff - iOff);
- nPg = iPgIdx;
- fts5PutU16(&aPg[2], iPgIdx);
+ int iPrevKeyOut = 0;
+ int iKeyIn = 0;
- nShift = iNextOff - iOff;
- for(iIdx=0, iKeyOff=0, iPrevKeyOff=0; iIdx<nIdx; /* no-op */){
- u32 iVal = 0;
- iIdx += fts5GetVarint32(&aIdx[iIdx], iVal);
- iKeyOff += iVal;
- if( iKeyOff!=iDelKeyOff ){
- if( iKeyOff>iOff ){
- iKeyOff -= nShift;
- nShift = 0;
- }
- nPg += sqlite3Fts5PutVarint(&aPg[nPg], iKeyOff - iPrevKeyOff);
- iPrevKeyOff = iKeyOff;
- }
- }
+ memmove(&aPg[iOff], &aPg[iNextOff], nMove);
+ iPgIdx -= nShift;
+ nPg = iPgIdx;
+ fts5PutU16(&aPg[2], iPgIdx);
- if( iPgIdx==nPg && nIdx>0 && pSeg->iLeafPgno!=1 ){
- fts5SecureDeleteIdxEntry(p, iSegid, pSeg->iLeafPgno);
+ for(iIdx=0; iIdx<nIdx; /* no-op */){
+ u32 iVal = 0;
+ iIdx += fts5GetVarint32(&aIdx[iIdx], iVal);
+ iKeyIn += iVal;
+ if( iKeyIn!=iDelKeyOff ){
+ int iKeyOut = (iKeyIn - (iKeyIn>iOff ? nShift : 0));
+ nPg += sqlite3Fts5PutVarint(&aPg[nPg], iKeyOut - iPrevKeyOut);
+ iPrevKeyOut = iKeyOut;
}
+ }
- assert_nc( nPg>4 || fts5GetU16(aPg)==0 );
- fts5DataWrite(p, FTS5_SEGMENT_ROWID(iSegid,pSeg->iLeafPgno), aPg,nPg);
+ if( iPgIdx==nPg && nIdx>0 && pSeg->iLeafPgno!=1 ){
+ fts5SecureDeleteIdxEntry(p, iSegid, pSeg->iLeafPgno);
}
- sqlite3_free(aIdx);
+
+ assert_nc( nPg>4 || fts5GetU16(aPg)==0 );
+ fts5DataWrite(p, FTS5_SEGMENT_ROWID(iSegid,pSeg->iLeafPgno), aPg, nPg);
+ }
+ sqlite3_free(aIdx);
}
/*
@@ -237475,10 +244001,10 @@ static void fts5FlushSecureDelete(
Fts5Index *p,
Fts5Structure *pStruct,
const char *zTerm,
+ int nTerm,
i64 iRowid
){
const int f = FTS5INDEX_QUERY_SKIPHASH;
- int nTerm = (int)strlen(zTerm);
Fts5Iter *pIter = 0; /* Used to find term instance */
fts5MultiIterNew(p, pStruct, f, 0, (const u8*)zTerm, nTerm, -1, 0, &pIter);
@@ -237517,184 +244043,197 @@ static void fts5FlushOneHash(Fts5Index *p){
/* Obtain a reference to the index structure and allocate a new segment-id
** for the new level-0 segment. */
pStruct = fts5StructureRead(p);
- iSegid = fts5AllocateSegid(p, pStruct);
fts5StructureInvalidate(p);
- if( iSegid ){
- const int pgsz = p->pConfig->pgsz;
- int eDetail = p->pConfig->eDetail;
- int bSecureDelete = p->pConfig->bSecureDelete;
- Fts5StructureSegment *pSeg; /* New segment within pStruct */
- Fts5Buffer *pBuf; /* Buffer in which to assemble leaf page */
- Fts5Buffer *pPgidx; /* Buffer in which to assemble pgidx */
-
- Fts5SegWriter writer;
- fts5WriteInit(p, &writer, iSegid);
-
- pBuf = &writer.writer.buf;
- pPgidx = &writer.writer.pgidx;
-
- /* fts5WriteInit() should have initialized the buffers to (most likely)
- ** the maximum space required. */
- assert( p->rc || pBuf->nSpace>=(pgsz + FTS5_DATA_PADDING) );
- assert( p->rc || pPgidx->nSpace>=(pgsz + FTS5_DATA_PADDING) );
+ if( sqlite3Fts5HashIsEmpty(pHash)==0 ){
+ iSegid = fts5AllocateSegid(p, pStruct);
+ if( iSegid ){
+ const int pgsz = p->pConfig->pgsz;
+ int eDetail = p->pConfig->eDetail;
+ int bSecureDelete = p->pConfig->bSecureDelete;
+ Fts5StructureSegment *pSeg; /* New segment within pStruct */
+ Fts5Buffer *pBuf; /* Buffer in which to assemble leaf page */
+ Fts5Buffer *pPgidx; /* Buffer in which to assemble pgidx */
+
+ Fts5SegWriter writer;
+ fts5WriteInit(p, &writer, iSegid);
+
+ pBuf = &writer.writer.buf;
+ pPgidx = &writer.writer.pgidx;
+
+ /* fts5WriteInit() should have initialized the buffers to (most likely)
+ ** the maximum space required. */
+ assert( p->rc || pBuf->nSpace>=(pgsz + FTS5_DATA_PADDING) );
+ assert( p->rc || pPgidx->nSpace>=(pgsz + FTS5_DATA_PADDING) );
+
+ /* Begin scanning through hash table entries. This loop runs once for each
+ ** term/doclist currently stored within the hash table. */
+ if( p->rc==SQLITE_OK ){
+ p->rc = sqlite3Fts5HashScanInit(pHash, 0, 0);
+ }
+ while( p->rc==SQLITE_OK && 0==sqlite3Fts5HashScanEof(pHash) ){
+ const char *zTerm; /* Buffer containing term */
+ int nTerm; /* Size of zTerm in bytes */
+ const u8 *pDoclist; /* Pointer to doclist for this term */
+ int nDoclist; /* Size of doclist in bytes */
+
+ /* Get the term and doclist for this entry. */
+ sqlite3Fts5HashScanEntry(pHash, &zTerm, &nTerm, &pDoclist, &nDoclist);
+ if( bSecureDelete==0 ){
+ fts5WriteAppendTerm(p, &writer, nTerm, (const u8*)zTerm);
+ if( p->rc!=SQLITE_OK ) break;
+ assert( writer.bFirstRowidInPage==0 );
+ }
- /* Begin scanning through hash table entries. This loop runs once for each
- ** term/doclist currently stored within the hash table. */
- if( p->rc==SQLITE_OK ){
- p->rc = sqlite3Fts5HashScanInit(pHash, 0, 0);
- }
- while( p->rc==SQLITE_OK && 0==sqlite3Fts5HashScanEof(pHash) ){
- const char *zTerm; /* Buffer containing term */
- int nTerm; /* Size of zTerm in bytes */
- const u8 *pDoclist; /* Pointer to doclist for this term */
- int nDoclist; /* Size of doclist in bytes */
-
- /* Get the term and doclist for this entry. */
- sqlite3Fts5HashScanEntry(pHash, &zTerm, &pDoclist, &nDoclist);
- nTerm = (int)strlen(zTerm);
- if( bSecureDelete==0 ){
- fts5WriteAppendTerm(p, &writer, nTerm, (const u8*)zTerm);
- if( p->rc!=SQLITE_OK ) break;
- assert( writer.bFirstRowidInPage==0 );
- }
-
- if( !bSecureDelete && pgsz>=(pBuf->n + pPgidx->n + nDoclist + 1) ){
- /* The entire doclist will fit on the current leaf. */
- fts5BufferSafeAppendBlob(pBuf, pDoclist, nDoclist);
- }else{
- int bTermWritten = !bSecureDelete;
- i64 iRowid = 0;
- i64 iPrev = 0;
- int iOff = 0;
-
- /* The entire doclist will not fit on this leaf. The following
- ** loop iterates through the poslists that make up the current
- ** doclist. */
- while( p->rc==SQLITE_OK && iOff<nDoclist ){
- u64 iDelta = 0;
- iOff += fts5GetVarint(&pDoclist[iOff], &iDelta);
- iRowid += iDelta;
-
- /* If in secure delete mode, and if this entry in the poslist is
- ** in fact a delete, then edit the existing segments directly
- ** using fts5FlushSecureDelete(). */
- if( bSecureDelete ){
- if( eDetail==FTS5_DETAIL_NONE ){
- if( iOff<nDoclist && pDoclist[iOff]==0x00 ){
- fts5FlushSecureDelete(p, pStruct, zTerm, iRowid);
- iOff++;
+ if( !bSecureDelete && pgsz>=(pBuf->n + pPgidx->n + nDoclist + 1) ){
+ /* The entire doclist will fit on the current leaf. */
+ fts5BufferSafeAppendBlob(pBuf, pDoclist, nDoclist);
+ }else{
+ int bTermWritten = !bSecureDelete;
+ i64 iRowid = 0;
+ i64 iPrev = 0;
+ int iOff = 0;
+
+ /* The entire doclist will not fit on this leaf. The following
+ ** loop iterates through the poslists that make up the current
+ ** doclist. */
+ while( p->rc==SQLITE_OK && iOff<nDoclist ){
+ u64 iDelta = 0;
+ iOff += fts5GetVarint(&pDoclist[iOff], &iDelta);
+ iRowid += iDelta;
+
+ /* If in secure delete mode, and if this entry in the poslist is
+ ** in fact a delete, then edit the existing segments directly
+ ** using fts5FlushSecureDelete(). */
+ if( bSecureDelete ){
+ if( eDetail==FTS5_DETAIL_NONE ){
if( iOff<nDoclist && pDoclist[iOff]==0x00 ){
+ fts5FlushSecureDelete(p, pStruct, zTerm, nTerm, iRowid);
+ iOff++;
+ if( iOff<nDoclist && pDoclist[iOff]==0x00 ){
+ iOff++;
+ nDoclist = 0;
+ }else{
+ continue;
+ }
+ }
+ }else if( (pDoclist[iOff] & 0x01) ){
+ fts5FlushSecureDelete(p, pStruct, zTerm, nTerm, iRowid);
+ if( p->rc!=SQLITE_OK || pDoclist[iOff]==0x01 ){
iOff++;
- nDoclist = 0;
- }else{
continue;
}
}
- }else if( (pDoclist[iOff] & 0x01) ){
- fts5FlushSecureDelete(p, pStruct, zTerm, iRowid);
- if( p->rc!=SQLITE_OK || pDoclist[iOff]==0x01 ){
- iOff++;
- continue;
- }
}
- }
- if( p->rc==SQLITE_OK && bTermWritten==0 ){
- fts5WriteAppendTerm(p, &writer, nTerm, (const u8*)zTerm);
- bTermWritten = 1;
- assert( p->rc!=SQLITE_OK || writer.bFirstRowidInPage==0 );
- }
+ if( p->rc==SQLITE_OK && bTermWritten==0 ){
+ fts5WriteAppendTerm(p, &writer, nTerm, (const u8*)zTerm);
+ bTermWritten = 1;
+ assert( p->rc!=SQLITE_OK || writer.bFirstRowidInPage==0 );
+ }
- if( writer.bFirstRowidInPage ){
- fts5PutU16(&pBuf->p[0], (u16)pBuf->n); /* first rowid on page */
- pBuf->n += sqlite3Fts5PutVarint(&pBuf->p[pBuf->n], iRowid);
- writer.bFirstRowidInPage = 0;
- fts5WriteDlidxAppend(p, &writer, iRowid);
- }else{
- pBuf->n += sqlite3Fts5PutVarint(&pBuf->p[pBuf->n], iRowid-iPrev);
- }
- if( p->rc!=SQLITE_OK ) break;
- assert( pBuf->n<=pBuf->nSpace );
- iPrev = iRowid;
+ if( writer.bFirstRowidInPage ){
+ fts5PutU16(&pBuf->p[0], (u16)pBuf->n); /* first rowid on page */
+ pBuf->n += sqlite3Fts5PutVarint(&pBuf->p[pBuf->n], iRowid);
+ writer.bFirstRowidInPage = 0;
+ fts5WriteDlidxAppend(p, &writer, iRowid);
+ }else{
+ u64 iRowidDelta = (u64)iRowid - (u64)iPrev;
+ pBuf->n += sqlite3Fts5PutVarint(&pBuf->p[pBuf->n], iRowidDelta);
+ }
+ if( p->rc!=SQLITE_OK ) break;
+ assert( pBuf->n<=pBuf->nSpace );
+ iPrev = iRowid;
- if( eDetail==FTS5_DETAIL_NONE ){
- if( iOff<nDoclist && pDoclist[iOff]==0 ){
- pBuf->p[pBuf->n++] = 0;
- iOff++;
+ if( eDetail==FTS5_DETAIL_NONE ){
if( iOff<nDoclist && pDoclist[iOff]==0 ){
pBuf->p[pBuf->n++] = 0;
iOff++;
+ if( iOff<nDoclist && pDoclist[iOff]==0 ){
+ pBuf->p[pBuf->n++] = 0;
+ iOff++;
+ }
+ }
+ if( (pBuf->n + pPgidx->n)>=pgsz ){
+ fts5WriteFlushLeaf(p, &writer);
}
- }
- if( (pBuf->n + pPgidx->n)>=pgsz ){
- fts5WriteFlushLeaf(p, &writer);
- }
- }else{
- int bDummy;
- int nPos;
- int nCopy = fts5GetPoslistSize(&pDoclist[iOff], &nPos, &bDummy);
- nCopy += nPos;
- if( (pBuf->n + pPgidx->n + nCopy) <= pgsz ){
- /* The entire poslist will fit on the current leaf. So copy
- ** it in one go. */
- fts5BufferSafeAppendBlob(pBuf, &pDoclist[iOff], nCopy);
}else{
- /* The entire poslist will not fit on this leaf. So it needs
- ** to be broken into sections. The only qualification being
- ** that each varint must be stored contiguously. */
- const u8 *pPoslist = &pDoclist[iOff];
- int iPos = 0;
- while( p->rc==SQLITE_OK ){
- int nSpace = pgsz - pBuf->n - pPgidx->n;
- int n = 0;
- if( (nCopy - iPos)<=nSpace ){
- n = nCopy - iPos;
- }else{
- n = fts5PoslistPrefix(&pPoslist[iPos], nSpace);
- }
- assert( n>0 );
- fts5BufferSafeAppendBlob(pBuf, &pPoslist[iPos], n);
- iPos += n;
- if( (pBuf->n + pPgidx->n)>=pgsz ){
- fts5WriteFlushLeaf(p, &writer);
+ int bDel = 0;
+ int nPos = 0;
+ int nCopy = fts5GetPoslistSize(&pDoclist[iOff], &nPos, &bDel);
+ if( bDel && bSecureDelete ){
+ fts5BufferAppendVarint(&p->rc, pBuf, nPos*2);
+ iOff += nCopy;
+ nCopy = nPos;
+ }else{
+ nCopy += nPos;
+ }
+ if( (pBuf->n + pPgidx->n + nCopy) <= pgsz ){
+ /* The entire poslist will fit on the current leaf. So copy
+ ** it in one go. */
+ fts5BufferSafeAppendBlob(pBuf, &pDoclist[iOff], nCopy);
+ }else{
+ /* The entire poslist will not fit on this leaf. So it needs
+ ** to be broken into sections. The only qualification being
+ ** that each varint must be stored contiguously. */
+ const u8 *pPoslist = &pDoclist[iOff];
+ int iPos = 0;
+ while( p->rc==SQLITE_OK ){
+ int nSpace = pgsz - pBuf->n - pPgidx->n;
+ int n = 0;
+ if( (nCopy - iPos)<=nSpace ){
+ n = nCopy - iPos;
+ }else{
+ n = fts5PoslistPrefix(&pPoslist[iPos], nSpace);
+ }
+ assert( n>0 );
+ fts5BufferSafeAppendBlob(pBuf, &pPoslist[iPos], n);
+ iPos += n;
+ if( (pBuf->n + pPgidx->n)>=pgsz ){
+ fts5WriteFlushLeaf(p, &writer);
+ }
+ if( iPos>=nCopy ) break;
}
- if( iPos>=nCopy ) break;
}
+ iOff += nCopy;
}
- iOff += nCopy;
}
}
- }
-
- /* TODO2: Doclist terminator written here. */
- /* pBuf->p[pBuf->n++] = '\0'; */
- assert( pBuf->n<=pBuf->nSpace );
- if( p->rc==SQLITE_OK ) sqlite3Fts5HashScanNext(pHash);
- }
- sqlite3Fts5HashClear(pHash);
- fts5WriteFinish(p, &writer, &pgnoLast);
- assert( p->rc!=SQLITE_OK || bSecureDelete || pgnoLast>0 );
- if( pgnoLast>0 ){
- /* Update the Fts5Structure. It is written back to the database by the
- ** fts5StructureRelease() call below. */
- if( pStruct->nLevel==0 ){
- fts5StructureAddLevel(&p->rc, &pStruct);
+ /* TODO2: Doclist terminator written here. */
+ /* pBuf->p[pBuf->n++] = '\0'; */
+ assert( pBuf->n<=pBuf->nSpace );
+ if( p->rc==SQLITE_OK ) sqlite3Fts5HashScanNext(pHash);
}
- fts5StructureExtendLevel(&p->rc, pStruct, 0, 1, 0);
- if( p->rc==SQLITE_OK ){
- pSeg = &pStruct->aLevel[0].aSeg[ pStruct->aLevel[0].nSeg++ ];
- pSeg->iSegid = iSegid;
- pSeg->pgnoFirst = 1;
- pSeg->pgnoLast = pgnoLast;
- pStruct->nSegment++;
+ fts5WriteFinish(p, &writer, &pgnoLast);
+
+ assert( p->rc!=SQLITE_OK || bSecureDelete || pgnoLast>0 );
+ if( pgnoLast>0 ){
+ /* Update the Fts5Structure. It is written back to the database by the
+ ** fts5StructureRelease() call below. */
+ if( pStruct->nLevel==0 ){
+ fts5StructureAddLevel(&p->rc, &pStruct);
+ }
+ fts5StructureExtendLevel(&p->rc, pStruct, 0, 1, 0);
+ if( p->rc==SQLITE_OK ){
+ pSeg = &pStruct->aLevel[0].aSeg[ pStruct->aLevel[0].nSeg++ ];
+ pSeg->iSegid = iSegid;
+ pSeg->pgnoFirst = 1;
+ pSeg->pgnoLast = pgnoLast;
+ if( pStruct->nOriginCntr>0 ){
+ pSeg->iOrigin1 = pStruct->nOriginCntr;
+ pSeg->iOrigin2 = pStruct->nOriginCntr;
+ pSeg->nEntry = p->nPendingRow;
+ pStruct->nOriginCntr++;
+ }
+ pStruct->nSegment++;
+ }
+ fts5StructurePromote(p, 0, pStruct);
}
- fts5StructurePromote(p, 0, pStruct);
}
}
- fts5IndexAutomerge(p, &pStruct, pgnoLast);
+ fts5IndexAutomerge(p, &pStruct, pgnoLast + p->nContentlessDelete);
fts5IndexCrisismerge(p, &pStruct);
fts5StructureWrite(p, pStruct);
fts5StructureRelease(pStruct);
@@ -237705,10 +244244,21 @@ static void fts5FlushOneHash(Fts5Index *p){
*/
static void fts5IndexFlush(Fts5Index *p){
/* Unless it is empty, flush the hash table to disk */
- if( p->nPendingData ){
+ if( p->flushRc ){
+ p->rc = p->flushRc;
+ return;
+ }
+ if( p->nPendingData || p->nContentlessDelete ){
assert( p->pHash );
- p->nPendingData = 0;
fts5FlushOneHash(p);
+ if( p->rc==SQLITE_OK ){
+ sqlite3Fts5HashClear(p->pHash);
+ p->nPendingData = 0;
+ p->nPendingRow = 0;
+ p->nContentlessDelete = 0;
+ }else if( p->nPendingData || p->nContentlessDelete ){
+ p->flushRc = p->rc;
+ }
}
}
@@ -237724,17 +244274,22 @@ static Fts5Structure *fts5IndexOptimizeStruct(
/* Figure out if this structure requires optimization. A structure does
** not require optimization if either:
**
- ** + it consists of fewer than two segments, or
- ** + all segments are on the same level, or
- ** + all segments except one are currently inputs to a merge operation.
+ ** 1. it consists of fewer than two segments, or
+ ** 2. all segments are on the same level, or
+ ** 3. all segments except one are currently inputs to a merge operation.
**
- ** In the first case, return NULL. In the second, increment the ref-count
- ** on *pStruct and return a copy of the pointer to it.
+ ** In the first case, if there are no tombstone hash pages, return NULL. In
+ ** the second, increment the ref-count on *pStruct and return a copy of the
+ ** pointer to it.
*/
- if( nSeg<2 ) return 0;
+ if( nSeg==0 ) return 0;
for(i=0; i<pStruct->nLevel; i++){
int nThis = pStruct->aLevel[i].nSeg;
- if( nThis==nSeg || (nThis==nSeg-1 && pStruct->aLevel[i].nMerge==nThis) ){
+ int nMerge = pStruct->aLevel[i].nMerge;
+ if( nThis>0 && (nThis==nSeg || (nThis==nSeg-1 && nMerge==nThis)) ){
+ if( nSeg==1 && nThis==1 && pStruct->aLevel[i].aSeg[0].nPgTombstone==0 ){
+ return 0;
+ }
fts5StructureRef(pStruct);
return pStruct;
}
@@ -237750,6 +244305,7 @@ static Fts5Structure *fts5IndexOptimizeStruct(
pNew->nLevel = MIN(pStruct->nLevel+1, FTS5_MAX_LEVEL);
pNew->nRef = 1;
pNew->nWriteCounter = pStruct->nWriteCounter;
+ pNew->nOriginCntr = pStruct->nOriginCntr;
pLvl = &pNew->aLevel[pNew->nLevel-1];
pLvl->aSeg = (Fts5StructureSegment*)sqlite3Fts5MallocZero(&p->rc, nByte);
if( pLvl->aSeg ){
@@ -237780,7 +244336,9 @@ static int sqlite3Fts5IndexOptimize(Fts5Index *p){
assert( p->rc==SQLITE_OK );
fts5IndexFlush(p);
+ assert( p->rc!=SQLITE_OK || p->nContentlessDelete==0 );
pStruct = fts5StructureRead(p);
+ assert( p->rc!=SQLITE_OK || pStruct!=0 );
fts5StructureInvalidate(p);
if( pStruct ){
@@ -237809,7 +244367,10 @@ static int sqlite3Fts5IndexOptimize(Fts5Index *p){
** INSERT command.
*/
static int sqlite3Fts5IndexMerge(Fts5Index *p, int nMerge){
- Fts5Structure *pStruct = fts5StructureRead(p);
+ Fts5Structure *pStruct = 0;
+
+ fts5IndexFlush(p);
+ pStruct = fts5StructureRead(p);
if( pStruct ){
int nMin = p->pConfig->nUsermerge;
fts5StructureInvalidate(p);
@@ -237817,7 +244378,7 @@ static int sqlite3Fts5IndexMerge(Fts5Index *p, int nMerge){
Fts5Structure *pNew = fts5IndexOptimizeStruct(p, pStruct);
fts5StructureRelease(pStruct);
pStruct = pNew;
- nMin = 2;
+ nMin = 1;
nMerge = nMerge*-1;
}
if( pStruct && pStruct->nLevel ){
@@ -238183,7 +244744,7 @@ static void fts5SetupPrefixIter(
u8 *pToken, /* Buffer containing prefix to match */
int nToken, /* Size of buffer pToken in bytes */
Fts5Colset *pColset, /* Restrict matches to these columns */
- Fts5Iter **ppIter /* OUT: New iterator */
+ Fts5Iter **ppIter /* OUT: New iterator */
){
Fts5Structure *pStruct;
Fts5Buffer *aBuf;
@@ -238204,8 +244765,9 @@ static void fts5SetupPrefixIter(
aBuf = (Fts5Buffer*)fts5IdxMalloc(p, sizeof(Fts5Buffer)*nBuf);
pStruct = fts5StructureRead(p);
+ assert( p->rc!=SQLITE_OK || (aBuf && pStruct) );
- if( aBuf && pStruct ){
+ if( p->rc==SQLITE_OK ){
const int flags = FTS5INDEX_QUERY_SCAN
| FTS5INDEX_QUERY_SKIPEMPTY
| FTS5INDEX_QUERY_NOOUTPUT;
@@ -238217,6 +244779,12 @@ static void fts5SetupPrefixIter(
int bNewTerm = 1;
memset(&doclist, 0, sizeof(doclist));
+
+ /* If iIdx is non-zero, then it is the number of a prefix-index for
+ ** prefixes 1 character longer than the prefix being queried for. That
+ ** index contains all the doclists required, except for the one
+ ** corresponding to the prefix itself. That one is extracted from the
+ ** main term index here. */
if( iIdx!=0 ){
int dummy = 0;
const int f2 = FTS5INDEX_QUERY_SKIPEMPTY|FTS5INDEX_QUERY_NOOUTPUT;
@@ -238240,6 +244808,7 @@ static void fts5SetupPrefixIter(
pToken[0] = FTS5_MAIN_PREFIX + iIdx;
fts5MultiIterNew(p, pStruct, flags, pColset, pToken, nToken, -1, 0, &p1);
fts5IterSetOutputCb(&p->rc, p1);
+
for( /* no-op */ ;
fts5MultiIterEof(p, p1)==0;
fts5MultiIterNext2(p, p1, &bNewTerm)
@@ -238255,7 +244824,6 @@ static void fts5SetupPrefixIter(
}
if( p1->base.nData==0 ) continue;
-
if( p1->base.iRowid<=iLastRowid && doclist.n>0 ){
for(i=0; p->rc==SQLITE_OK && doclist.n; i++){
int i1 = i*nMerge;
@@ -238294,7 +244862,7 @@ static void fts5SetupPrefixIter(
}
fts5MultiIterFree(p1);
- pData = fts5IdxMalloc(p, sizeof(Fts5Data)+doclist.n+FTS5_DATA_ZERO_PADDING);
+ pData = fts5IdxMalloc(p, sizeof(*pData)+doclist.n+FTS5_DATA_ZERO_PADDING);
if( pData ){
pData->p = (u8*)&pData[1];
pData->nn = pData->szLeaf = doclist.n;
@@ -238331,6 +244899,9 @@ static int sqlite3Fts5IndexBeginWrite(Fts5Index *p, int bDelete, i64 iRowid){
p->iWriteRowid = iRowid;
p->bDelete = bDelete;
+ if( bDelete==0 ){
+ p->nPendingRow++;
+ }
return fts5IndexReturn(p);
}
@@ -238368,6 +244939,9 @@ static int sqlite3Fts5IndexReinit(Fts5Index *p){
fts5StructureInvalidate(p);
fts5IndexDiscardData(p);
memset(&s, 0, sizeof(Fts5Structure));
+ if( p->pConfig->bContentlessDelete ){
+ s.nOriginCntr = 1;
+ }
fts5DataWrite(p, FTS5_AVERAGES_ROWID, (const u8*)"", 0);
fts5StructureWrite(p, &s);
return fts5IndexReturn(p);
@@ -238431,6 +245005,7 @@ static int sqlite3Fts5IndexClose(Fts5Index *p){
sqlite3_finalize(p->pIdxWriter);
sqlite3_finalize(p->pIdxDeleter);
sqlite3_finalize(p->pIdxSelect);
+ sqlite3_finalize(p->pIdxNextSelect);
sqlite3_finalize(p->pDataVersion);
sqlite3_finalize(p->pDeleteFromIdx);
sqlite3Fts5HashFree(p->pHash);
@@ -238527,6 +245102,454 @@ static int sqlite3Fts5IndexWrite(
}
/*
+** pToken points to a buffer of size nToken bytes containing a search
+** term, including the index number at the start, used on a tokendata=1
+** table. This function returns true if the term in buffer pBuf matches
+** token pToken/nToken.
+*/
+static int fts5IsTokendataPrefix(
+ Fts5Buffer *pBuf,
+ const u8 *pToken,
+ int nToken
+){
+ return (
+ pBuf->n>=nToken
+ && 0==memcmp(pBuf->p, pToken, nToken)
+ && (pBuf->n==nToken || pBuf->p[nToken]==0x00)
+ );
+}
+
+/*
+** Ensure the segment-iterator passed as the only argument points to EOF.
+*/
+static void fts5SegIterSetEOF(Fts5SegIter *pSeg){
+ fts5DataRelease(pSeg->pLeaf);
+ pSeg->pLeaf = 0;
+}
+
+/*
+** Usually, a tokendata=1 iterator (struct Fts5TokenDataIter) accumulates an
+** array of these for each row it visits. Or, for an iterator used by an
+** "ORDER BY rank" query, it accumulates an array of these for the entire
+** query.
+**
+** Each instance in the array indicates the iterator (and therefore term)
+** associated with position iPos of rowid iRowid. This is used by the
+** xInstToken() API.
+*/
+struct Fts5TokenDataMap {
+ i64 iRowid; /* Row this token is located in */
+ i64 iPos; /* Position of token */
+ int iIter; /* Iterator token was read from */
+};
+
+/*
+** An object used to supplement Fts5Iter for tokendata=1 iterators.
+*/
+struct Fts5TokenDataIter {
+ int nIter;
+ int nIterAlloc;
+
+ int nMap;
+ int nMapAlloc;
+ Fts5TokenDataMap *aMap;
+
+ Fts5PoslistReader *aPoslistReader;
+ int *aPoslistToIter;
+ Fts5Iter *apIter[1];
+};
+
+/*
+** This function appends iterator pAppend to Fts5TokenDataIter pIn and
+** returns the result.
+*/
+static Fts5TokenDataIter *fts5AppendTokendataIter(
+ Fts5Index *p, /* Index object (for error code) */
+ Fts5TokenDataIter *pIn, /* Current Fts5TokenDataIter struct */
+ Fts5Iter *pAppend /* Append this iterator */
+){
+ Fts5TokenDataIter *pRet = pIn;
+
+ if( p->rc==SQLITE_OK ){
+ if( pIn==0 || pIn->nIter==pIn->nIterAlloc ){
+ int nAlloc = pIn ? pIn->nIterAlloc*2 : 16;
+ int nByte = nAlloc * sizeof(Fts5Iter*) + sizeof(Fts5TokenDataIter);
+ Fts5TokenDataIter *pNew = (Fts5TokenDataIter*)sqlite3_realloc(pIn, nByte);
+
+ if( pNew==0 ){
+ p->rc = SQLITE_NOMEM;
+ }else{
+ if( pIn==0 ) memset(pNew, 0, nByte);
+ pRet = pNew;
+ pNew->nIterAlloc = nAlloc;
+ }
+ }
+ }
+ if( p->rc ){
+ sqlite3Fts5IterClose((Fts5IndexIter*)pAppend);
+ }else{
+ pRet->apIter[pRet->nIter++] = pAppend;
+ }
+ assert( pRet==0 || pRet->nIter<=pRet->nIterAlloc );
+
+ return pRet;
+}
+
+/*
+** Delete an Fts5TokenDataIter structure and its contents.
+*/
+static void fts5TokendataIterDelete(Fts5TokenDataIter *pSet){
+ if( pSet ){
+ int ii;
+ for(ii=0; ii<pSet->nIter; ii++){
+ fts5MultiIterFree(pSet->apIter[ii]);
+ }
+ sqlite3_free(pSet->aPoslistReader);
+ sqlite3_free(pSet->aMap);
+ sqlite3_free(pSet);
+ }
+}
+
+/*
+** Append a mapping to the token-map belonging to object pT.
+*/
+static void fts5TokendataIterAppendMap(
+ Fts5Index *p,
+ Fts5TokenDataIter *pT,
+ int iIter,
+ i64 iRowid,
+ i64 iPos
+){
+ if( p->rc==SQLITE_OK ){
+ if( pT->nMap==pT->nMapAlloc ){
+ int nNew = pT->nMapAlloc ? pT->nMapAlloc*2 : 64;
+ int nByte = nNew * sizeof(Fts5TokenDataMap);
+ Fts5TokenDataMap *aNew;
+
+ aNew = (Fts5TokenDataMap*)sqlite3_realloc(pT->aMap, nByte);
+ if( aNew==0 ){
+ p->rc = SQLITE_NOMEM;
+ return;
+ }
+
+ pT->aMap = aNew;
+ pT->nMapAlloc = nNew;
+ }
+
+ pT->aMap[pT->nMap].iRowid = iRowid;
+ pT->aMap[pT->nMap].iPos = iPos;
+ pT->aMap[pT->nMap].iIter = iIter;
+ pT->nMap++;
+ }
+}
+
+/*
+** The iterator passed as the only argument must be a tokendata=1 iterator
+** (pIter->pTokenDataIter!=0). This function sets the iterator output
+** variables (pIter->base.*) according to the contents of the current
+** row.
+*/
+static void fts5IterSetOutputsTokendata(Fts5Iter *pIter){
+ int ii;
+ int nHit = 0;
+ i64 iRowid = SMALLEST_INT64;
+ int iMin = 0;
+
+ Fts5TokenDataIter *pT = pIter->pTokenDataIter;
+
+ pIter->base.nData = 0;
+ pIter->base.pData = 0;
+
+ for(ii=0; ii<pT->nIter; ii++){
+ Fts5Iter *p = pT->apIter[ii];
+ if( p->base.bEof==0 ){
+ if( nHit==0 || p->base.iRowid<iRowid ){
+ iRowid = p->base.iRowid;
+ nHit = 1;
+ pIter->base.pData = p->base.pData;
+ pIter->base.nData = p->base.nData;
+ iMin = ii;
+ }else if( p->base.iRowid==iRowid ){
+ nHit++;
+ }
+ }
+ }
+
+ if( nHit==0 ){
+ pIter->base.bEof = 1;
+ }else{
+ int eDetail = pIter->pIndex->pConfig->eDetail;
+ pIter->base.bEof = 0;
+ pIter->base.iRowid = iRowid;
+
+ if( nHit==1 && eDetail==FTS5_DETAIL_FULL ){
+ fts5TokendataIterAppendMap(pIter->pIndex, pT, iMin, iRowid, -1);
+ }else
+ if( nHit>1 && eDetail!=FTS5_DETAIL_NONE ){
+ int nReader = 0;
+ int nByte = 0;
+ i64 iPrev = 0;
+
+ /* Allocate array of iterators if they are not already allocated. */
+ if( pT->aPoslistReader==0 ){
+ pT->aPoslistReader = (Fts5PoslistReader*)sqlite3Fts5MallocZero(
+ &pIter->pIndex->rc,
+ pT->nIter * (sizeof(Fts5PoslistReader) + sizeof(int))
+ );
+ if( pT->aPoslistReader==0 ) return;
+ pT->aPoslistToIter = (int*)&pT->aPoslistReader[pT->nIter];
+ }
+
+ /* Populate an iterator for each poslist that will be merged */
+ for(ii=0; ii<pT->nIter; ii++){
+ Fts5Iter *p = pT->apIter[ii];
+ if( iRowid==p->base.iRowid ){
+ pT->aPoslistToIter[nReader] = ii;
+ sqlite3Fts5PoslistReaderInit(
+ p->base.pData, p->base.nData, &pT->aPoslistReader[nReader++]
+ );
+ nByte += p->base.nData;
+ }
+ }
+
+ /* Ensure the output buffer is large enough */
+ if( fts5BufferGrow(&pIter->pIndex->rc, &pIter->poslist, nByte+nHit*10) ){
+ return;
+ }
+
+ /* Ensure the token-mapping is large enough */
+ if( eDetail==FTS5_DETAIL_FULL && pT->nMapAlloc<(pT->nMap + nByte) ){
+ int nNew = (pT->nMapAlloc + nByte) * 2;
+ Fts5TokenDataMap *aNew = (Fts5TokenDataMap*)sqlite3_realloc(
+ pT->aMap, nNew*sizeof(Fts5TokenDataMap)
+ );
+ if( aNew==0 ){
+ pIter->pIndex->rc = SQLITE_NOMEM;
+ return;
+ }
+ pT->aMap = aNew;
+ pT->nMapAlloc = nNew;
+ }
+
+ pIter->poslist.n = 0;
+
+ while( 1 ){
+ i64 iMinPos = LARGEST_INT64;
+
+ /* Find smallest position */
+ iMin = 0;
+ for(ii=0; ii<nReader; ii++){
+ Fts5PoslistReader *pReader = &pT->aPoslistReader[ii];
+ if( pReader->bEof==0 ){
+ if( pReader->iPos<iMinPos ){
+ iMinPos = pReader->iPos;
+ iMin = ii;
+ }
+ }
+ }
+
+ /* If all readers were at EOF, break out of the loop. */
+ if( iMinPos==LARGEST_INT64 ) break;
+
+ sqlite3Fts5PoslistSafeAppend(&pIter->poslist, &iPrev, iMinPos);
+ sqlite3Fts5PoslistReaderNext(&pT->aPoslistReader[iMin]);
+
+ if( eDetail==FTS5_DETAIL_FULL ){
+ pT->aMap[pT->nMap].iPos = iMinPos;
+ pT->aMap[pT->nMap].iIter = pT->aPoslistToIter[iMin];
+ pT->aMap[pT->nMap].iRowid = iRowid;
+ pT->nMap++;
+ }
+ }
+
+ pIter->base.pData = pIter->poslist.p;
+ pIter->base.nData = pIter->poslist.n;
+ }
+ }
+}
+
+/*
+** The iterator passed as the only argument must be a tokendata=1 iterator
+** (pIter->pTokenDataIter!=0). This function advances the iterator. If
+** argument bFrom is false, then the iterator is advanced to the next
+** entry. Or, if bFrom is true, it is advanced to the first entry with
+** a rowid of iFrom or greater.
+*/
+static void fts5TokendataIterNext(Fts5Iter *pIter, int bFrom, i64 iFrom){
+ int ii;
+ Fts5TokenDataIter *pT = pIter->pTokenDataIter;
+
+ for(ii=0; ii<pT->nIter; ii++){
+ Fts5Iter *p = pT->apIter[ii];
+ if( p->base.bEof==0
+ && (p->base.iRowid==pIter->base.iRowid || (bFrom && p->base.iRowid<iFrom))
+ ){
+ fts5MultiIterNext(p->pIndex, p, bFrom, iFrom);
+ while( bFrom && p->base.bEof==0
+ && p->base.iRowid<iFrom
+ && p->pIndex->rc==SQLITE_OK
+ ){
+ fts5MultiIterNext(p->pIndex, p, 0, 0);
+ }
+ }
+ }
+
+ fts5IterSetOutputsTokendata(pIter);
+}
+
+/*
+** If the segment-iterator passed as the first argument is at EOF, then
+** set pIter->term to a copy of buffer pTerm.
+*/
+static void fts5TokendataSetTermIfEof(Fts5Iter *pIter, Fts5Buffer *pTerm){
+ if( pIter && pIter->aSeg[0].pLeaf==0 ){
+ fts5BufferSet(&pIter->pIndex->rc, &pIter->aSeg[0].term, pTerm->n, pTerm->p);
+ }
+}
+
+/*
+** This function sets up an iterator to use for a non-prefix query on a
+** tokendata=1 table.
+*/
+static Fts5Iter *fts5SetupTokendataIter(
+ Fts5Index *p, /* FTS index to query */
+ const u8 *pToken, /* Buffer containing query term */
+ int nToken, /* Size of buffer pToken in bytes */
+ Fts5Colset *pColset /* Colset to filter on */
+){
+ Fts5Iter *pRet = 0;
+ Fts5TokenDataIter *pSet = 0;
+ Fts5Structure *pStruct = 0;
+ const int flags = FTS5INDEX_QUERY_SCANONETERM | FTS5INDEX_QUERY_SCAN;
+
+ Fts5Buffer bSeek = {0, 0, 0};
+ Fts5Buffer *pSmall = 0;
+
+ fts5IndexFlush(p);
+ pStruct = fts5StructureRead(p);
+
+ while( p->rc==SQLITE_OK ){
+ Fts5Iter *pPrev = pSet ? pSet->apIter[pSet->nIter-1] : 0;
+ Fts5Iter *pNew = 0;
+ Fts5SegIter *pNewIter = 0;
+ Fts5SegIter *pPrevIter = 0;
+
+ int iLvl, iSeg, ii;
+
+ pNew = fts5MultiIterAlloc(p, pStruct->nSegment);
+ if( pSmall ){
+ fts5BufferSet(&p->rc, &bSeek, pSmall->n, pSmall->p);
+ fts5BufferAppendBlob(&p->rc, &bSeek, 1, (const u8*)"\0");
+ }else{
+ fts5BufferSet(&p->rc, &bSeek, nToken, pToken);
+ }
+ if( p->rc ){
+ sqlite3Fts5IterClose((Fts5IndexIter*)pNew);
+ break;
+ }
+
+ pNewIter = &pNew->aSeg[0];
+ pPrevIter = (pPrev ? &pPrev->aSeg[0] : 0);
+ for(iLvl=0; iLvl<pStruct->nLevel; iLvl++){
+ for(iSeg=pStruct->aLevel[iLvl].nSeg-1; iSeg>=0; iSeg--){
+ Fts5StructureSegment *pSeg = &pStruct->aLevel[iLvl].aSeg[iSeg];
+ int bDone = 0;
+
+ if( pPrevIter ){
+ if( fts5BufferCompare(pSmall, &pPrevIter->term) ){
+ memcpy(pNewIter, pPrevIter, sizeof(Fts5SegIter));
+ memset(pPrevIter, 0, sizeof(Fts5SegIter));
+ bDone = 1;
+ }else if( pPrevIter->iEndofDoclist>pPrevIter->pLeaf->szLeaf ){
+ fts5SegIterNextInit(p,(const char*)bSeek.p,bSeek.n-1,pSeg,pNewIter);
+ bDone = 1;
+ }
+ }
+
+ if( bDone==0 ){
+ fts5SegIterSeekInit(p, bSeek.p, bSeek.n, flags, pSeg, pNewIter);
+ }
+
+ if( pPrevIter ){
+ if( pPrevIter->pTombArray ){
+ pNewIter->pTombArray = pPrevIter->pTombArray;
+ pNewIter->pTombArray->nRef++;
+ }
+ }else{
+ fts5SegIterAllocTombstone(p, pNewIter);
+ }
+
+ pNewIter++;
+ if( pPrevIter ) pPrevIter++;
+ if( p->rc ) break;
+ }
+ }
+ fts5TokendataSetTermIfEof(pPrev, pSmall);
+
+ pNew->bSkipEmpty = 1;
+ pNew->pColset = pColset;
+ fts5IterSetOutputCb(&p->rc, pNew);
+
+ /* Loop through all segments in the new iterator. Find the smallest
+ ** term that any segment-iterator points to. Iterator pNew will be
+ ** used for this term. Also, set any iterator that points to a term that
+ ** does not match pToken/nToken to point to EOF */
+ pSmall = 0;
+ for(ii=0; ii<pNew->nSeg; ii++){
+ Fts5SegIter *pII = &pNew->aSeg[ii];
+ if( 0==fts5IsTokendataPrefix(&pII->term, pToken, nToken) ){
+ fts5SegIterSetEOF(pII);
+ }
+ if( pII->pLeaf && (!pSmall || fts5BufferCompare(pSmall, &pII->term)>0) ){
+ pSmall = &pII->term;
+ }
+ }
+
+ /* If pSmall is still NULL at this point, then the new iterator does
+ ** not point to any terms that match the query. So delete it and break
+ ** out of the loop - all required iterators have been collected. */
+ if( pSmall==0 ){
+ sqlite3Fts5IterClose((Fts5IndexIter*)pNew);
+ break;
+ }
+
+ /* Append this iterator to the set and continue. */
+ pSet = fts5AppendTokendataIter(p, pSet, pNew);
+ }
+
+ if( p->rc==SQLITE_OK && pSet ){
+ int ii;
+ for(ii=0; ii<pSet->nIter; ii++){
+ Fts5Iter *pIter = pSet->apIter[ii];
+ int iSeg;
+ for(iSeg=0; iSeg<pIter->nSeg; iSeg++){
+ pIter->aSeg[iSeg].flags |= FTS5_SEGITER_ONETERM;
+ }
+ fts5MultiIterFinishSetup(p, pIter);
+ }
+ }
+
+ if( p->rc==SQLITE_OK ){
+ pRet = fts5MultiIterAlloc(p, 0);
+ }
+ if( pRet ){
+ pRet->pTokenDataIter = pSet;
+ if( pSet ){
+ fts5IterSetOutputsTokendata(pRet);
+ }else{
+ pRet->base.bEof = 1;
+ }
+ }else{
+ fts5TokendataIterDelete(pSet);
+ }
+
+ fts5StructureRelease(pStruct);
+ fts5BufferFree(&bSeek);
+ return pRet;
+}
+
+
+/*
** Open a new iterator to iterate though all rowid that match the
** specified token or token prefix.
*/
@@ -238547,8 +245570,13 @@ static int sqlite3Fts5IndexQuery(
if( sqlite3Fts5BufferSize(&p->rc, &buf, nToken+1)==0 ){
int iIdx = 0; /* Index to search */
int iPrefixIdx = 0; /* +1 prefix index */
+ int bTokendata = pConfig->bTokendata;
if( nToken>0 ) memcpy(&buf.p[1], pToken, nToken);
+ if( flags & (FTS5INDEX_QUERY_NOTOKENDATA|FTS5INDEX_QUERY_SCAN) ){
+ bTokendata = 0;
+ }
+
/* Figure out which index to search and set iIdx accordingly. If this
** is a prefix query for which there is no prefix index, set iIdx to
** greater than pConfig->nPrefix to indicate that the query will be
@@ -238574,7 +245602,10 @@ static int sqlite3Fts5IndexQuery(
}
}
- if( iIdx<=pConfig->nPrefix ){
+ if( bTokendata && iIdx==0 ){
+ buf.p[0] = '0';
+ pRet = fts5SetupTokendataIter(p, buf.p, nToken+1, pColset);
+ }else if( iIdx<=pConfig->nPrefix ){
/* Straight index lookup */
Fts5Structure *pStruct = fts5StructureRead(p);
buf.p[0] = (u8)(FTS5_MAIN_PREFIX + iIdx);
@@ -238621,7 +245652,11 @@ static int sqlite3Fts5IndexQuery(
static int sqlite3Fts5IterNext(Fts5IndexIter *pIndexIter){
Fts5Iter *pIter = (Fts5Iter*)pIndexIter;
assert( pIter->pIndex->rc==SQLITE_OK );
- fts5MultiIterNext(pIter->pIndex, pIter, 0, 0);
+ if( pIter->pTokenDataIter ){
+ fts5TokendataIterNext(pIter, 0, 0);
+ }else{
+ fts5MultiIterNext(pIter->pIndex, pIter, 0, 0);
+ }
return fts5IndexReturn(pIter->pIndex);
}
@@ -238654,7 +245689,11 @@ static int sqlite3Fts5IterNextScan(Fts5IndexIter *pIndexIter){
*/
static int sqlite3Fts5IterNextFrom(Fts5IndexIter *pIndexIter, i64 iMatch){
Fts5Iter *pIter = (Fts5Iter*)pIndexIter;
- fts5MultiIterNextFrom(pIter->pIndex, pIter, iMatch);
+ if( pIter->pTokenDataIter ){
+ fts5TokendataIterNext(pIter, 1, iMatch);
+ }else{
+ fts5MultiIterNextFrom(pIter->pIndex, pIter, iMatch);
+ }
return fts5IndexReturn(pIter->pIndex);
}
@@ -238670,12 +245709,106 @@ static const char *sqlite3Fts5IterTerm(Fts5IndexIter *pIndexIter, int *pn){
}
/*
+** This is used by xInstToken() to access the token at offset iOff, column
+** iCol of row iRowid. The token is returned via output variables *ppOut
+** and *pnOut. The iterator passed as the first argument must be a tokendata=1
+** iterator (pIter->pTokenDataIter!=0).
+*/
+static int sqlite3Fts5IterToken(
+ Fts5IndexIter *pIndexIter,
+ i64 iRowid,
+ int iCol,
+ int iOff,
+ const char **ppOut, int *pnOut
+){
+ Fts5Iter *pIter = (Fts5Iter*)pIndexIter;
+ Fts5TokenDataIter *pT = pIter->pTokenDataIter;
+ Fts5TokenDataMap *aMap = pT->aMap;
+ i64 iPos = (((i64)iCol)<<32) + iOff;
+
+ int i1 = 0;
+ int i2 = pT->nMap;
+ int iTest = 0;
+
+ while( i2>i1 ){
+ iTest = (i1 + i2) / 2;
+
+ if( aMap[iTest].iRowid<iRowid ){
+ i1 = iTest+1;
+ }else if( aMap[iTest].iRowid>iRowid ){
+ i2 = iTest;
+ }else{
+ if( aMap[iTest].iPos<iPos ){
+ if( aMap[iTest].iPos<0 ){
+ break;
+ }
+ i1 = iTest+1;
+ }else if( aMap[iTest].iPos>iPos ){
+ i2 = iTest;
+ }else{
+ break;
+ }
+ }
+ }
+
+ if( i2>i1 ){
+ Fts5Iter *pMap = pT->apIter[aMap[iTest].iIter];
+ *ppOut = (const char*)pMap->aSeg[0].term.p+1;
+ *pnOut = pMap->aSeg[0].term.n-1;
+ }
+
+ return SQLITE_OK;
+}
+
+/*
+** Clear any existing entries from the token-map associated with the
+** iterator passed as the only argument.
+*/
+static void sqlite3Fts5IndexIterClearTokendata(Fts5IndexIter *pIndexIter){
+ Fts5Iter *pIter = (Fts5Iter*)pIndexIter;
+ if( pIter && pIter->pTokenDataIter ){
+ pIter->pTokenDataIter->nMap = 0;
+ }
+}
+
+/*
+** Set a token-mapping for the iterator passed as the first argument. This
+** is used in detail=column or detail=none mode when a token is requested
+** using the xInstToken() API. In this case the caller tokenizers the
+** current row and configures the token-mapping via multiple calls to this
+** function.
+*/
+static int sqlite3Fts5IndexIterWriteTokendata(
+ Fts5IndexIter *pIndexIter,
+ const char *pToken, int nToken,
+ i64 iRowid, int iCol, int iOff
+){
+ Fts5Iter *pIter = (Fts5Iter*)pIndexIter;
+ Fts5TokenDataIter *pT = pIter->pTokenDataIter;
+ Fts5Index *p = pIter->pIndex;
+ int ii;
+
+ assert( p->pConfig->eDetail!=FTS5_DETAIL_FULL );
+ assert( pIter->pTokenDataIter );
+
+ for(ii=0; ii<pT->nIter; ii++){
+ Fts5Buffer *pTerm = &pT->apIter[ii]->aSeg[0].term;
+ if( nToken==pTerm->n-1 && memcmp(pToken, pTerm->p+1, nToken)==0 ) break;
+ }
+ if( ii<pT->nIter ){
+ fts5TokendataIterAppendMap(p, pT, ii, iRowid, (((i64)iCol)<<32) + iOff);
+ }
+ return fts5IndexReturn(p);
+}
+
+/*
** Close an iterator opened by an earlier call to sqlite3Fts5IndexQuery().
*/
static void sqlite3Fts5IterClose(Fts5IndexIter *pIndexIter){
if( pIndexIter ){
Fts5Iter *pIter = (Fts5Iter*)pIndexIter;
Fts5Index *pIndex = pIter->pIndex;
+ fts5TokendataIterDelete(pIter->pTokenDataIter);
fts5MultiIterFree(pIter);
sqlite3Fts5IndexCloseReader(pIndex);
}
@@ -238759,6 +245892,347 @@ static int sqlite3Fts5IndexLoadConfig(Fts5Index *p){
return fts5IndexReturn(p);
}
+/*
+** Retrieve the origin value that will be used for the segment currently
+** being accumulated in the in-memory hash table when it is flushed to
+** disk. If successful, SQLITE_OK is returned and (*piOrigin) set to
+** the queried value. Or, if an error occurs, an error code is returned
+** and the final value of (*piOrigin) is undefined.
+*/
+static int sqlite3Fts5IndexGetOrigin(Fts5Index *p, i64 *piOrigin){
+ Fts5Structure *pStruct;
+ pStruct = fts5StructureRead(p);
+ if( pStruct ){
+ *piOrigin = pStruct->nOriginCntr;
+ fts5StructureRelease(pStruct);
+ }
+ return fts5IndexReturn(p);
+}
+
+/*
+** Buffer pPg contains a page of a tombstone hash table - one of nPg pages
+** associated with the same segment. This function adds rowid iRowid to
+** the hash table. The caller is required to guarantee that there is at
+** least one free slot on the page.
+**
+** If parameter bForce is false and the hash table is deemed to be full
+** (more than half of the slots are occupied), then non-zero is returned
+** and iRowid not inserted. Or, if bForce is true or if the hash table page
+** is not full, iRowid is inserted and zero returned.
+*/
+static int fts5IndexTombstoneAddToPage(
+ Fts5Data *pPg,
+ int bForce,
+ int nPg,
+ u64 iRowid
+){
+ const int szKey = TOMBSTONE_KEYSIZE(pPg);
+ const int nSlot = TOMBSTONE_NSLOT(pPg);
+ const int nElem = fts5GetU32(&pPg->p[4]);
+ int iSlot = (iRowid / nPg) % nSlot;
+ int nCollide = nSlot;
+
+ if( szKey==4 && iRowid>0xFFFFFFFF ) return 2;
+ if( iRowid==0 ){
+ pPg->p[1] = 0x01;
+ return 0;
+ }
+
+ if( bForce==0 && nElem>=(nSlot/2) ){
+ return 1;
+ }
+
+ fts5PutU32(&pPg->p[4], nElem+1);
+ if( szKey==4 ){
+ u32 *aSlot = (u32*)&pPg->p[8];
+ while( aSlot[iSlot] ){
+ iSlot = (iSlot + 1) % nSlot;
+ if( nCollide--==0 ) return 0;
+ }
+ fts5PutU32((u8*)&aSlot[iSlot], (u32)iRowid);
+ }else{
+ u64 *aSlot = (u64*)&pPg->p[8];
+ while( aSlot[iSlot] ){
+ iSlot = (iSlot + 1) % nSlot;
+ if( nCollide--==0 ) return 0;
+ }
+ fts5PutU64((u8*)&aSlot[iSlot], iRowid);
+ }
+
+ return 0;
+}
+
+/*
+** This function attempts to build a new hash containing all the keys
+** currently in the tombstone hash table for segment pSeg. The new
+** hash will be stored in the nOut buffers passed in array apOut[].
+** All pages of the new hash use key-size szKey (4 or 8).
+**
+** Return 0 if the hash is successfully rebuilt into the nOut pages.
+** Or non-zero if it is not (because one page became overfull). In this
+** case the caller should retry with a larger nOut parameter.
+**
+** Parameter pData1 is page iPg1 of the hash table being rebuilt.
+*/
+static int fts5IndexTombstoneRehash(
+ Fts5Index *p,
+ Fts5StructureSegment *pSeg, /* Segment to rebuild hash of */
+ Fts5Data *pData1, /* One page of current hash - or NULL */
+ int iPg1, /* Which page of the current hash is pData1 */
+ int szKey, /* 4 or 8, the keysize */
+ int nOut, /* Number of output pages */
+ Fts5Data **apOut /* Array of output hash pages */
+){
+ int ii;
+ int res = 0;
+
+ /* Initialize the headers of all the output pages */
+ for(ii=0; ii<nOut; ii++){
+ apOut[ii]->p[0] = szKey;
+ fts5PutU32(&apOut[ii]->p[4], 0);
+ }
+
+ /* Loop through the current pages of the hash table. */
+ for(ii=0; res==0 && ii<pSeg->nPgTombstone; ii++){
+ Fts5Data *pData = 0; /* Page ii of the current hash table */
+ Fts5Data *pFree = 0; /* Free this at the end of the loop */
+
+ if( iPg1==ii ){
+ pData = pData1;
+ }else{
+ pFree = pData = fts5DataRead(p, FTS5_TOMBSTONE_ROWID(pSeg->iSegid, ii));
+ }
+
+ if( pData ){
+ int szKeyIn = TOMBSTONE_KEYSIZE(pData);
+ int nSlotIn = (pData->nn - 8) / szKeyIn;
+ int iIn;
+ for(iIn=0; iIn<nSlotIn; iIn++){
+ u64 iVal = 0;
+
+ /* Read the value from slot iIn of the input page into iVal. */
+ if( szKeyIn==4 ){
+ u32 *aSlot = (u32*)&pData->p[8];
+ if( aSlot[iIn] ) iVal = fts5GetU32((u8*)&aSlot[iIn]);
+ }else{
+ u64 *aSlot = (u64*)&pData->p[8];
+ if( aSlot[iIn] ) iVal = fts5GetU64((u8*)&aSlot[iIn]);
+ }
+
+ /* If iVal is not 0 at this point, insert it into the new hash table */
+ if( iVal ){
+ Fts5Data *pPg = apOut[(iVal % nOut)];
+ res = fts5IndexTombstoneAddToPage(pPg, 0, nOut, iVal);
+ if( res ) break;
+ }
+ }
+
+ /* If this is page 0 of the old hash, copy the rowid-0-flag from the
+ ** old hash to the new. */
+ if( ii==0 ){
+ apOut[0]->p[1] = pData->p[1];
+ }
+ }
+ fts5DataRelease(pFree);
+ }
+
+ return res;
+}
+
+/*
+** This is called to rebuild the hash table belonging to segment pSeg.
+** If parameter pData1 is not NULL, then one page of the existing hash table
+** has already been loaded - pData1, which is page iPg1. The key-size for
+** the new hash table is szKey (4 or 8).
+**
+** If successful, the new hash table is not written to disk. Instead,
+** output parameter (*pnOut) is set to the number of pages in the new
+** hash table, and (*papOut) to point to an array of buffers containing
+** the new page data.
+**
+** If an error occurs, an error code is left in the Fts5Index object and
+** both output parameters set to 0 before returning.
+*/
+static void fts5IndexTombstoneRebuild(
+ Fts5Index *p,
+ Fts5StructureSegment *pSeg, /* Segment to rebuild hash of */
+ Fts5Data *pData1, /* One page of current hash - or NULL */
+ int iPg1, /* Which page of the current hash is pData1 */
+ int szKey, /* 4 or 8, the keysize */
+ int *pnOut, /* OUT: Number of output pages */
+ Fts5Data ***papOut /* OUT: Output hash pages */
+){
+ const int MINSLOT = 32;
+ int nSlotPerPage = MAX(MINSLOT, (p->pConfig->pgsz - 8) / szKey);
+ int nSlot = 0; /* Number of slots in each output page */
+ int nOut = 0;
+
+ /* Figure out how many output pages (nOut) and how many slots per
+ ** page (nSlot). There are three possibilities:
+ **
+ ** 1. The hash table does not yet exist. In this case the new hash
+ ** table will consist of a single page with MINSLOT slots.
+ **
+ ** 2. The hash table exists but is currently a single page. In this
+ ** case an attempt is made to grow the page to accommodate the new
+ ** entry. The page is allowed to grow up to nSlotPerPage (see above)
+ ** slots.
+ **
+ ** 3. The hash table already consists of more than one page, or of
+ ** a single page already so large that it cannot be grown. In this
+ ** case the new hash consists of (nPg*2+1) pages of nSlotPerPage
+ ** slots each, where nPg is the current number of pages in the
+ ** hash table.
+ */
+ if( pSeg->nPgTombstone==0 ){
+ /* Case 1. */
+ nOut = 1;
+ nSlot = MINSLOT;
+ }else if( pSeg->nPgTombstone==1 ){
+ /* Case 2. */
+ int nElem = (int)fts5GetU32(&pData1->p[4]);
+ assert( pData1 && iPg1==0 );
+ nOut = 1;
+ nSlot = MAX(nElem*4, MINSLOT);
+ if( nSlot>nSlotPerPage ) nOut = 0;
+ }
+ if( nOut==0 ){
+ /* Case 3. */
+ nOut = (pSeg->nPgTombstone * 2 + 1);
+ nSlot = nSlotPerPage;
+ }
+
+ /* Allocate the required array and output pages */
+ while( 1 ){
+ int res = 0;
+ int ii = 0;
+ int szPage = 0;
+ Fts5Data **apOut = 0;
+
+ /* Allocate space for the new hash table */
+ assert( nSlot>=MINSLOT );
+ apOut = (Fts5Data**)sqlite3Fts5MallocZero(&p->rc, sizeof(Fts5Data*) * nOut);
+ szPage = 8 + nSlot*szKey;
+ for(ii=0; ii<nOut; ii++){
+ Fts5Data *pNew = (Fts5Data*)sqlite3Fts5MallocZero(&p->rc,
+ sizeof(Fts5Data)+szPage
+ );
+ if( pNew ){
+ pNew->nn = szPage;
+ pNew->p = (u8*)&pNew[1];
+ apOut[ii] = pNew;
+ }
+ }
+
+ /* Rebuild the hash table. */
+ if( p->rc==SQLITE_OK ){
+ res = fts5IndexTombstoneRehash(p, pSeg, pData1, iPg1, szKey, nOut, apOut);
+ }
+ if( res==0 ){
+ if( p->rc ){
+ fts5IndexFreeArray(apOut, nOut);
+ apOut = 0;
+ nOut = 0;
+ }
+ *pnOut = nOut;
+ *papOut = apOut;
+ break;
+ }
+
+ /* If control flows to here, it was not possible to rebuild the hash
+ ** table. Free all buffers and then try again with more pages. */
+ assert( p->rc==SQLITE_OK );
+ fts5IndexFreeArray(apOut, nOut);
+ nSlot = nSlotPerPage;
+ nOut = nOut*2 + 1;
+ }
+}
+
+
+/*
+** Add a tombstone for rowid iRowid to segment pSeg.
+*/
+static void fts5IndexTombstoneAdd(
+ Fts5Index *p,
+ Fts5StructureSegment *pSeg,
+ u64 iRowid
+){
+ Fts5Data *pPg = 0;
+ int iPg = -1;
+ int szKey = 0;
+ int nHash = 0;
+ Fts5Data **apHash = 0;
+
+ p->nContentlessDelete++;
+
+ if( pSeg->nPgTombstone>0 ){
+ iPg = iRowid % pSeg->nPgTombstone;
+ pPg = fts5DataRead(p, FTS5_TOMBSTONE_ROWID(pSeg->iSegid,iPg));
+ if( pPg==0 ){
+ assert( p->rc!=SQLITE_OK );
+ return;
+ }
+
+ if( 0==fts5IndexTombstoneAddToPage(pPg, 0, pSeg->nPgTombstone, iRowid) ){
+ fts5DataWrite(p, FTS5_TOMBSTONE_ROWID(pSeg->iSegid,iPg), pPg->p, pPg->nn);
+ fts5DataRelease(pPg);
+ return;
+ }
+ }
+
+ /* Have to rebuild the hash table. First figure out the key-size (4 or 8). */
+ szKey = pPg ? TOMBSTONE_KEYSIZE(pPg) : 4;
+ if( iRowid>0xFFFFFFFF ) szKey = 8;
+
+ /* Rebuild the hash table */
+ fts5IndexTombstoneRebuild(p, pSeg, pPg, iPg, szKey, &nHash, &apHash);
+ assert( p->rc==SQLITE_OK || (nHash==0 && apHash==0) );
+
+ /* If all has succeeded, write the new rowid into one of the new hash
+ ** table pages, then write them all out to disk. */
+ if( nHash ){
+ int ii = 0;
+ fts5IndexTombstoneAddToPage(apHash[iRowid % nHash], 1, nHash, iRowid);
+ for(ii=0; ii<nHash; ii++){
+ i64 iTombstoneRowid = FTS5_TOMBSTONE_ROWID(pSeg->iSegid, ii);
+ fts5DataWrite(p, iTombstoneRowid, apHash[ii]->p, apHash[ii]->nn);
+ }
+ pSeg->nPgTombstone = nHash;
+ fts5StructureWrite(p, p->pStruct);
+ }
+
+ fts5DataRelease(pPg);
+ fts5IndexFreeArray(apHash, nHash);
+}
+
+/*
+** Add iRowid to the tombstone list of the segment or segments that contain
+** rows from origin iOrigin. Return SQLITE_OK if successful, or an SQLite
+** error code otherwise.
+*/
+static int sqlite3Fts5IndexContentlessDelete(Fts5Index *p, i64 iOrigin, i64 iRowid){
+ Fts5Structure *pStruct;
+ pStruct = fts5StructureRead(p);
+ if( pStruct ){
+ int bFound = 0; /* True after pSeg->nEntryTombstone incr. */
+ int iLvl;
+ for(iLvl=pStruct->nLevel-1; iLvl>=0; iLvl--){
+ int iSeg;
+ for(iSeg=pStruct->aLevel[iLvl].nSeg-1; iSeg>=0; iSeg--){
+ Fts5StructureSegment *pSeg = &pStruct->aLevel[iLvl].aSeg[iSeg];
+ if( pSeg->iOrigin1<=(u64)iOrigin && pSeg->iOrigin2>=(u64)iOrigin ){
+ if( bFound==0 ){
+ pSeg->nEntryTombstone++;
+ bFound = 1;
+ }
+ fts5IndexTombstoneAdd(p, pSeg, iRowid);
+ }
+ }
+ }
+ fts5StructureRelease(pStruct);
+ }
+ return fts5IndexReturn(p);
+}
/*************************************************************************
**************************************************************************
@@ -238842,7 +246316,9 @@ static int fts5QueryCksum(
int eDetail = p->pConfig->eDetail;
u64 cksum = *pCksum;
Fts5IndexIter *pIter = 0;
- int rc = sqlite3Fts5IndexQuery(p, z, n, flags, 0, &pIter);
+ int rc = sqlite3Fts5IndexQuery(
+ p, z, n, (flags | FTS5INDEX_QUERY_NOTOKENDATA), 0, &pIter
+ );
while( rc==SQLITE_OK && ALWAYS(pIter!=0) && 0==sqlite3Fts5IterEof(pIter) ){
i64 rowid = pIter->iRowid;
@@ -239009,7 +246485,7 @@ static void fts5IndexIntegrityCheckEmpty(
}
static void fts5IntegrityCheckPgidx(Fts5Index *p, Fts5Data *pLeaf){
- int iTermOff = 0;
+ i64 iTermOff = 0;
int ii;
Fts5Buffer buf1 = {0,0,0};
@@ -239018,7 +246494,7 @@ static void fts5IntegrityCheckPgidx(Fts5Index *p, Fts5Data *pLeaf){
ii = pLeaf->szLeaf;
while( ii<pLeaf->nn && p->rc==SQLITE_OK ){
int res;
- int iOff;
+ i64 iOff;
int nIncr;
ii += fts5GetVarint32(&pLeaf->p[ii], nIncr);
@@ -239310,13 +246786,14 @@ static int sqlite3Fts5IndexIntegrityCheck(Fts5Index *p, u64 cksum, int bUseCksum
** function only.
*/
-#ifdef SQLITE_TEST
+#if defined(SQLITE_TEST) || defined(SQLITE_FTS5_DEBUG)
/*
** Decode a segment-data rowid from the %_data table. This function is
** the opposite of macro FTS5_SEGMENT_ROWID().
*/
static void fts5DecodeRowid(
i64 iRowid, /* Rowid from %_data table */
+ int *pbTombstone, /* OUT: Tombstone hash flag */
int *piSegid, /* OUT: Segment id */
int *pbDlidx, /* OUT: Dlidx flag */
int *piHeight, /* OUT: Height */
@@ -239332,13 +246809,16 @@ static void fts5DecodeRowid(
iRowid >>= FTS5_DATA_DLI_B;
*piSegid = (int)(iRowid & (((i64)1 << FTS5_DATA_ID_B) - 1));
+ iRowid >>= FTS5_DATA_ID_B;
+
+ *pbTombstone = (int)(iRowid & 0x0001);
}
-#endif /* SQLITE_TEST */
+#endif /* SQLITE_TEST || SQLITE_FTS5_DEBUG */
-#ifdef SQLITE_TEST
+#if defined(SQLITE_TEST) || defined(SQLITE_FTS5_DEBUG)
static void fts5DebugRowid(int *pRc, Fts5Buffer *pBuf, i64 iKey){
- int iSegid, iHeight, iPgno, bDlidx; /* Rowid compenents */
- fts5DecodeRowid(iKey, &iSegid, &bDlidx, &iHeight, &iPgno);
+ int iSegid, iHeight, iPgno, bDlidx, bTomb; /* Rowid compenents */
+ fts5DecodeRowid(iKey, &bTomb, &iSegid, &bDlidx, &iHeight, &iPgno);
if( iSegid==0 ){
if( iKey==FTS5_AVERAGES_ROWID ){
@@ -239348,14 +246828,16 @@ static void fts5DebugRowid(int *pRc, Fts5Buffer *pBuf, i64 iKey){
}
}
else{
- sqlite3Fts5BufferAppendPrintf(pRc, pBuf, "{%ssegid=%d h=%d pgno=%d}",
- bDlidx ? "dlidx " : "", iSegid, iHeight, iPgno
+ sqlite3Fts5BufferAppendPrintf(pRc, pBuf, "{%s%ssegid=%d h=%d pgno=%d}",
+ bDlidx ? "dlidx " : "",
+ bTomb ? "tombstone " : "",
+ iSegid, iHeight, iPgno
);
}
}
-#endif /* SQLITE_TEST */
+#endif /* SQLITE_TEST || SQLITE_FTS5_DEBUG */
-#ifdef SQLITE_TEST
+#if defined(SQLITE_TEST) || defined(SQLITE_FTS5_DEBUG)
static void fts5DebugStructure(
int *pRc, /* IN/OUT: error code */
Fts5Buffer *pBuf,
@@ -239370,16 +246852,22 @@ static void fts5DebugStructure(
);
for(iSeg=0; iSeg<pLvl->nSeg; iSeg++){
Fts5StructureSegment *pSeg = &pLvl->aSeg[iSeg];
- sqlite3Fts5BufferAppendPrintf(pRc, pBuf, " {id=%d leaves=%d..%d}",
+ sqlite3Fts5BufferAppendPrintf(pRc, pBuf, " {id=%d leaves=%d..%d",
pSeg->iSegid, pSeg->pgnoFirst, pSeg->pgnoLast
);
+ if( pSeg->iOrigin1>0 ){
+ sqlite3Fts5BufferAppendPrintf(pRc, pBuf, " origin=%lld..%lld",
+ pSeg->iOrigin1, pSeg->iOrigin2
+ );
+ }
+ sqlite3Fts5BufferAppendPrintf(pRc, pBuf, "}");
}
sqlite3Fts5BufferAppendPrintf(pRc, pBuf, "}");
}
}
-#endif /* SQLITE_TEST */
+#endif /* SQLITE_TEST || SQLITE_FTS5_DEBUG */
-#ifdef SQLITE_TEST
+#if defined(SQLITE_TEST) || defined(SQLITE_FTS5_DEBUG)
/*
** This is part of the fts5_decode() debugging aid.
**
@@ -239404,9 +246892,9 @@ static void fts5DecodeStructure(
fts5DebugStructure(pRc, pBuf, p);
fts5StructureRelease(p);
}
-#endif /* SQLITE_TEST */
+#endif /* SQLITE_TEST || SQLITE_FTS5_DEBUG */
-#ifdef SQLITE_TEST
+#if defined(SQLITE_TEST) || defined(SQLITE_FTS5_DEBUG)
/*
** This is part of the fts5_decode() debugging aid.
**
@@ -239429,9 +246917,9 @@ static void fts5DecodeAverages(
zSpace = " ";
}
}
-#endif /* SQLITE_TEST */
+#endif /* SQLITE_TEST || SQLITE_FTS5_DEBUG */
-#ifdef SQLITE_TEST
+#if defined(SQLITE_TEST) || defined(SQLITE_FTS5_DEBUG)
/*
** Buffer (a/n) is assumed to contain a list of serialized varints. Read
** each varint and append its string representation to buffer pBuf. Return
@@ -239448,9 +246936,9 @@ static int fts5DecodePoslist(int *pRc, Fts5Buffer *pBuf, const u8 *a, int n){
}
return iOff;
}
-#endif /* SQLITE_TEST */
+#endif /* SQLITE_TEST || SQLITE_FTS5_DEBUG */
-#ifdef SQLITE_TEST
+#if defined(SQLITE_TEST) || defined(SQLITE_FTS5_DEBUG)
/*
** The start of buffer (a/n) contains the start of a doclist. The doclist
** may or may not finish within the buffer. This function appends a text
@@ -239483,9 +246971,9 @@ static int fts5DecodeDoclist(int *pRc, Fts5Buffer *pBuf, const u8 *a, int n){
return iOff;
}
-#endif /* SQLITE_TEST */
+#endif /* SQLITE_TEST || SQLITE_FTS5_DEBUG */
-#ifdef SQLITE_TEST
+#if defined(SQLITE_TEST) || defined(SQLITE_FTS5_DEBUG)
/*
** This function is part of the fts5_decode() debugging function. It is
** only ever used with detail=none tables.
@@ -239526,9 +247014,27 @@ static void fts5DecodeRowidList(
sqlite3Fts5BufferAppendPrintf(pRc, pBuf, " %lld%s", iRowid, zApp);
}
}
-#endif /* SQLITE_TEST */
+#endif /* SQLITE_TEST || SQLITE_FTS5_DEBUG */
-#ifdef SQLITE_TEST
+#if defined(SQLITE_TEST) || defined(SQLITE_FTS5_DEBUG)
+static void fts5BufferAppendTerm(int *pRc, Fts5Buffer *pBuf, Fts5Buffer *pTerm){
+ int ii;
+ fts5BufferGrow(pRc, pBuf, pTerm->n*2 + 1);
+ if( *pRc==SQLITE_OK ){
+ for(ii=0; ii<pTerm->n; ii++){
+ if( pTerm->p[ii]==0x00 ){
+ pBuf->p[pBuf->n++] = '\\';
+ pBuf->p[pBuf->n++] = '0';
+ }else{
+ pBuf->p[pBuf->n++] = pTerm->p[ii];
+ }
+ }
+ pBuf->p[pBuf->n] = 0x00;
+ }
+}
+#endif /* SQLITE_TEST || SQLITE_FTS5_DEBUG */
+
+#if defined(SQLITE_TEST) || defined(SQLITE_FTS5_DEBUG)
/*
** The implementation of user-defined scalar function fts5_decode().
*/
@@ -239539,6 +247045,7 @@ static void fts5DecodeFunction(
){
i64 iRowid; /* Rowid for record being decoded */
int iSegid,iHeight,iPgno,bDlidx;/* Rowid components */
+ int bTomb;
const u8 *aBlob; int n; /* Record to decode */
u8 *a = 0;
Fts5Buffer s; /* Build up text to return here */
@@ -239561,7 +247068,7 @@ static void fts5DecodeFunction(
if( a==0 ) goto decode_out;
if( n>0 ) memcpy(a, aBlob, n);
- fts5DecodeRowid(iRowid, &iSegid, &bDlidx, &iHeight, &iPgno);
+ fts5DecodeRowid(iRowid, &bTomb, &iSegid, &bDlidx, &iHeight, &iPgno);
fts5DebugRowid(&rc, &s, iRowid);
if( bDlidx ){
@@ -239580,6 +247087,28 @@ static void fts5DecodeFunction(
" %d(%lld)", lvl.iLeafPgno, lvl.iRowid
);
}
+ }else if( bTomb ){
+ u32 nElem = fts5GetU32(&a[4]);
+ int szKey = (aBlob[0]==4 || aBlob[0]==8) ? aBlob[0] : 8;
+ int nSlot = (n - 8) / szKey;
+ int ii;
+ sqlite3Fts5BufferAppendPrintf(&rc, &s, " nElem=%d", (int)nElem);
+ if( aBlob[1] ){
+ sqlite3Fts5BufferAppendPrintf(&rc, &s, " 0");
+ }
+ for(ii=0; ii<nSlot; ii++){
+ u64 iVal = 0;
+ if( szKey==4 ){
+ u32 *aSlot = (u32*)&aBlob[8];
+ if( aSlot[ii] ) iVal = fts5GetU32((u8*)&aSlot[ii]);
+ }else{
+ u64 *aSlot = (u64*)&aBlob[8];
+ if( aSlot[ii] ) iVal = fts5GetU64((u8*)&aSlot[ii]);
+ }
+ if( iVal!=0 ){
+ sqlite3Fts5BufferAppendPrintf(&rc, &s, " %lld", (i64)iVal);
+ }
+ }
}else if( iSegid==0 ){
if( iRowid==FTS5_AVERAGES_ROWID ){
fts5DecodeAverages(&rc, &s, a, n);
@@ -239605,16 +247134,15 @@ static void fts5DecodeFunction(
fts5DecodeRowidList(&rc, &s, &a[4], iTermOff-4);
iOff = iTermOff;
- while( iOff<szLeaf ){
+ while( iOff<szLeaf && rc==SQLITE_OK ){
int nAppend;
/* Read the term data for the next term*/
iOff += fts5GetVarint32(&a[iOff], nAppend);
term.n = nKeep;
fts5BufferAppendBlob(&rc, &term, nAppend, &a[iOff]);
- sqlite3Fts5BufferAppendPrintf(
- &rc, &s, " term=%.*s", term.n, (const char*)term.p
- );
+ sqlite3Fts5BufferAppendPrintf(&rc, &s, " term=");
+ fts5BufferAppendTerm(&rc, &s, &term);
iOff += nAppend;
/* Figure out where the doclist for this term ends */
@@ -239625,8 +247153,11 @@ static void fts5DecodeFunction(
}else{
iTermOff = szLeaf;
}
-
- fts5DecodeRowidList(&rc, &s, &a[iOff], iTermOff-iOff);
+ if( iTermOff>szLeaf ){
+ rc = FTS5_CORRUPT;
+ }else{
+ fts5DecodeRowidList(&rc, &s, &a[iOff], iTermOff-iOff);
+ }
iOff = iTermOff;
if( iOff<szLeaf ){
iOff += fts5GetVarint32(&a[iOff], nKeep);
@@ -239719,9 +247250,8 @@ static void fts5DecodeFunction(
fts5BufferAppendBlob(&rc, &term, nByte, &a[iOff]);
iOff += nByte;
- sqlite3Fts5BufferAppendPrintf(
- &rc, &s, " term=%.*s", term.n, (const char*)term.p
- );
+ sqlite3Fts5BufferAppendPrintf(&rc, &s, " term=");
+ fts5BufferAppendTerm(&rc, &s, &term);
iOff += fts5DecodeDoclist(&rc, &s, &a[iOff], iEnd-iOff);
}
@@ -239737,9 +247267,9 @@ static void fts5DecodeFunction(
}
fts5BufferFree(&s);
}
-#endif /* SQLITE_TEST */
+#endif /* SQLITE_TEST || SQLITE_FTS5_DEBUG */
-#ifdef SQLITE_TEST
+#if defined(SQLITE_TEST) || defined(SQLITE_FTS5_DEBUG)
/*
** The implementation of user-defined scalar function fts5_rowid().
*/
@@ -239773,7 +247303,235 @@ static void fts5RowidFunction(
}
}
}
-#endif /* SQLITE_TEST */
+#endif /* SQLITE_TEST || SQLITE_FTS5_DEBUG */
+
+#if defined(SQLITE_TEST) || defined(SQLITE_FTS5_DEBUG)
+
+typedef struct Fts5StructVtab Fts5StructVtab;
+struct Fts5StructVtab {
+ sqlite3_vtab base;
+};
+
+typedef struct Fts5StructVcsr Fts5StructVcsr;
+struct Fts5StructVcsr {
+ sqlite3_vtab_cursor base;
+ Fts5Structure *pStruct;
+ int iLevel;
+ int iSeg;
+ int iRowid;
+};
+
+/*
+** Create a new fts5_structure() table-valued function.
+*/
+static int fts5structConnectMethod(
+ sqlite3 *db,
+ void *pAux,
+ int argc, const char *const*argv,
+ sqlite3_vtab **ppVtab,
+ char **pzErr
+){
+ Fts5StructVtab *pNew = 0;
+ int rc = SQLITE_OK;
+
+ rc = sqlite3_declare_vtab(db,
+ "CREATE TABLE xyz("
+ "level, segment, merge, segid, leaf1, leaf2, loc1, loc2, "
+ "npgtombstone, nentrytombstone, nentry, struct HIDDEN);"
+ );
+ if( rc==SQLITE_OK ){
+ pNew = sqlite3Fts5MallocZero(&rc, sizeof(*pNew));
+ }
+
+ *ppVtab = (sqlite3_vtab*)pNew;
+ return rc;
+}
+
+/*
+** We must have a single struct=? constraint that will be passed through
+** into the xFilter method. If there is no valid stmt=? constraint,
+** then return an SQLITE_CONSTRAINT error.
+*/
+static int fts5structBestIndexMethod(
+ sqlite3_vtab *tab,
+ sqlite3_index_info *pIdxInfo
+){
+ int i;
+ int rc = SQLITE_CONSTRAINT;
+ struct sqlite3_index_constraint *p;
+ pIdxInfo->estimatedCost = (double)100;
+ pIdxInfo->estimatedRows = 100;
+ pIdxInfo->idxNum = 0;
+ for(i=0, p=pIdxInfo->aConstraint; i<pIdxInfo->nConstraint; i++, p++){
+ if( p->usable==0 ) continue;
+ if( p->op==SQLITE_INDEX_CONSTRAINT_EQ && p->iColumn==11 ){
+ rc = SQLITE_OK;
+ pIdxInfo->aConstraintUsage[i].omit = 1;
+ pIdxInfo->aConstraintUsage[i].argvIndex = 1;
+ break;
+ }
+ }
+ return rc;
+}
+
+/*
+** This method is the destructor for bytecodevtab objects.
+*/
+static int fts5structDisconnectMethod(sqlite3_vtab *pVtab){
+ Fts5StructVtab *p = (Fts5StructVtab*)pVtab;
+ sqlite3_free(p);
+ return SQLITE_OK;
+}
+
+/*
+** Constructor for a new bytecodevtab_cursor object.
+*/
+static int fts5structOpenMethod(sqlite3_vtab *p, sqlite3_vtab_cursor **ppCsr){
+ int rc = SQLITE_OK;
+ Fts5StructVcsr *pNew = 0;
+
+ pNew = sqlite3Fts5MallocZero(&rc, sizeof(*pNew));
+ *ppCsr = (sqlite3_vtab_cursor*)pNew;
+
+ return SQLITE_OK;
+}
+
+/*
+** Destructor for a bytecodevtab_cursor.
+*/
+static int fts5structCloseMethod(sqlite3_vtab_cursor *cur){
+ Fts5StructVcsr *pCsr = (Fts5StructVcsr*)cur;
+ fts5StructureRelease(pCsr->pStruct);
+ sqlite3_free(pCsr);
+ return SQLITE_OK;
+}
+
+
+/*
+** Advance a bytecodevtab_cursor to its next row of output.
+*/
+static int fts5structNextMethod(sqlite3_vtab_cursor *cur){
+ Fts5StructVcsr *pCsr = (Fts5StructVcsr*)cur;
+ Fts5Structure *p = pCsr->pStruct;
+
+ assert( pCsr->pStruct );
+ pCsr->iSeg++;
+ pCsr->iRowid++;
+ while( pCsr->iLevel<p->nLevel && pCsr->iSeg>=p->aLevel[pCsr->iLevel].nSeg ){
+ pCsr->iLevel++;
+ pCsr->iSeg = 0;
+ }
+ if( pCsr->iLevel>=p->nLevel ){
+ fts5StructureRelease(pCsr->pStruct);
+ pCsr->pStruct = 0;
+ }
+ return SQLITE_OK;
+}
+
+/*
+** Return TRUE if the cursor has been moved off of the last
+** row of output.
+*/
+static int fts5structEofMethod(sqlite3_vtab_cursor *cur){
+ Fts5StructVcsr *pCsr = (Fts5StructVcsr*)cur;
+ return pCsr->pStruct==0;
+}
+
+static int fts5structRowidMethod(
+ sqlite3_vtab_cursor *cur,
+ sqlite_int64 *piRowid
+){
+ Fts5StructVcsr *pCsr = (Fts5StructVcsr*)cur;
+ *piRowid = pCsr->iRowid;
+ return SQLITE_OK;
+}
+
+/*
+** Return values of columns for the row at which the bytecodevtab_cursor
+** is currently pointing.
+*/
+static int fts5structColumnMethod(
+ sqlite3_vtab_cursor *cur, /* The cursor */
+ sqlite3_context *ctx, /* First argument to sqlite3_result_...() */
+ int i /* Which column to return */
+){
+ Fts5StructVcsr *pCsr = (Fts5StructVcsr*)cur;
+ Fts5Structure *p = pCsr->pStruct;
+ Fts5StructureSegment *pSeg = &p->aLevel[pCsr->iLevel].aSeg[pCsr->iSeg];
+
+ switch( i ){
+ case 0: /* level */
+ sqlite3_result_int(ctx, pCsr->iLevel);
+ break;
+ case 1: /* segment */
+ sqlite3_result_int(ctx, pCsr->iSeg);
+ break;
+ case 2: /* merge */
+ sqlite3_result_int(ctx, pCsr->iSeg < p->aLevel[pCsr->iLevel].nMerge);
+ break;
+ case 3: /* segid */
+ sqlite3_result_int(ctx, pSeg->iSegid);
+ break;
+ case 4: /* leaf1 */
+ sqlite3_result_int(ctx, pSeg->pgnoFirst);
+ break;
+ case 5: /* leaf2 */
+ sqlite3_result_int(ctx, pSeg->pgnoLast);
+ break;
+ case 6: /* origin1 */
+ sqlite3_result_int64(ctx, pSeg->iOrigin1);
+ break;
+ case 7: /* origin2 */
+ sqlite3_result_int64(ctx, pSeg->iOrigin2);
+ break;
+ case 8: /* npgtombstone */
+ sqlite3_result_int(ctx, pSeg->nPgTombstone);
+ break;
+ case 9: /* nentrytombstone */
+ sqlite3_result_int64(ctx, pSeg->nEntryTombstone);
+ break;
+ case 10: /* nentry */
+ sqlite3_result_int64(ctx, pSeg->nEntry);
+ break;
+ }
+ return SQLITE_OK;
+}
+
+/*
+** Initialize a cursor.
+**
+** idxNum==0 means show all subprograms
+** idxNum==1 means show only the main bytecode and omit subprograms.
+*/
+static int fts5structFilterMethod(
+ sqlite3_vtab_cursor *pVtabCursor,
+ int idxNum, const char *idxStr,
+ int argc, sqlite3_value **argv
+){
+ Fts5StructVcsr *pCsr = (Fts5StructVcsr *)pVtabCursor;
+ int rc = SQLITE_OK;
+
+ const u8 *aBlob = 0;
+ int nBlob = 0;
+
+ assert( argc==1 );
+ fts5StructureRelease(pCsr->pStruct);
+ pCsr->pStruct = 0;
+
+ nBlob = sqlite3_value_bytes(argv[0]);
+ aBlob = (const u8*)sqlite3_value_blob(argv[0]);
+ rc = fts5StructureDecode(aBlob, nBlob, 0, &pCsr->pStruct);
+ if( rc==SQLITE_OK ){
+ pCsr->iLevel = 0;
+ pCsr->iRowid = 0;
+ pCsr->iSeg = -1;
+ rc = fts5structNextMethod(pVtabCursor);
+ }
+
+ return rc;
+}
+
+#endif /* SQLITE_TEST || SQLITE_FTS5_DEBUG */
/*
** This is called as part of registering the FTS5 module with database
@@ -239784,7 +247542,7 @@ static void fts5RowidFunction(
** SQLite error code is returned instead.
*/
static int sqlite3Fts5IndexInit(sqlite3 *db){
-#ifdef SQLITE_TEST
+#if defined(SQLITE_TEST) || defined(SQLITE_FTS5_DEBUG)
int rc = sqlite3_create_function(
db, "fts5_decode", 2, SQLITE_UTF8, 0, fts5DecodeFunction, 0, 0
);
@@ -239801,6 +247559,37 @@ static int sqlite3Fts5IndexInit(sqlite3 *db){
db, "fts5_rowid", -1, SQLITE_UTF8, 0, fts5RowidFunction, 0, 0
);
}
+
+ if( rc==SQLITE_OK ){
+ static const sqlite3_module fts5structure_module = {
+ 0, /* iVersion */
+ 0, /* xCreate */
+ fts5structConnectMethod, /* xConnect */
+ fts5structBestIndexMethod, /* xBestIndex */
+ fts5structDisconnectMethod, /* xDisconnect */
+ 0, /* xDestroy */
+ fts5structOpenMethod, /* xOpen */
+ fts5structCloseMethod, /* xClose */
+ fts5structFilterMethod, /* xFilter */
+ fts5structNextMethod, /* xNext */
+ fts5structEofMethod, /* xEof */
+ fts5structColumnMethod, /* xColumn */
+ fts5structRowidMethod, /* xRowid */
+ 0, /* xUpdate */
+ 0, /* xBegin */
+ 0, /* xSync */
+ 0, /* xCommit */
+ 0, /* xRollback */
+ 0, /* xFindFunction */
+ 0, /* xRename */
+ 0, /* xSavepoint */
+ 0, /* xRelease */
+ 0, /* xRollbackTo */
+ 0, /* xShadowName */
+ 0 /* xIntegrity */
+ };
+ rc = sqlite3_create_module(db, "fts5_structure", &fts5structure_module, 0);
+ }
return rc;
#else
return SQLITE_OK;
@@ -239936,6 +247725,8 @@ struct Fts5FullTable {
Fts5Storage *pStorage; /* Document store */
Fts5Global *pGlobal; /* Global (connection wide) data */
Fts5Cursor *pSortCsr; /* Sort data from this cursor */
+ int iSavepoint; /* Successful xSavepoint()+1 */
+
#ifdef SQLITE_DEBUG
struct Fts5TransactionState ts;
#endif
@@ -240224,6 +248015,13 @@ static int fts5InitVtab(
pConfig->pzErrmsg = 0;
}
+ if( rc==SQLITE_OK && pConfig->eContent==FTS5_CONTENT_NORMAL ){
+ rc = sqlite3_vtab_config(db, SQLITE_VTAB_CONSTRAINT_SUPPORT, (int)1);
+ }
+ if( rc==SQLITE_OK ){
+ rc = sqlite3_vtab_config(db, SQLITE_VTAB_INNOCUOUS);
+ }
+
if( rc!=SQLITE_OK ){
fts5FreeVtab(pTab);
pTab = 0;
@@ -240466,12 +248264,15 @@ static int fts5BestIndexMethod(sqlite3_vtab *pVTab, sqlite3_index_info *pInfo){
}
idxStr[iIdxStr] = '\0';
- /* Set idxFlags flags for the ORDER BY clause */
+ /* Set idxFlags flags for the ORDER BY clause
+ **
+ ** Note that tokendata=1 tables cannot currently handle "ORDER BY rowid DESC".
+ */
if( pInfo->nOrderBy==1 ){
int iSort = pInfo->aOrderBy[0].iColumn;
if( iSort==(pConfig->nCol+1) && bSeenMatch ){
idxFlags |= FTS5_BI_ORDER_RANK;
- }else if( iSort==-1 ){
+ }else if( iSort==-1 && (!pInfo->aOrderBy[0].desc || !pConfig->bTokendata) ){
idxFlags |= FTS5_BI_ORDER_ROWID;
}
if( BitFlagTest(idxFlags, FTS5_BI_ORDER_RANK|FTS5_BI_ORDER_ROWID) ){
@@ -240723,6 +248524,16 @@ static int fts5NextMethod(sqlite3_vtab_cursor *pCursor){
);
assert( !CsrFlagTest(pCsr, FTS5CSR_EOF) );
+ /* If this cursor uses FTS5_PLAN_MATCH and this is a tokendata=1 table,
+ ** clear any token mappings accumulated at the fts5_index.c level. In
+ ** other cases, specifically FTS5_PLAN_SOURCE and FTS5_PLAN_SORTED_MATCH,
+ ** we need to retain the mappings for the entire query. */
+ if( pCsr->ePlan==FTS5_PLAN_MATCH
+ && ((Fts5Table*)pCursor->pVtab)->pConfig->bTokendata
+ ){
+ sqlite3Fts5ExprClearTokens(pCsr->pExpr);
+ }
+
if( pCsr->ePlan<3 ){
int bSkip = 0;
if( (rc = fts5CursorReseek(pCsr, &bSkip)) || bSkip ) return rc;
@@ -241148,6 +248959,9 @@ static int fts5FilterMethod(
pCsr->iFirstRowid = fts5GetRowidLimit(pRowidGe, SMALLEST_INT64);
}
+ rc = sqlite3Fts5IndexLoadConfig(pTab->p.pIndex);
+ if( rc!=SQLITE_OK ) goto filter_out;
+
if( pTab->pSortCsr ){
/* If pSortCsr is non-NULL, then this call is being made as part of
** processing for a "... MATCH <expr> ORDER BY rank" query (ePlan is
@@ -241170,6 +248984,7 @@ static int fts5FilterMethod(
pCsr->pExpr = pTab->pSortCsr->pExpr;
rc = fts5CursorFirst(pTab, pCsr, bDesc);
}else if( pCsr->pExpr ){
+ assert( rc==SQLITE_OK );
rc = fts5CursorParseRank(pConfig, pCsr, pRank);
if( rc==SQLITE_OK ){
if( bOrderByRank ){
@@ -241341,6 +249156,7 @@ static int fts5SpecialInsert(
Fts5Config *pConfig = pTab->p.pConfig;
int rc = SQLITE_OK;
int bError = 0;
+ int bLoadConfig = 0;
if( 0==sqlite3_stricmp("delete-all", zCmd) ){
if( pConfig->eContent==FTS5_CONTENT_NORMAL ){
@@ -241352,6 +249168,7 @@ static int fts5SpecialInsert(
}else{
rc = sqlite3Fts5StorageDeleteAll(pTab->pStorage);
}
+ bLoadConfig = 1;
}else if( 0==sqlite3_stricmp("rebuild", zCmd) ){
if( pConfig->eContent==FTS5_CONTENT_NONE ){
fts5SetVtabError(pTab,
@@ -241361,6 +249178,7 @@ static int fts5SpecialInsert(
}else{
rc = sqlite3Fts5StorageRebuild(pTab->pStorage);
}
+ bLoadConfig = 1;
}else if( 0==sqlite3_stricmp("optimize", zCmd) ){
rc = sqlite3Fts5StorageOptimize(pTab->pStorage);
}else if( 0==sqlite3_stricmp("merge", zCmd) ){
@@ -241373,8 +249191,13 @@ static int fts5SpecialInsert(
}else if( 0==sqlite3_stricmp("prefix-index", zCmd) ){
pConfig->bPrefixIndex = sqlite3_value_int(pVal);
#endif
+ }else if( 0==sqlite3_stricmp("flush", zCmd) ){
+ rc = sqlite3Fts5FlushToDisk(&pTab->p);
}else{
- rc = sqlite3Fts5IndexLoadConfig(pTab->p.pIndex);
+ rc = sqlite3Fts5FlushToDisk(&pTab->p);
+ if( rc==SQLITE_OK ){
+ rc = sqlite3Fts5IndexLoadConfig(pTab->p.pIndex);
+ }
if( rc==SQLITE_OK ){
rc = sqlite3Fts5ConfigSetValue(pTab->p.pConfig, zCmd, pVal, &bError);
}
@@ -241386,6 +249209,12 @@ static int fts5SpecialInsert(
}
}
}
+
+ if( rc==SQLITE_OK && bLoadConfig ){
+ pTab->p.pConfig->iCookie--;
+ rc = sqlite3Fts5IndexLoadConfig(pTab->p.pIndex);
+ }
+
return rc;
}
@@ -241444,7 +249273,6 @@ static int fts5UpdateMethod(
int rc = SQLITE_OK; /* Return code */
int bUpdateOrDelete = 0;
-
/* A transaction must be open when this is called. */
assert( pTab->ts.eState==1 || pTab->ts.eState==2 );
@@ -241473,7 +249301,14 @@ static int fts5UpdateMethod(
if( pConfig->eContent!=FTS5_CONTENT_NORMAL
&& 0==sqlite3_stricmp("delete", z)
){
- rc = fts5SpecialDelete(pTab, apVal);
+ if( pConfig->bContentlessDelete ){
+ fts5SetVtabError(pTab,
+ "'delete' may not be used with a contentless_delete=1 table"
+ );
+ rc = SQLITE_ERROR;
+ }else{
+ rc = fts5SpecialDelete(pTab, apVal);
+ }
}else{
rc = fts5SpecialInsert(pTab, z, apVal[2 + pConfig->nCol + 1]);
}
@@ -241490,7 +249325,7 @@ static int fts5UpdateMethod(
** Cases 3 and 4 may violate the rowid constraint.
*/
int eConflict = SQLITE_ABORT;
- if( pConfig->eContent==FTS5_CONTENT_NORMAL ){
+ if( pConfig->eContent==FTS5_CONTENT_NORMAL || pConfig->bContentlessDelete ){
eConflict = sqlite3_vtab_on_conflict(pConfig->db);
}
@@ -241498,8 +249333,12 @@ static int fts5UpdateMethod(
assert( nArg!=1 || eType0==SQLITE_INTEGER );
/* Filter out attempts to run UPDATE or DELETE on contentless tables.
- ** This is not suported. */
- if( eType0==SQLITE_INTEGER && fts5IsContentless(pTab) ){
+ ** This is not suported. Except - they are both supported if the CREATE
+ ** VIRTUAL TABLE statement contained "contentless_delete=1". */
+ if( eType0==SQLITE_INTEGER
+ && pConfig->eContent==FTS5_CONTENT_NONE
+ && pConfig->bContentlessDelete==0
+ ){
pTab->p.base.zErrMsg = sqlite3_mprintf(
"cannot %s contentless fts5 table: %s",
(nArg>1 ? "UPDATE" : "DELETE from"), pConfig->zName
@@ -241523,7 +249362,8 @@ static int fts5UpdateMethod(
}
else if( eType0!=SQLITE_INTEGER ){
- /* If this is a REPLACE, first remove the current entry (if any) */
+ /* An INSERT statement. If the conflict-mode is REPLACE, first remove
+ ** the current entry (if any). */
if( eConflict==SQLITE_REPLACE && eType1==SQLITE_INTEGER ){
i64 iNew = sqlite3_value_int64(apVal[1]); /* Rowid to delete */
rc = sqlite3Fts5StorageDelete(pTab->pStorage, iNew, 0);
@@ -241586,8 +249426,7 @@ static int fts5SyncMethod(sqlite3_vtab *pVtab){
Fts5FullTable *pTab = (Fts5FullTable*)pVtab;
fts5CheckTransactionState(pTab, FTS5_SYNC, 0);
pTab->p.pConfig->pzErrmsg = &pTab->p.base.zErrMsg;
- fts5TripCursors(pTab);
- rc = sqlite3Fts5StorageSync(pTab->pStorage);
+ rc = sqlite3Fts5FlushToDisk(&pTab->p);
pTab->p.pConfig->pzErrmsg = 0;
return rc;
}
@@ -241683,7 +249522,10 @@ static int fts5ApiColumnText(
){
int rc = SQLITE_OK;
Fts5Cursor *pCsr = (Fts5Cursor*)pCtx;
- if( fts5IsContentless((Fts5FullTable*)(pCsr->base.pVtab))
+ Fts5Table *pTab = (Fts5Table*)(pCsr->base.pVtab);
+ if( iCol<0 || iCol>=pTab->pConfig->nCol ){
+ rc = SQLITE_RANGE;
+ }else if( fts5IsContentless((Fts5FullTable*)(pCsr->base.pVtab))
|| pCsr->ePlan==FTS5_PLAN_SPECIAL
){
*pz = 0;
@@ -241708,8 +249550,9 @@ static int fts5CsrPoslist(
int rc = SQLITE_OK;
int bLive = (pCsr->pSorter==0);
- if( CsrFlagTest(pCsr, FTS5CSR_REQUIRE_POSLIST) ){
-
+ if( iPhrase<0 || iPhrase>=sqlite3Fts5ExprPhraseCount(pCsr->pExpr) ){
+ rc = SQLITE_RANGE;
+ }else if( CsrFlagTest(pCsr, FTS5CSR_REQUIRE_POSLIST) ){
if( pConfig->eDetail!=FTS5_DETAIL_FULL ){
Fts5PoslistPopulator *aPopulator;
int i;
@@ -241733,15 +249576,21 @@ static int fts5CsrPoslist(
CsrFlagClear(pCsr, FTS5CSR_REQUIRE_POSLIST);
}
- if( pCsr->pSorter && pConfig->eDetail==FTS5_DETAIL_FULL ){
- Fts5Sorter *pSorter = pCsr->pSorter;
- int i1 = (iPhrase==0 ? 0 : pSorter->aIdx[iPhrase-1]);
- *pn = pSorter->aIdx[iPhrase] - i1;
- *pa = &pSorter->aPoslist[i1];
+ if( rc==SQLITE_OK ){
+ if( pCsr->pSorter && pConfig->eDetail==FTS5_DETAIL_FULL ){
+ Fts5Sorter *pSorter = pCsr->pSorter;
+ int i1 = (iPhrase==0 ? 0 : pSorter->aIdx[iPhrase-1]);
+ *pn = pSorter->aIdx[iPhrase] - i1;
+ *pa = &pSorter->aPoslist[i1];
+ }else{
+ *pn = sqlite3Fts5ExprPoslist(pCsr->pExpr, iPhrase, pa);
+ }
}else{
- *pn = sqlite3Fts5ExprPoslist(pCsr->pExpr, iPhrase, pa);
+ *pa = 0;
+ *pn = 0;
}
+
return rc;
}
@@ -241848,12 +249697,6 @@ static int fts5ApiInst(
){
if( iIdx<0 || iIdx>=pCsr->nInstCount ){
rc = SQLITE_RANGE;
-#if 0
- }else if( fts5IsOffsetless((Fts5Table*)pCsr->base.pVtab) ){
- *piPhrase = pCsr->aInst[iIdx*3];
- *piCol = pCsr->aInst[iIdx*3 + 2];
- *piOff = -1;
-#endif
}else{
*piPhrase = pCsr->aInst[iIdx*3];
*piCol = pCsr->aInst[iIdx*3 + 1];
@@ -242108,13 +249951,56 @@ static int fts5ApiPhraseFirstColumn(
return rc;
}
+/*
+** xQueryToken() API implemenetation.
+*/
+static int fts5ApiQueryToken(
+ Fts5Context* pCtx,
+ int iPhrase,
+ int iToken,
+ const char **ppOut,
+ int *pnOut
+){
+ Fts5Cursor *pCsr = (Fts5Cursor*)pCtx;
+ return sqlite3Fts5ExprQueryToken(pCsr->pExpr, iPhrase, iToken, ppOut, pnOut);
+}
+
+/*
+** xInstToken() API implemenetation.
+*/
+static int fts5ApiInstToken(
+ Fts5Context *pCtx,
+ int iIdx,
+ int iToken,
+ const char **ppOut, int *pnOut
+){
+ Fts5Cursor *pCsr = (Fts5Cursor*)pCtx;
+ int rc = SQLITE_OK;
+ if( CsrFlagTest(pCsr, FTS5CSR_REQUIRE_INST)==0
+ || SQLITE_OK==(rc = fts5CacheInstArray(pCsr))
+ ){
+ if( iIdx<0 || iIdx>=pCsr->nInstCount ){
+ rc = SQLITE_RANGE;
+ }else{
+ int iPhrase = pCsr->aInst[iIdx*3];
+ int iCol = pCsr->aInst[iIdx*3 + 1];
+ int iOff = pCsr->aInst[iIdx*3 + 2];
+ i64 iRowid = fts5CursorRowid(pCsr);
+ rc = sqlite3Fts5ExprInstToken(
+ pCsr->pExpr, iRowid, iPhrase, iCol, iOff, iToken, ppOut, pnOut
+ );
+ }
+ }
+ return rc;
+}
+
static int fts5ApiQueryPhrase(Fts5Context*, int, void*,
int(*)(const Fts5ExtensionApi*, Fts5Context*, void*)
);
static const Fts5ExtensionApi sFts5Api = {
- 2, /* iVersion */
+ 3, /* iVersion */
fts5ApiUserData,
fts5ApiColumnCount,
fts5ApiRowCount,
@@ -242134,6 +250020,8 @@ static const Fts5ExtensionApi sFts5Api = {
fts5ApiPhraseNext,
fts5ApiPhraseFirstColumn,
fts5ApiPhraseNextColumn,
+ fts5ApiQueryToken,
+ fts5ApiInstToken
};
/*
@@ -242354,6 +250242,12 @@ static int fts5ColumnMethod(
sqlite3_result_value(pCtx, sqlite3_column_value(pCsr->pStmt, iCol+1));
}
pConfig->pzErrmsg = 0;
+ }else if( pConfig->bContentlessDelete && sqlite3_vtab_nochange(pCtx) ){
+ char *zErr = sqlite3_mprintf("cannot UPDATE a subset of "
+ "columns on fts5 contentless-delete table: %s", pConfig->zName
+ );
+ sqlite3_result_error(pCtx, zErr, -1);
+ sqlite3_free(zErr);
}
return rc;
}
@@ -242392,8 +250286,10 @@ static int fts5RenameMethod(
sqlite3_vtab *pVtab, /* Virtual table handle */
const char *zName /* New name of table */
){
+ int rc;
Fts5FullTable *pTab = (Fts5FullTable*)pVtab;
- return sqlite3Fts5StorageRename(pTab->pStorage, zName);
+ rc = sqlite3Fts5StorageRename(pTab->pStorage, zName);
+ return rc;
}
static int sqlite3Fts5FlushToDisk(Fts5Table *pTab){
@@ -242407,9 +250303,15 @@ static int sqlite3Fts5FlushToDisk(Fts5Table *pTab){
** Flush the contents of the pending-terms table to disk.
*/
static int fts5SavepointMethod(sqlite3_vtab *pVtab, int iSavepoint){
- UNUSED_PARAM(iSavepoint); /* Call below is a no-op for NDEBUG builds */
- fts5CheckTransactionState((Fts5FullTable*)pVtab, FTS5_SAVEPOINT, iSavepoint);
- return sqlite3Fts5FlushToDisk((Fts5Table*)pVtab);
+ Fts5FullTable *pTab = (Fts5FullTable*)pVtab;
+ int rc = SQLITE_OK;
+
+ fts5CheckTransactionState(pTab, FTS5_SAVEPOINT, iSavepoint);
+ rc = sqlite3Fts5FlushToDisk((Fts5Table*)pVtab);
+ if( rc==SQLITE_OK ){
+ pTab->iSavepoint = iSavepoint+1;
+ }
+ return rc;
}
/*
@@ -242418,9 +250320,16 @@ static int fts5SavepointMethod(sqlite3_vtab *pVtab, int iSavepoint){
** This is a no-op.
*/
static int fts5ReleaseMethod(sqlite3_vtab *pVtab, int iSavepoint){
- UNUSED_PARAM(iSavepoint); /* Call below is a no-op for NDEBUG builds */
- fts5CheckTransactionState((Fts5FullTable*)pVtab, FTS5_RELEASE, iSavepoint);
- return sqlite3Fts5FlushToDisk((Fts5Table*)pVtab);
+ Fts5FullTable *pTab = (Fts5FullTable*)pVtab;
+ int rc = SQLITE_OK;
+ fts5CheckTransactionState(pTab, FTS5_RELEASE, iSavepoint);
+ if( (iSavepoint+1)<pTab->iSavepoint ){
+ rc = sqlite3Fts5FlushToDisk(&pTab->p);
+ if( rc==SQLITE_OK ){
+ pTab->iSavepoint = iSavepoint;
+ }
+ }
+ return rc;
}
/*
@@ -242430,11 +250339,14 @@ static int fts5ReleaseMethod(sqlite3_vtab *pVtab, int iSavepoint){
*/
static int fts5RollbackToMethod(sqlite3_vtab *pVtab, int iSavepoint){
Fts5FullTable *pTab = (Fts5FullTable*)pVtab;
- UNUSED_PARAM(iSavepoint); /* Call below is a no-op for NDEBUG builds */
+ int rc = SQLITE_OK;
fts5CheckTransactionState(pTab, FTS5_ROLLBACKTO, iSavepoint);
fts5TripCursors(pTab);
- pTab->p.pConfig->pgsz = 0;
- return sqlite3Fts5StorageRollback(pTab->pStorage);
+ if( (iSavepoint+1)<=pTab->iSavepoint ){
+ pTab->p.pConfig->pgsz = 0;
+ rc = sqlite3Fts5StorageRollback(pTab->pStorage);
+ }
+ return rc;
}
/*
@@ -242636,7 +250548,7 @@ static void fts5SourceIdFunc(
){
assert( nArg==0 );
UNUSED_PARAM2(nArg, apUnused);
- sqlite3_result_text(pCtx, "fts5: 2023-05-16 12:36:15 831d0fb2836b71c9bc51067c49fee4b8f18047814f2ff22d817d25195cf350b0", -1, SQLITE_TRANSIENT);
+ sqlite3_result_text(pCtx, "fts5: 2024-01-30 16:01:20 e876e51a0ed5c5b3126f52e532044363a014bc594cfefa87ffb5b82257cc467a", -1, SQLITE_TRANSIENT);
}
/*
@@ -242654,9 +250566,40 @@ static int fts5ShadowName(const char *zName){
return 0;
}
+/*
+** Run an integrity check on the FTS5 data structures. Return a string
+** if anything is found amiss. Return a NULL pointer if everything is
+** OK.
+*/
+static int fts5IntegrityMethod(
+ sqlite3_vtab *pVtab, /* the FTS5 virtual table to check */
+ const char *zSchema, /* Name of schema in which this table lives */
+ const char *zTabname, /* Name of the table itself */
+ int isQuick, /* True if this is a quick-check */
+ char **pzErr /* Write error message here */
+){
+ Fts5FullTable *pTab = (Fts5FullTable*)pVtab;
+ int rc;
+
+ assert( pzErr!=0 && *pzErr==0 );
+ UNUSED_PARAM(isQuick);
+ rc = sqlite3Fts5StorageIntegrity(pTab->pStorage, 0);
+ if( (rc&0xff)==SQLITE_CORRUPT ){
+ *pzErr = sqlite3_mprintf("malformed inverted index for FTS5 table %s.%s",
+ zSchema, zTabname);
+ }else if( rc!=SQLITE_OK ){
+ *pzErr = sqlite3_mprintf("unable to validate the inverted index for"
+ " FTS5 table %s.%s: %s",
+ zSchema, zTabname, sqlite3_errstr(rc));
+ }
+ sqlite3Fts5IndexCloseReader(pTab->p.pIndex);
+
+ return SQLITE_OK;
+}
+
static int fts5Init(sqlite3 *db){
static const sqlite3_module fts5Mod = {
- /* iVersion */ 3,
+ /* iVersion */ 4,
/* xCreate */ fts5CreateMethod,
/* xConnect */ fts5ConnectMethod,
/* xBestIndex */ fts5BestIndexMethod,
@@ -242679,7 +250622,8 @@ static int fts5Init(sqlite3 *db){
/* xSavepoint */ fts5SavepointMethod,
/* xRelease */ fts5ReleaseMethod,
/* xRollbackTo */ fts5RollbackToMethod,
- /* xShadowName */ fts5ShadowName
+ /* xShadowName */ fts5ShadowName,
+ /* xIntegrity */ fts5IntegrityMethod
};
int rc;
@@ -242849,10 +250793,10 @@ static int fts5StorageGetStmt(
"INSERT INTO %Q.'%q_content' VALUES(%s)", /* INSERT_CONTENT */
"REPLACE INTO %Q.'%q_content' VALUES(%s)", /* REPLACE_CONTENT */
"DELETE FROM %Q.'%q_content' WHERE id=?", /* DELETE_CONTENT */
- "REPLACE INTO %Q.'%q_docsize' VALUES(?,?)", /* REPLACE_DOCSIZE */
+ "REPLACE INTO %Q.'%q_docsize' VALUES(?,?%s)", /* REPLACE_DOCSIZE */
"DELETE FROM %Q.'%q_docsize' WHERE id=?", /* DELETE_DOCSIZE */
- "SELECT sz FROM %Q.'%q_docsize' WHERE id=?", /* LOOKUP_DOCSIZE */
+ "SELECT sz%s FROM %Q.'%q_docsize' WHERE id=?", /* LOOKUP_DOCSIZE */
"REPLACE INTO %Q.'%q_config' VALUES(?,?)", /* REPLACE_CONFIG */
"SELECT %s FROM %s AS T", /* SCAN */
@@ -242900,6 +250844,19 @@ static int fts5StorageGetStmt(
break;
}
+ case FTS5_STMT_REPLACE_DOCSIZE:
+ zSql = sqlite3_mprintf(azStmt[eStmt], pC->zDb, pC->zName,
+ (pC->bContentlessDelete ? ",?" : "")
+ );
+ break;
+
+ case FTS5_STMT_LOOKUP_DOCSIZE:
+ zSql = sqlite3_mprintf(azStmt[eStmt],
+ (pC->bContentlessDelete ? ",origin" : ""),
+ pC->zDb, pC->zName
+ );
+ break;
+
default:
zSql = sqlite3_mprintf(azStmt[eStmt], pC->zDb, pC->zName);
break;
@@ -243089,9 +251046,11 @@ static int sqlite3Fts5StorageOpen(
}
if( rc==SQLITE_OK && pConfig->bColumnsize ){
- rc = sqlite3Fts5CreateTable(
- pConfig, "docsize", "id INTEGER PRIMARY KEY, sz BLOB", 0, pzErr
- );
+ const char *zCols = "id INTEGER PRIMARY KEY, sz BLOB";
+ if( pConfig->bContentlessDelete ){
+ zCols = "id INTEGER PRIMARY KEY, sz BLOB, origin INTEGER";
+ }
+ rc = sqlite3Fts5CreateTable(pConfig, "docsize", zCols, 0, pzErr);
}
if( rc==SQLITE_OK ){
rc = sqlite3Fts5CreateTable(
@@ -243168,7 +251127,7 @@ static int fts5StorageDeleteFromIndex(
){
Fts5Config *pConfig = p->pConfig;
sqlite3_stmt *pSeek = 0; /* SELECT to read row iDel from %_data */
- int rc; /* Return code */
+ int rc = SQLITE_OK; /* Return code */
int rc2; /* sqlite3_reset() return code */
int iCol;
Fts5InsertCtx ctx;
@@ -243184,7 +251143,6 @@ static int fts5StorageDeleteFromIndex(
ctx.pStorage = p;
ctx.iCol = -1;
- rc = sqlite3Fts5IndexBeginWrite(p->pIndex, 1, iDel);
for(iCol=1; rc==SQLITE_OK && iCol<=pConfig->nCol; iCol++){
if( pConfig->abUnindexed[iCol-1]==0 ){
const char *zText;
@@ -243221,6 +251179,37 @@ static int fts5StorageDeleteFromIndex(
return rc;
}
+/*
+** This function is called to process a DELETE on a contentless_delete=1
+** table. It adds the tombstone required to delete the entry with rowid
+** iDel. If successful, SQLITE_OK is returned. Or, if an error occurs,
+** an SQLite error code.
+*/
+static int fts5StorageContentlessDelete(Fts5Storage *p, i64 iDel){
+ i64 iOrigin = 0;
+ sqlite3_stmt *pLookup = 0;
+ int rc = SQLITE_OK;
+
+ assert( p->pConfig->bContentlessDelete );
+ assert( p->pConfig->eContent==FTS5_CONTENT_NONE );
+
+ /* Look up the origin of the document in the %_docsize table. Store
+ ** this in stack variable iOrigin. */
+ rc = fts5StorageGetStmt(p, FTS5_STMT_LOOKUP_DOCSIZE, &pLookup, 0);
+ if( rc==SQLITE_OK ){
+ sqlite3_bind_int64(pLookup, 1, iDel);
+ if( SQLITE_ROW==sqlite3_step(pLookup) ){
+ iOrigin = sqlite3_column_int64(pLookup, 1);
+ }
+ rc = sqlite3_reset(pLookup);
+ }
+
+ if( rc==SQLITE_OK && iOrigin!=0 ){
+ rc = sqlite3Fts5IndexContentlessDelete(p->pIndex, iOrigin, iDel);
+ }
+
+ return rc;
+}
/*
** Insert a record into the %_docsize table. Specifically, do:
@@ -243241,10 +251230,17 @@ static int fts5StorageInsertDocsize(
rc = fts5StorageGetStmt(p, FTS5_STMT_REPLACE_DOCSIZE, &pReplace, 0);
if( rc==SQLITE_OK ){
sqlite3_bind_int64(pReplace, 1, iRowid);
- sqlite3_bind_blob(pReplace, 2, pBuf->p, pBuf->n, SQLITE_STATIC);
- sqlite3_step(pReplace);
- rc = sqlite3_reset(pReplace);
- sqlite3_bind_null(pReplace, 2);
+ if( p->pConfig->bContentlessDelete ){
+ i64 iOrigin = 0;
+ rc = sqlite3Fts5IndexGetOrigin(p->pIndex, &iOrigin);
+ sqlite3_bind_int64(pReplace, 3, iOrigin);
+ }
+ if( rc==SQLITE_OK ){
+ sqlite3_bind_blob(pReplace, 2, pBuf->p, pBuf->n, SQLITE_STATIC);
+ sqlite3_step(pReplace);
+ rc = sqlite3_reset(pReplace);
+ sqlite3_bind_null(pReplace, 2);
+ }
}
}
return rc;
@@ -243308,7 +251304,15 @@ static int sqlite3Fts5StorageDelete(Fts5Storage *p, i64 iDel, sqlite3_value **ap
/* Delete the index records */
if( rc==SQLITE_OK ){
- rc = fts5StorageDeleteFromIndex(p, iDel, apVal);
+ rc = sqlite3Fts5IndexBeginWrite(p->pIndex, 1, iDel);
+ }
+
+ if( rc==SQLITE_OK ){
+ if( p->pConfig->bContentlessDelete ){
+ rc = fts5StorageContentlessDelete(p, iDel);
+ }else{
+ rc = fts5StorageDeleteFromIndex(p, iDel, apVal);
+ }
}
/* Delete the %_docsize record */
@@ -243385,7 +251389,7 @@ static int sqlite3Fts5StorageRebuild(Fts5Storage *p){
}
if( rc==SQLITE_OK ){
- rc = fts5StorageGetStmt(p, FTS5_STMT_SCAN, &pScan, 0);
+ rc = fts5StorageGetStmt(p, FTS5_STMT_SCAN, &pScan, pConfig->pzErrmsg);
}
while( rc==SQLITE_OK && SQLITE_ROW==sqlite3_step(pScan) ){
@@ -243896,7 +251900,9 @@ static int sqlite3Fts5StorageSync(Fts5Storage *p){
i64 iLastRowid = sqlite3_last_insert_rowid(p->pConfig->db);
if( p->bTotalsValid ){
rc = fts5StorageSaveTotals(p);
- p->bTotalsValid = 0;
+ if( rc==SQLITE_OK ){
+ p->bTotalsValid = 0;
+ }
}
if( rc==SQLITE_OK ){
rc = sqlite3Fts5IndexSync(p->pIndex);
@@ -244170,6 +252176,12 @@ static const unsigned char sqlite3Utf8Trans1[] = {
#endif /* ifndef SQLITE_AMALGAMATION */
+#define FTS5_SKIP_UTF8(zIn) { \
+ if( ((unsigned char)(*(zIn++)))>=0xc0 ){ \
+ while( (((unsigned char)*zIn) & 0xc0)==0x80 ){ zIn++; } \
+ } \
+}
+
typedef struct Unicode61Tokenizer Unicode61Tokenizer;
struct Unicode61Tokenizer {
unsigned char aTokenChar[128]; /* ASCII range token characters */
@@ -245205,6 +253217,7 @@ static int fts5PorterTokenize(
typedef struct TrigramTokenizer TrigramTokenizer;
struct TrigramTokenizer {
int bFold; /* True to fold to lower-case */
+ int iFoldParam; /* Parameter to pass to Fts5UnicodeFold() */
};
/*
@@ -245231,6 +253244,7 @@ static int fts5TriCreate(
}else{
int i;
pNew->bFold = 1;
+ pNew->iFoldParam = 0;
for(i=0; rc==SQLITE_OK && i<nArg; i+=2){
const char *zArg = azArg[i+1];
if( 0==sqlite3_stricmp(azArg[i], "case_sensitive") ){
@@ -245239,10 +253253,21 @@ static int fts5TriCreate(
}else{
pNew->bFold = (zArg[0]=='0');
}
+ }else if( 0==sqlite3_stricmp(azArg[i], "remove_diacritics") ){
+ if( (zArg[0]!='0' && zArg[0]!='1' && zArg[0]!='2') || zArg[1] ){
+ rc = SQLITE_ERROR;
+ }else{
+ pNew->iFoldParam = (zArg[0]!='0') ? 2 : 0;
+ }
}else{
rc = SQLITE_ERROR;
}
}
+
+ if( pNew->iFoldParam!=0 && pNew->bFold==0 ){
+ rc = SQLITE_ERROR;
+ }
+
if( rc!=SQLITE_OK ){
fts5TriDelete((Fts5Tokenizer*)pNew);
pNew = 0;
@@ -245265,40 +253290,62 @@ static int fts5TriTokenize(
TrigramTokenizer *p = (TrigramTokenizer*)pTok;
int rc = SQLITE_OK;
char aBuf[32];
+ char *zOut = aBuf;
+ int ii;
const unsigned char *zIn = (const unsigned char*)pText;
const unsigned char *zEof = &zIn[nText];
u32 iCode;
+ int aStart[3]; /* Input offset of each character in aBuf[] */
UNUSED_PARAM(unusedFlags);
- while( 1 ){
- char *zOut = aBuf;
- int iStart = zIn - (const unsigned char*)pText;
- const unsigned char *zNext;
-
- READ_UTF8(zIn, zEof, iCode);
- if( iCode==0 ) break;
- zNext = zIn;
- if( zIn<zEof ){
- if( p->bFold ) iCode = sqlite3Fts5UnicodeFold(iCode, 0);
- WRITE_UTF8(zOut, iCode);
+
+ /* Populate aBuf[] with the characters for the first trigram. */
+ for(ii=0; ii<3; ii++){
+ do {
+ aStart[ii] = zIn - (const unsigned char*)pText;
READ_UTF8(zIn, zEof, iCode);
- if( iCode==0 ) break;
- }else{
- break;
- }
- if( zIn<zEof ){
- if( p->bFold ) iCode = sqlite3Fts5UnicodeFold(iCode, 0);
- WRITE_UTF8(zOut, iCode);
+ if( iCode==0 ) return SQLITE_OK;
+ if( p->bFold ) iCode = sqlite3Fts5UnicodeFold(iCode, p->iFoldParam);
+ }while( iCode==0 );
+ WRITE_UTF8(zOut, iCode);
+ }
+
+ /* At the start of each iteration of this loop:
+ **
+ ** aBuf: Contains 3 characters. The 3 characters of the next trigram.
+ ** zOut: Points to the byte following the last character in aBuf.
+ ** aStart[3]: Contains the byte offset in the input text corresponding
+ ** to the start of each of the three characters in the buffer.
+ */
+ assert( zIn<=zEof );
+ while( 1 ){
+ int iNext; /* Start of character following current tri */
+ const char *z1;
+
+ /* Read characters from the input up until the first non-diacritic */
+ do {
+ iNext = zIn - (const unsigned char*)pText;
READ_UTF8(zIn, zEof, iCode);
if( iCode==0 ) break;
- if( p->bFold ) iCode = sqlite3Fts5UnicodeFold(iCode, 0);
- WRITE_UTF8(zOut, iCode);
- }else{
- break;
- }
- rc = xToken(pCtx, 0, aBuf, zOut-aBuf, iStart, iStart + zOut-aBuf);
- if( rc!=SQLITE_OK ) break;
- zIn = zNext;
+ if( p->bFold ) iCode = sqlite3Fts5UnicodeFold(iCode, p->iFoldParam);
+ }while( iCode==0 );
+
+ /* Pass the current trigram back to fts5 */
+ rc = xToken(pCtx, 0, aBuf, zOut-aBuf, aStart[0], iNext);
+ if( iCode==0 || rc!=SQLITE_OK ) break;
+
+ /* Remove the first character from buffer aBuf[]. Append the character
+ ** with codepoint iCode. */
+ z1 = aBuf;
+ FTS5_SKIP_UTF8(z1);
+ memmove(aBuf, z1, zOut - z1);
+ zOut -= (z1 - aBuf);
+ WRITE_UTF8(zOut, iCode);
+
+ /* Update the aStart[] array */
+ aStart[0] = aStart[1];
+ aStart[1] = aStart[2];
+ aStart[2] = iNext;
}
return rc;
@@ -245321,7 +253368,9 @@ static int sqlite3Fts5TokenizerPattern(
){
if( xCreate==fts5TriCreate ){
TrigramTokenizer *p = (TrigramTokenizer*)pTok;
- return p->bFold ? FTS5_PATTERN_LIKE : FTS5_PATTERN_GLOB;
+ if( p->iFoldParam==0 ){
+ return p->bFold ? FTS5_PATTERN_LIKE : FTS5_PATTERN_GLOB;
+ }
}
return FTS5_PATTERN_NONE;
}
@@ -247110,7 +255159,7 @@ static int fts5VocabFilterMethod(
if( pEq ){
zTerm = (const char *)sqlite3_value_text(pEq);
nTerm = sqlite3_value_bytes(pEq);
- f = 0;
+ f = FTS5INDEX_QUERY_NOTOKENDATA;
}else{
if( pGe ){
zTerm = (const char *)sqlite3_value_text(pGe);
@@ -247264,7 +255313,8 @@ static int sqlite3Fts5VocabInit(Fts5Global *pGlobal, sqlite3 *db){
/* xSavepoint */ 0,
/* xRelease */ 0,
/* xRollbackTo */ 0,
- /* xShadowName */ 0
+ /* xShadowName */ 0,
+ /* xIntegrity */ 0
};
void *p = (void*)pGlobal;
@@ -247593,6 +255643,7 @@ static sqlite3_module stmtModule = {
0, /* xRelease */
0, /* xRollbackTo */
0, /* xShadowName */
+ 0 /* xIntegrity */
};
#endif /* SQLITE_OMIT_VIRTUALTABLE */
diff --git a/dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.17/sqlite3-binding.h b/dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.22/sqlite3-binding.h
index 57eac0a..a07a519 100644
--- a/dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.17/sqlite3-binding.h
+++ b/dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.22/sqlite3-binding.h
@@ -147,9 +147,9 @@ extern "C" {
** [sqlite3_libversion_number()], [sqlite3_sourceid()],
** [sqlite_version()] and [sqlite_source_id()].
*/
-#define SQLITE_VERSION "3.42.0"
-#define SQLITE_VERSION_NUMBER 3042000
-#define SQLITE_SOURCE_ID "2023-05-16 12:36:15 831d0fb2836b71c9bc51067c49fee4b8f18047814f2ff22d817d25195cf350b0"
+#define SQLITE_VERSION "3.45.1"
+#define SQLITE_VERSION_NUMBER 3045001
+#define SQLITE_SOURCE_ID "2024-01-30 16:01:20 e876e51a0ed5c5b3126f52e532044363a014bc594cfefa87ffb5b82257cc467a"
/*
** CAPI3REF: Run-Time Library Version Numbers
@@ -529,6 +529,7 @@ SQLITE_API int sqlite3_exec(
#define SQLITE_IOERR_ROLLBACK_ATOMIC (SQLITE_IOERR | (31<<8))
#define SQLITE_IOERR_DATA (SQLITE_IOERR | (32<<8))
#define SQLITE_IOERR_CORRUPTFS (SQLITE_IOERR | (33<<8))
+#define SQLITE_IOERR_IN_PAGE (SQLITE_IOERR | (34<<8))
#define SQLITE_LOCKED_SHAREDCACHE (SQLITE_LOCKED | (1<<8))
#define SQLITE_LOCKED_VTAB (SQLITE_LOCKED | (2<<8))
#define SQLITE_BUSY_RECOVERY (SQLITE_BUSY | (1<<8))
@@ -1191,7 +1192,7 @@ struct sqlite3_io_methods {
** by clients within the current process, only within other processes.
**
** <li>[[SQLITE_FCNTL_CKSM_FILE]]
-** The [SQLITE_FCNTL_CKSM_FILE] opcode is for use interally by the
+** The [SQLITE_FCNTL_CKSM_FILE] opcode is for use internally by the
** [checksum VFS shim] only.
**
** <li>[[SQLITE_FCNTL_RESET_CACHE]]
@@ -2127,7 +2128,7 @@ struct sqlite3_mem_methods {
** is stored in each sorted record and the required column values loaded
** from the database as records are returned in sorted order. The default
** value for this option is to never use this optimization. Specifying a
-** negative value for this option restores the default behaviour.
+** negative value for this option restores the default behavior.
** This option is only available if SQLite is compiled with the
** [SQLITE_ENABLE_SORTER_REFERENCES] compile-time option.
**
@@ -2302,7 +2303,7 @@ struct sqlite3_mem_methods {
** database handle, SQLite checks if this will mean that there are now no
** connections at all to the database. If so, it performs a checkpoint
** operation before closing the connection. This option may be used to
-** override this behaviour. The first parameter passed to this operation
+** override this behavior. The first parameter passed to this operation
** is an integer - positive to disable checkpoints-on-close, or zero (the
** default) to enable them, and negative to leave the setting unchanged.
** The second parameter is a pointer to an integer
@@ -2455,7 +2456,7 @@ struct sqlite3_mem_methods {
** the [VACUUM] command will fail with an obscure error when attempting to
** process a table with generated columns and a descending index. This is
** not considered a bug since SQLite versions 3.3.0 and earlier do not support
-** either generated columns or decending indexes.
+** either generated columns or descending indexes.
** </dd>
**
** [[SQLITE_DBCONFIG_STMT_SCANSTATUS]]
@@ -2736,6 +2737,7 @@ SQLITE_API sqlite3_int64 sqlite3_total_changes64(sqlite3*);
**
** ^The [sqlite3_is_interrupted(D)] interface can be used to determine whether
** or not an interrupt is currently in effect for [database connection] D.
+** It returns 1 if an interrupt is currently in effect, or 0 otherwise.
*/
SQLITE_API void sqlite3_interrupt(sqlite3*);
SQLITE_API int sqlite3_is_interrupted(sqlite3*);
@@ -3389,8 +3391,10 @@ SQLITE_API SQLITE_DEPRECATED void *sqlite3_profile(sqlite3*,
** M argument should be the bitwise OR-ed combination of
** zero or more [SQLITE_TRACE] constants.
**
-** ^Each call to either sqlite3_trace() or sqlite3_trace_v2() overrides
-** (cancels) any prior calls to sqlite3_trace() or sqlite3_trace_v2().
+** ^Each call to either sqlite3_trace(D,X,P) or sqlite3_trace_v2(D,M,X,P)
+** overrides (cancels) all prior calls to sqlite3_trace(D,X,P) or
+** sqlite3_trace_v2(D,M,X,P) for the [database connection] D. Each
+** database connection may have at most one trace callback.
**
** ^The X callback is invoked whenever any of the events identified by
** mask M occur. ^The integer return value from the callback is currently
@@ -3759,7 +3763,7 @@ SQLITE_API int sqlite3_open_v2(
** as F) must be one of:
** <ul>
** <li> A database filename pointer created by the SQLite core and
-** passed into the xOpen() method of a VFS implemention, or
+** passed into the xOpen() method of a VFS implementation, or
** <li> A filename obtained from [sqlite3_db_filename()], or
** <li> A new filename constructed using [sqlite3_create_filename()].
** </ul>
@@ -3872,7 +3876,7 @@ SQLITE_API sqlite3_file *sqlite3_database_file_object(const char*);
/*
** CAPI3REF: Create and Destroy VFS Filenames
**
-** These interfces are provided for use by [VFS shim] implementations and
+** These interfaces are provided for use by [VFS shim] implementations and
** are not useful outside of that context.
**
** The sqlite3_create_filename(D,J,W,N,P) allocates memory to hold a version of
@@ -3951,14 +3955,17 @@ SQLITE_API void sqlite3_free_filename(sqlite3_filename);
** </ul>
**
** ^The sqlite3_errmsg() and sqlite3_errmsg16() return English-language
-** text that describes the error, as either UTF-8 or UTF-16 respectively.
+** text that describes the error, as either UTF-8 or UTF-16 respectively,
+** or NULL if no error message is available.
+** (See how SQLite handles [invalid UTF] for exceptions to this rule.)
** ^(Memory to hold the error message string is managed internally.
** The application does not need to worry about freeing the result.
** However, the error string might be overwritten or deallocated by
** subsequent calls to other SQLite interface functions.)^
**
-** ^The sqlite3_errstr() interface returns the English-language text
-** that describes the [result code], as UTF-8.
+** ^The sqlite3_errstr(E) interface returns the English-language text
+** that describes the [result code] E, as UTF-8, or NULL if E is not an
+** result code for which a text error message is available.
** ^(Memory to hold the error message string is managed internally
** and must not be freed by the application)^.
**
@@ -4420,6 +4427,41 @@ SQLITE_API int sqlite3_stmt_readonly(sqlite3_stmt *pStmt);
SQLITE_API int sqlite3_stmt_isexplain(sqlite3_stmt *pStmt);
/*
+** CAPI3REF: Change The EXPLAIN Setting For A Prepared Statement
+** METHOD: sqlite3_stmt
+**
+** The sqlite3_stmt_explain(S,E) interface changes the EXPLAIN
+** setting for [prepared statement] S. If E is zero, then S becomes
+** a normal prepared statement. If E is 1, then S behaves as if
+** its SQL text began with "[EXPLAIN]". If E is 2, then S behaves as if
+** its SQL text began with "[EXPLAIN QUERY PLAN]".
+**
+** Calling sqlite3_stmt_explain(S,E) might cause S to be reprepared.
+** SQLite tries to avoid a reprepare, but a reprepare might be necessary
+** on the first transition into EXPLAIN or EXPLAIN QUERY PLAN mode.
+**
+** Because of the potential need to reprepare, a call to
+** sqlite3_stmt_explain(S,E) will fail with SQLITE_ERROR if S cannot be
+** reprepared because it was created using [sqlite3_prepare()] instead of
+** the newer [sqlite3_prepare_v2()] or [sqlite3_prepare_v3()] interfaces and
+** hence has no saved SQL text with which to reprepare.
+**
+** Changing the explain setting for a prepared statement does not change
+** the original SQL text for the statement. Hence, if the SQL text originally
+** began with EXPLAIN or EXPLAIN QUERY PLAN, but sqlite3_stmt_explain(S,0)
+** is called to convert the statement into an ordinary statement, the EXPLAIN
+** or EXPLAIN QUERY PLAN keywords will still appear in the sqlite3_sql(S)
+** output, even though the statement now acts like a normal SQL statement.
+**
+** This routine returns SQLITE_OK if the explain mode is successfully
+** changed, or an error code if the explain mode could not be changed.
+** The explain mode cannot be changed while a statement is active.
+** Hence, it is good practice to call [sqlite3_reset(S)]
+** immediately prior to calling sqlite3_stmt_explain(S,E).
+*/
+SQLITE_API int sqlite3_stmt_explain(sqlite3_stmt *pStmt, int eMode);
+
+/*
** CAPI3REF: Determine If A Prepared Statement Has Been Reset
** METHOD: sqlite3_stmt
**
@@ -4582,7 +4624,7 @@ typedef struct sqlite3_context sqlite3_context;
** with it may be passed. ^It is called to dispose of the BLOB or string even
** if the call to the bind API fails, except the destructor is not called if
** the third parameter is a NULL pointer or the fourth parameter is negative.
-** ^ (2) The special constant, [SQLITE_STATIC], may be passsed to indicate that
+** ^ (2) The special constant, [SQLITE_STATIC], may be passed to indicate that
** the application remains responsible for disposing of the object. ^In this
** case, the object and the provided pointer to it must remain valid until
** either the prepared statement is finalized or the same SQL parameter is
@@ -5261,20 +5303,33 @@ SQLITE_API int sqlite3_finalize(sqlite3_stmt *pStmt);
** ^The [sqlite3_reset(S)] interface resets the [prepared statement] S
** back to the beginning of its program.
**
-** ^If the most recent call to [sqlite3_step(S)] for the
-** [prepared statement] S returned [SQLITE_ROW] or [SQLITE_DONE],
-** or if [sqlite3_step(S)] has never before been called on S,
-** then [sqlite3_reset(S)] returns [SQLITE_OK].
+** ^The return code from [sqlite3_reset(S)] indicates whether or not
+** the previous evaluation of prepared statement S completed successfully.
+** ^If [sqlite3_step(S)] has never before been called on S or if
+** [sqlite3_step(S)] has not been called since the previous call
+** to [sqlite3_reset(S)], then [sqlite3_reset(S)] will return
+** [SQLITE_OK].
**
** ^If the most recent call to [sqlite3_step(S)] for the
** [prepared statement] S indicated an error, then
** [sqlite3_reset(S)] returns an appropriate [error code].
+** ^The [sqlite3_reset(S)] interface might also return an [error code]
+** if there were no prior errors but the process of resetting
+** the prepared statement caused a new error. ^For example, if an
+** [INSERT] statement with a [RETURNING] clause is only stepped one time,
+** that one call to [sqlite3_step(S)] might return SQLITE_ROW but
+** the overall statement might still fail and the [sqlite3_reset(S)] call
+** might return SQLITE_BUSY if locking constraints prevent the
+** database change from committing. Therefore, it is important that
+** applications check the return code from [sqlite3_reset(S)] even if
+** no prior call to [sqlite3_step(S)] indicated a problem.
**
** ^The [sqlite3_reset(S)] interface does not change the values
** of any [sqlite3_bind_blob|bindings] on the [prepared statement] S.
*/
SQLITE_API int sqlite3_reset(sqlite3_stmt *pStmt);
+
/*
** CAPI3REF: Create Or Redefine SQL Functions
** KEYWORDS: {function creation routines}
@@ -5485,7 +5540,7 @@ SQLITE_API int sqlite3_create_window_function(
** [application-defined SQL function]
** that has side-effects or that could potentially leak sensitive information.
** This will prevent attacks in which an application is tricked
-** into using a database file that has had its schema surreptiously
+** into using a database file that has had its schema surreptitiously
** modified to invoke the application-defined function in ways that are
** harmful.
** <p>
@@ -5521,13 +5576,27 @@ SQLITE_API int sqlite3_create_window_function(
** </dd>
**
** [[SQLITE_SUBTYPE]] <dt>SQLITE_SUBTYPE</dt><dd>
-** The SQLITE_SUBTYPE flag indicates to SQLite that a function may call
+** The SQLITE_SUBTYPE flag indicates to SQLite that a function might call
** [sqlite3_value_subtype()] to inspect the sub-types of its arguments.
-** Specifying this flag makes no difference for scalar or aggregate user
-** functions. However, if it is not specified for a user-defined window
-** function, then any sub-types belonging to arguments passed to the window
-** function may be discarded before the window function is called (i.e.
-** sqlite3_value_subtype() will always return 0).
+** This flag instructs SQLite to omit some corner-case optimizations that
+** might disrupt the operation of the [sqlite3_value_subtype()] function,
+** causing it to return zero rather than the correct subtype().
+** SQL functions that invokes [sqlite3_value_subtype()] should have this
+** property. If the SQLITE_SUBTYPE property is omitted, then the return
+** value from [sqlite3_value_subtype()] might sometimes be zero even though
+** a non-zero subtype was specified by the function argument expression.
+**
+** [[SQLITE_RESULT_SUBTYPE]] <dt>SQLITE_RESULT_SUBTYPE</dt><dd>
+** The SQLITE_RESULT_SUBTYPE flag indicates to SQLite that a function might call
+** [sqlite3_result_subtype()] to cause a sub-type to be associated with its
+** result.
+** Every function that invokes [sqlite3_result_subtype()] should have this
+** property. If it does not, then the call to [sqlite3_result_subtype()]
+** might become a no-op if the function is used as term in an
+** [expression index]. On the other hand, SQL functions that never invoke
+** [sqlite3_result_subtype()] should avoid setting this property, as the
+** purpose of this property is to disable certain optimizations that are
+** incompatible with subtypes.
** </dd>
** </dl>
*/
@@ -5535,6 +5604,7 @@ SQLITE_API int sqlite3_create_window_function(
#define SQLITE_DIRECTONLY 0x000080000
#define SQLITE_SUBTYPE 0x000100000
#define SQLITE_INNOCUOUS 0x000200000
+#define SQLITE_RESULT_SUBTYPE 0x001000000
/*
** CAPI3REF: Deprecated Functions
@@ -5731,6 +5801,12 @@ SQLITE_API int sqlite3_value_encoding(sqlite3_value*);
** information can be used to pass a limited amount of context from
** one SQL function to another. Use the [sqlite3_result_subtype()]
** routine to set the subtype for the return value of an SQL function.
+**
+** Every [application-defined SQL function] that invoke this interface
+** should include the [SQLITE_SUBTYPE] property in the text
+** encoding argument when the function is [sqlite3_create_function|registered].
+** If the [SQLITE_SUBTYPE] property is omitted, then sqlite3_value_subtype()
+** might return zero instead of the upstream subtype in some corner cases.
*/
SQLITE_API unsigned int sqlite3_value_subtype(sqlite3_value*);
@@ -5829,48 +5905,56 @@ SQLITE_API sqlite3 *sqlite3_context_db_handle(sqlite3_context*);
** METHOD: sqlite3_context
**
** These functions may be used by (non-aggregate) SQL functions to
-** associate metadata with argument values. If the same value is passed to
-** multiple invocations of the same SQL function during query execution, under
-** some circumstances the associated metadata may be preserved. An example
-** of where this might be useful is in a regular-expression matching
-** function. The compiled version of the regular expression can be stored as
-** metadata associated with the pattern string.
+** associate auxiliary data with argument values. If the same argument
+** value is passed to multiple invocations of the same SQL function during
+** query execution, under some circumstances the associated auxiliary data
+** might be preserved. An example of where this might be useful is in a
+** regular-expression matching function. The compiled version of the regular
+** expression can be stored as auxiliary data associated with the pattern string.
** Then as long as the pattern string remains the same,
** the compiled regular expression can be reused on multiple
** invocations of the same function.
**
-** ^The sqlite3_get_auxdata(C,N) interface returns a pointer to the metadata
+** ^The sqlite3_get_auxdata(C,N) interface returns a pointer to the auxiliary data
** associated by the sqlite3_set_auxdata(C,N,P,X) function with the Nth argument
** value to the application-defined function. ^N is zero for the left-most
-** function argument. ^If there is no metadata
+** function argument. ^If there is no auxiliary data
** associated with the function argument, the sqlite3_get_auxdata(C,N) interface
** returns a NULL pointer.
**
-** ^The sqlite3_set_auxdata(C,N,P,X) interface saves P as metadata for the N-th
-** argument of the application-defined function. ^Subsequent
+** ^The sqlite3_set_auxdata(C,N,P,X) interface saves P as auxiliary data for the
+** N-th argument of the application-defined function. ^Subsequent
** calls to sqlite3_get_auxdata(C,N) return P from the most recent
-** sqlite3_set_auxdata(C,N,P,X) call if the metadata is still valid or
-** NULL if the metadata has been discarded.
+** sqlite3_set_auxdata(C,N,P,X) call if the auxiliary data is still valid or
+** NULL if the auxiliary data has been discarded.
** ^After each call to sqlite3_set_auxdata(C,N,P,X) where X is not NULL,
** SQLite will invoke the destructor function X with parameter P exactly
-** once, when the metadata is discarded.
-** SQLite is free to discard the metadata at any time, including: <ul>
+** once, when the auxiliary data is discarded.
+** SQLite is free to discard the auxiliary data at any time, including: <ul>
** <li> ^(when the corresponding function parameter changes)^, or
** <li> ^(when [sqlite3_reset()] or [sqlite3_finalize()] is called for the
** SQL statement)^, or
** <li> ^(when sqlite3_set_auxdata() is invoked again on the same
** parameter)^, or
** <li> ^(during the original sqlite3_set_auxdata() call when a memory
-** allocation error occurs.)^ </ul>
+** allocation error occurs.)^
+** <li> ^(during the original sqlite3_set_auxdata() call if the function
+** is evaluated during query planning instead of during query execution,
+** as sometimes happens with [SQLITE_ENABLE_STAT4].)^ </ul>
**
-** Note the last bullet in particular. The destructor X in
+** Note the last two bullets in particular. The destructor X in
** sqlite3_set_auxdata(C,N,P,X) might be called immediately, before the
** sqlite3_set_auxdata() interface even returns. Hence sqlite3_set_auxdata()
** should be called near the end of the function implementation and the
** function implementation should not make any use of P after
-** sqlite3_set_auxdata() has been called.
-**
-** ^(In practice, metadata is preserved between function calls for
+** sqlite3_set_auxdata() has been called. Furthermore, a call to
+** sqlite3_get_auxdata() that occurs immediately after a corresponding call
+** to sqlite3_set_auxdata() might still return NULL if an out-of-memory
+** condition occurred during the sqlite3_set_auxdata() call or if the
+** function is being evaluated during query planning rather than during
+** query execution.
+**
+** ^(In practice, auxiliary data is preserved between function calls for
** function parameters that are compile-time constants, including literal
** values and [parameters] and expressions composed from the same.)^
**
@@ -5880,10 +5964,67 @@ SQLITE_API sqlite3 *sqlite3_context_db_handle(sqlite3_context*);
**
** These routines must be called from the same thread in which
** the SQL function is running.
+**
+** See also: [sqlite3_get_clientdata()] and [sqlite3_set_clientdata()].
*/
SQLITE_API void *sqlite3_get_auxdata(sqlite3_context*, int N);
SQLITE_API void sqlite3_set_auxdata(sqlite3_context*, int N, void*, void (*)(void*));
+/*
+** CAPI3REF: Database Connection Client Data
+** METHOD: sqlite3
+**
+** These functions are used to associate one or more named pointers
+** with a [database connection].
+** A call to sqlite3_set_clientdata(D,N,P,X) causes the pointer P
+** to be attached to [database connection] D using name N. Subsequent
+** calls to sqlite3_get_clientdata(D,N) will return a copy of pointer P
+** or a NULL pointer if there were no prior calls to
+** sqlite3_set_clientdata() with the same values of D and N.
+** Names are compared using strcmp() and are thus case sensitive.
+**
+** If P and X are both non-NULL, then the destructor X is invoked with
+** argument P on the first of the following occurrences:
+** <ul>
+** <li> An out-of-memory error occurs during the call to
+** sqlite3_set_clientdata() which attempts to register pointer P.
+** <li> A subsequent call to sqlite3_set_clientdata(D,N,P,X) is made
+** with the same D and N parameters.
+** <li> The database connection closes. SQLite does not make any guarantees
+** about the order in which destructors are called, only that all
+** destructors will be called exactly once at some point during the
+** database connection closing process.
+** </ul>
+**
+** SQLite does not do anything with client data other than invoke
+** destructors on the client data at the appropriate time. The intended
+** use for client data is to provide a mechanism for wrapper libraries
+** to store additional information about an SQLite database connection.
+**
+** There is no limit (other than available memory) on the number of different
+** client data pointers (with different names) that can be attached to a
+** single database connection. However, the implementation is optimized
+** for the case of having only one or two different client data names.
+** Applications and wrapper libraries are discouraged from using more than
+** one client data name each.
+**
+** There is no way to enumerate the client data pointers
+** associated with a database connection. The N parameter can be thought
+** of as a secret key such that only code that knows the secret key is able
+** to access the associated data.
+**
+** Security Warning: These interfaces should not be exposed in scripting
+** languages or in other circumstances where it might be possible for an
+** an attacker to invoke them. Any agent that can invoke these interfaces
+** can probably also take control of the process.
+**
+** Database connection client data is only available for SQLite
+** version 3.44.0 ([dateof:3.44.0]) and later.
+**
+** See also: [sqlite3_set_auxdata()] and [sqlite3_get_auxdata()].
+*/
+SQLITE_API void *sqlite3_get_clientdata(sqlite3*,const char*);
+SQLITE_API int sqlite3_set_clientdata(sqlite3*, const char*, void*, void(*)(void*));
/*
** CAPI3REF: Constants Defining Special Destructor Behavior
@@ -6085,6 +6226,20 @@ SQLITE_API int sqlite3_result_zeroblob64(sqlite3_context*, sqlite3_uint64 n);
** higher order bits are discarded.
** The number of subtype bytes preserved by SQLite might increase
** in future releases of SQLite.
+**
+** Every [application-defined SQL function] that invokes this interface
+** should include the [SQLITE_RESULT_SUBTYPE] property in its
+** text encoding argument when the SQL function is
+** [sqlite3_create_function|registered]. If the [SQLITE_RESULT_SUBTYPE]
+** property is omitted from the function that invokes sqlite3_result_subtype(),
+** then in some cases the sqlite3_result_subtype() might fail to set
+** the result subtype.
+**
+** If SQLite is compiled with -DSQLITE_STRICT_SUBTYPE=1, then any
+** SQL function that invokes the sqlite3_result_subtype() interface
+** and that does not have the SQLITE_RESULT_SUBTYPE property will raise
+** an error. Future versions of SQLite might enable -DSQLITE_STRICT_SUBTYPE=1
+** by default.
*/
SQLITE_API void sqlite3_result_subtype(sqlite3_context*,unsigned int);
@@ -6516,7 +6671,7 @@ SQLITE_API int sqlite3_db_readonly(sqlite3 *db, const char *zDbName);
SQLITE_API int sqlite3_txn_state(sqlite3*,const char *zSchema);
/*
-** CAPI3REF: Allowed return values from [sqlite3_txn_state()]
+** CAPI3REF: Allowed return values from sqlite3_txn_state()
** KEYWORDS: {transaction state}
**
** These constants define the current transaction state of a database file.
@@ -6648,7 +6803,7 @@ SQLITE_API void *sqlite3_rollback_hook(sqlite3*, void(*)(void *), void*);
** ^Each call to the sqlite3_autovacuum_pages() interface overrides all
** previous invocations for that database connection. ^If the callback
** argument (C) to sqlite3_autovacuum_pages(D,C,P,X) is a NULL pointer,
-** then the autovacuum steps callback is cancelled. The return value
+** then the autovacuum steps callback is canceled. The return value
** from sqlite3_autovacuum_pages() is normally SQLITE_OK, but might
** be some other error code if something goes wrong. The current
** implementation will only return SQLITE_OK or SQLITE_MISUSE, but other
@@ -7167,6 +7322,10 @@ struct sqlite3_module {
/* The methods above are in versions 1 and 2 of the sqlite_module object.
** Those below are for version 3 and greater. */
int (*xShadowName)(const char*);
+ /* The methods above are in versions 1 through 3 of the sqlite_module object.
+ ** Those below are for version 4 and greater. */
+ int (*xIntegrity)(sqlite3_vtab *pVTab, const char *zSchema,
+ const char *zTabName, int mFlags, char **pzErr);
};
/*
@@ -7654,7 +7813,7 @@ SQLITE_API int sqlite3_blob_reopen(sqlite3_blob *, sqlite3_int64);
** code is returned and the transaction rolled back.
**
** Calling this function with an argument that is not a NULL pointer or an
-** open blob handle results in undefined behaviour. ^Calling this routine
+** open blob handle results in undefined behavior. ^Calling this routine
** with a null pointer (such as would be returned by a failed call to
** [sqlite3_blob_open()]) is a harmless no-op. ^Otherwise, if this function
** is passed a valid open blob handle, the values returned by the
@@ -7881,9 +8040,11 @@ SQLITE_API int sqlite3_vfs_unregister(sqlite3_vfs*);
**
** ^(Some systems (for example, Windows 95) do not support the operation
** implemented by sqlite3_mutex_try(). On those systems, sqlite3_mutex_try()
-** will always return SQLITE_BUSY. The SQLite core only ever uses
-** sqlite3_mutex_try() as an optimization so this is acceptable
-** behavior.)^
+** will always return SQLITE_BUSY. In most cases the SQLite core only uses
+** sqlite3_mutex_try() as an optimization, so this is acceptable
+** behavior. The exceptions are unix builds that set the
+** SQLITE_ENABLE_SETLK_TIMEOUT build option. In that case a working
+** sqlite3_mutex_try() is required.)^
**
** ^The sqlite3_mutex_leave() routine exits a mutex that was
** previously entered by the same thread. The behavior
@@ -8134,6 +8295,7 @@ SQLITE_API int sqlite3_test_control(int op, ...);
#define SQLITE_TESTCTRL_PRNG_SAVE 5
#define SQLITE_TESTCTRL_PRNG_RESTORE 6
#define SQLITE_TESTCTRL_PRNG_RESET 7 /* NOT USED */
+#define SQLITE_TESTCTRL_FK_NO_ACTION 7
#define SQLITE_TESTCTRL_BITVEC_TEST 8
#define SQLITE_TESTCTRL_FAULT_INSTALL 9
#define SQLITE_TESTCTRL_BENIGN_MALLOC_HOOKS 10
@@ -8141,6 +8303,7 @@ SQLITE_API int sqlite3_test_control(int op, ...);
#define SQLITE_TESTCTRL_ASSERT 12
#define SQLITE_TESTCTRL_ALWAYS 13
#define SQLITE_TESTCTRL_RESERVE 14 /* NOT USED */
+#define SQLITE_TESTCTRL_JSON_SELFCHECK 14
#define SQLITE_TESTCTRL_OPTIMIZATIONS 15
#define SQLITE_TESTCTRL_ISKEYWORD 16 /* NOT USED */
#define SQLITE_TESTCTRL_SCRATCHMALLOC 17 /* NOT USED */
@@ -8162,7 +8325,8 @@ SQLITE_API int sqlite3_test_control(int op, ...);
#define SQLITE_TESTCTRL_TRACEFLAGS 31
#define SQLITE_TESTCTRL_TUNE 32
#define SQLITE_TESTCTRL_LOGEST 33
-#define SQLITE_TESTCTRL_LAST 33 /* Largest TESTCTRL */
+#define SQLITE_TESTCTRL_USELONGDOUBLE 34
+#define SQLITE_TESTCTRL_LAST 34 /* Largest TESTCTRL */
/*
** CAPI3REF: SQL Keyword Checking
@@ -9618,7 +9782,7 @@ SQLITE_API int sqlite3_vtab_config(sqlite3*, int op, ...);
** [[SQLITE_VTAB_DIRECTONLY]]<dt>SQLITE_VTAB_DIRECTONLY</dt>
** <dd>Calls of the form
** [sqlite3_vtab_config](db,SQLITE_VTAB_DIRECTONLY) from within the
-** the [xConnect] or [xCreate] methods of a [virtual table] implmentation
+** the [xConnect] or [xCreate] methods of a [virtual table] implementation
** prohibits that virtual table from being used from within triggers and
** views.
** </dd>
@@ -9808,7 +9972,7 @@ SQLITE_API int sqlite3_vtab_distinct(sqlite3_index_info*);
** communicated to the xBestIndex method as a
** [SQLITE_INDEX_CONSTRAINT_EQ] constraint.)^ If xBestIndex wants to use
** this constraint, it must set the corresponding
-** aConstraintUsage[].argvIndex to a postive integer. ^(Then, under
+** aConstraintUsage[].argvIndex to a positive integer. ^(Then, under
** the usual mode of handling IN operators, SQLite generates [bytecode]
** that invokes the [xFilter|xFilter() method] once for each value
** on the right-hand side of the IN operator.)^ Thus the virtual table
@@ -10237,7 +10401,7 @@ SQLITE_API int sqlite3_db_cacheflush(sqlite3*);
** When the [sqlite3_blob_write()] API is used to update a blob column,
** the pre-update hook is invoked with SQLITE_DELETE. This is because the
** in this case the new values are not available. In this case, when a
-** callback made with op==SQLITE_DELETE is actuall a write using the
+** callback made with op==SQLITE_DELETE is actually a write using the
** sqlite3_blob_write() API, the [sqlite3_preupdate_blobwrite()] returns
** the index of the column being written. In other cases, where the
** pre-update hook is being invoked for some other reason, including a
@@ -10498,6 +10662,13 @@ SQLITE_API SQLITE_EXPERIMENTAL int sqlite3_snapshot_recover(sqlite3 *db, const c
** SQLITE_SERIALIZE_NOCOPY bit is set but no contiguous copy
** of the database exists.
**
+** After the call, if the SQLITE_SERIALIZE_NOCOPY bit had been set,
+** the returned buffer content will remain accessible and unchanged
+** until either the next write operation on the connection or when
+** the connection is closed, and applications must not modify the
+** buffer. If the bit had been clear, the returned buffer will not
+** be accessed by SQLite after the call.
+**
** A call to sqlite3_serialize(D,S,P,F) might return NULL even if the
** SQLITE_SERIALIZE_NOCOPY bit is omitted from argument F if a memory
** allocation error occurs.
@@ -10546,6 +10717,9 @@ SQLITE_API unsigned char *sqlite3_serialize(
** SQLite will try to increase the buffer size using sqlite3_realloc64()
** if writes on the database cause it to grow larger than M bytes.
**
+** Applications must not modify the buffer P or invalidate it before
+** the database connection D is closed.
+**
** The sqlite3_deserialize() interface will fail with SQLITE_BUSY if the
** database is currently in a read transaction or is involved in a backup
** operation.
@@ -10554,6 +10728,13 @@ SQLITE_API unsigned char *sqlite3_serialize(
** S argument to sqlite3_deserialize(D,S,P,N,M,F) is "temp" then the
** function returns SQLITE_ERROR.
**
+** The deserialized database should not be in [WAL mode]. If the database
+** is in WAL mode, then any attempt to use the database file will result
+** in an [SQLITE_CANTOPEN] error. The application can set the
+** [file format version numbers] (bytes 18 and 19) of the input database P
+** to 0x01 prior to invoking sqlite3_deserialize(D,S,P,N,M,F) to force the
+** database file into rollback mode and work around this limitation.
+**
** If sqlite3_deserialize(D,S,P,N,M,F) fails for any reason and if the
** SQLITE_DESERIALIZE_FREEONCLOSE bit is set in argument F, then
** [sqlite3_free()] is invoked on argument P prior to returning.
@@ -11627,6 +11808,18 @@ SQLITE_API int sqlite3changeset_concat(
/*
+** CAPI3REF: Upgrade the Schema of a Changeset/Patchset
+*/
+SQLITE_API int sqlite3changeset_upgrade(
+ sqlite3 *db,
+ const char *zDb,
+ int nIn, const void *pIn, /* Input changeset */
+ int *pnOut, void **ppOut /* OUT: Inverse of input */
+);
+
+
+
+/*
** CAPI3REF: Changegroup Handle
**
** A changegroup is an object used to combine two or more
@@ -11673,6 +11866,38 @@ typedef struct sqlite3_changegroup sqlite3_changegroup;
SQLITE_API int sqlite3changegroup_new(sqlite3_changegroup **pp);
/*
+** CAPI3REF: Add a Schema to a Changegroup
+** METHOD: sqlite3_changegroup_schema
+**
+** This method may be used to optionally enforce the rule that the changesets
+** added to the changegroup handle must match the schema of database zDb
+** ("main", "temp", or the name of an attached database). If
+** sqlite3changegroup_add() is called to add a changeset that is not compatible
+** with the configured schema, SQLITE_SCHEMA is returned and the changegroup
+** object is left in an undefined state.
+**
+** A changeset schema is considered compatible with the database schema in
+** the same way as for sqlite3changeset_apply(). Specifically, for each
+** table in the changeset, there exists a database table with:
+**
+** <ul>
+** <li> The name identified by the changeset, and
+** <li> at least as many columns as recorded in the changeset, and
+** <li> the primary key columns in the same position as recorded in
+** the changeset.
+** </ul>
+**
+** The output of the changegroup object always has the same schema as the
+** database nominated using this function. In cases where changesets passed
+** to sqlite3changegroup_add() have fewer columns than the corresponding table
+** in the database schema, these are filled in using the default column
+** values from the database schema. This makes it possible to combined
+** changesets that have different numbers of columns for a single table
+** within a changegroup, provided that they are otherwise compatible.
+*/
+SQLITE_API int sqlite3changegroup_schema(sqlite3_changegroup*, sqlite3*, const char *zDb);
+
+/*
** CAPI3REF: Add A Changeset To A Changegroup
** METHOD: sqlite3_changegroup
**
@@ -11740,13 +11965,18 @@ SQLITE_API int sqlite3changegroup_new(sqlite3_changegroup **pp);
** If the new changeset contains changes to a table that is already present
** in the changegroup, then the number of columns and the position of the
** primary key columns for the table must be consistent. If this is not the
-** case, this function fails with SQLITE_SCHEMA. If the input changeset
-** appears to be corrupt and the corruption is detected, SQLITE_CORRUPT is
-** returned. Or, if an out-of-memory condition occurs during processing, this
-** function returns SQLITE_NOMEM. In all cases, if an error occurs the state
-** of the final contents of the changegroup is undefined.
+** case, this function fails with SQLITE_SCHEMA. Except, if the changegroup
+** object has been configured with a database schema using the
+** sqlite3changegroup_schema() API, then it is possible to combine changesets
+** with different numbers of columns for a single table, provided that
+** they are otherwise compatible.
+**
+** If the input changeset appears to be corrupt and the corruption is
+** detected, SQLITE_CORRUPT is returned. Or, if an out-of-memory condition
+** occurs during processing, this function returns SQLITE_NOMEM.
**
-** If no error occurs, SQLITE_OK is returned.
+** In all cases, if an error occurs the state of the final contents of the
+** changegroup is undefined. If no error occurs, SQLITE_OK is returned.
*/
SQLITE_API int sqlite3changegroup_add(sqlite3_changegroup*, int nData, void *pData);
@@ -12011,10 +12241,17 @@ SQLITE_API int sqlite3changeset_apply_v2(
** <li>an insert change if all fields of the conflicting row match
** the row being inserted.
** </ul>
+**
+** <dt>SQLITE_CHANGESETAPPLY_FKNOACTION <dd>
+** If this flag it set, then all foreign key constraints in the target
+** database behave as if they were declared with "ON UPDATE NO ACTION ON
+** DELETE NO ACTION", even if they are actually CASCADE, RESTRICT, SET NULL
+** or SET DEFAULT.
*/
#define SQLITE_CHANGESETAPPLY_NOSAVEPOINT 0x0001
#define SQLITE_CHANGESETAPPLY_INVERT 0x0002
#define SQLITE_CHANGESETAPPLY_IGNORENOOP 0x0004
+#define SQLITE_CHANGESETAPPLY_FKNOACTION 0x0008
/*
** CAPI3REF: Constants Passed To The Conflict Handler
@@ -12580,8 +12817,11 @@ struct Fts5PhraseIter {
** created with the "columnsize=0" option.
**
** xColumnText:
-** This function attempts to retrieve the text of column iCol of the
-** current document. If successful, (*pz) is set to point to a buffer
+** If parameter iCol is less than zero, or greater than or equal to the
+** number of columns in the table, SQLITE_RANGE is returned.
+**
+** Otherwise, this function attempts to retrieve the text of column iCol of
+** the current document. If successful, (*pz) is set to point to a buffer
** containing the text in utf-8 encoding, (*pn) is set to the size in bytes
** (not characters) of the buffer and SQLITE_OK is returned. Otherwise,
** if an error occurs, an SQLite error code is returned and the final values
@@ -12591,8 +12831,10 @@ struct Fts5PhraseIter {
** Returns the number of phrases in the current query expression.
**
** xPhraseSize:
-** Returns the number of tokens in phrase iPhrase of the query. Phrases
-** are numbered starting from zero.
+** If parameter iCol is less than zero, or greater than or equal to the
+** number of phrases in the current query, as returned by xPhraseCount,
+** 0 is returned. Otherwise, this function returns the number of tokens in
+** phrase iPhrase of the query. Phrases are numbered starting from zero.
**
** xInstCount:
** Set *pnInst to the total number of occurrences of all phrases within
@@ -12608,12 +12850,13 @@ struct Fts5PhraseIter {
** Query for the details of phrase match iIdx within the current row.
** Phrase matches are numbered starting from zero, so the iIdx argument
** should be greater than or equal to zero and smaller than the value
-** output by xInstCount().
+** output by xInstCount(). If iIdx is less than zero or greater than
+** or equal to the value returned by xInstCount(), SQLITE_RANGE is returned.
**
-** Usually, output parameter *piPhrase is set to the phrase number, *piCol
+** Otherwise, output parameter *piPhrase is set to the phrase number, *piCol
** to the column in which it occurs and *piOff the token offset of the
-** first token of the phrase. Returns SQLITE_OK if successful, or an error
-** code (i.e. SQLITE_NOMEM) if an error occurs.
+** first token of the phrase. SQLITE_OK is returned if successful, or an
+** error code (i.e. SQLITE_NOMEM) if an error occurs.
**
** This API can be quite slow if used with an FTS5 table created with the
** "detail=none" or "detail=column" option.
@@ -12639,6 +12882,10 @@ struct Fts5PhraseIter {
** Invoking Api.xUserData() returns a copy of the pointer passed as
** the third argument to pUserData.
**
+** If parameter iPhrase is less than zero, or greater than or equal to
+** the number of phrases in the query, as returned by xPhraseCount(),
+** this function returns SQLITE_RANGE.
+**
** If the callback function returns any value other than SQLITE_OK, the
** query is abandoned and the xQueryPhrase function returns immediately.
** If the returned value is SQLITE_DONE, xQueryPhrase returns SQLITE_OK.
@@ -12753,6 +13000,39 @@ struct Fts5PhraseIter {
**
** xPhraseNextColumn()
** See xPhraseFirstColumn above.
+**
+** xQueryToken(pFts5, iPhrase, iToken, ppToken, pnToken)
+** This is used to access token iToken of phrase iPhrase of the current
+** query. Before returning, output parameter *ppToken is set to point
+** to a buffer containing the requested token, and *pnToken to the
+** size of this buffer in bytes.
+**
+** If iPhrase or iToken are less than zero, or if iPhrase is greater than
+** or equal to the number of phrases in the query as reported by
+** xPhraseCount(), or if iToken is equal to or greater than the number of
+** tokens in the phrase, SQLITE_RANGE is returned and *ppToken and *pnToken
+ are both zeroed.
+**
+** The output text is not a copy of the query text that specified the
+** token. It is the output of the tokenizer module. For tokendata=1
+** tables, this includes any embedded 0x00 and trailing data.
+**
+** xInstToken(pFts5, iIdx, iToken, ppToken, pnToken)
+** This is used to access token iToken of phrase hit iIdx within the
+** current row. If iIdx is less than zero or greater than or equal to the
+** value returned by xInstCount(), SQLITE_RANGE is returned. Otherwise,
+** output variable (*ppToken) is set to point to a buffer containing the
+** matching document token, and (*pnToken) to the size of that buffer in
+** bytes. This API is not available if the specified token matches a
+** prefix query term. In that case both output variables are always set
+** to 0.
+**
+** The output text is not a copy of the document text that was tokenized.
+** It is the output of the tokenizer module. For tokendata=1 tables, this
+** includes any embedded 0x00 and trailing data.
+**
+** This API can be quite slow if used with an FTS5 table created with the
+** "detail=none" or "detail=column" option.
*/
struct Fts5ExtensionApi {
int iVersion; /* Currently always set to 3 */
@@ -12790,6 +13070,13 @@ struct Fts5ExtensionApi {
int (*xPhraseFirstColumn)(Fts5Context*, int iPhrase, Fts5PhraseIter*, int*);
void (*xPhraseNextColumn)(Fts5Context*, Fts5PhraseIter*, int *piCol);
+
+ /* Below this point are iVersion>=3 only */
+ int (*xQueryToken)(Fts5Context*,
+ int iPhrase, int iToken,
+ const char **ppToken, int *pnToken
+ );
+ int (*xInstToken)(Fts5Context*, int iIdx, int iToken, const char**, int*);
};
/*
@@ -12984,8 +13271,8 @@ struct Fts5ExtensionApi {
** as separate queries of the FTS index are required for each synonym.
**
** When using methods (2) or (3), it is important that the tokenizer only
-** provide synonyms when tokenizing document text (method (2)) or query
-** text (method (3)), not both. Doing so will not cause any errors, but is
+** provide synonyms when tokenizing document text (method (3)) or query
+** text (method (2)), not both. Doing so will not cause any errors, but is
** inefficient.
*/
typedef struct Fts5Tokenizer Fts5Tokenizer;
@@ -13033,7 +13320,7 @@ struct fts5_api {
int (*xCreateTokenizer)(
fts5_api *pApi,
const char *zName,
- void *pContext,
+ void *pUserData,
fts5_tokenizer *pTokenizer,
void (*xDestroy)(void*)
);
@@ -13042,7 +13329,7 @@ struct fts5_api {
int (*xFindTokenizer)(
fts5_api *pApi,
const char *zName,
- void **ppContext,
+ void **ppUserData,
fts5_tokenizer *pTokenizer
);
@@ -13050,7 +13337,7 @@ struct fts5_api {
int (*xCreateFunction)(
fts5_api *pApi,
const char *zName,
- void *pContext,
+ void *pUserData,
fts5_extension_function xFunction,
void (*xDestroy)(void*)
);
diff --git a/dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.17/sqlite3.go b/dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.22/sqlite3.go
index 5e4e2ff..4b3b6ca 100644
--- a/dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.17/sqlite3.go
+++ b/dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.22/sqlite3.go
@@ -21,7 +21,6 @@ package sqlite3
#cgo CFLAGS: -DSQLITE_DEFAULT_WAL_SYNCHRONOUS=1
#cgo CFLAGS: -DSQLITE_ENABLE_UPDATE_DELETE_LIMIT
#cgo CFLAGS: -Wno-deprecated-declarations
-#cgo linux,!android CFLAGS: -DHAVE_PREAD64=1 -DHAVE_PWRITE64=1
#cgo openbsd CFLAGS: -I/usr/local/include
#cgo openbsd LDFLAGS: -L/usr/local/lib
#ifndef USE_LIBSQLITE3
@@ -48,6 +47,18 @@ package sqlite3
# define SQLITE_DETERMINISTIC 0
#endif
+#if defined(HAVE_PREAD64) && defined(HAVE_PWRITE64)
+# undef USE_PREAD
+# undef USE_PWRITE
+# define USE_PREAD64 1
+# define USE_PWRITE64 1
+#elif defined(HAVE_PREAD) && defined(HAVE_PWRITE)
+# undef USE_PREAD
+# undef USE_PWRITE
+# define USE_PREAD64 1
+# define USE_PWRITE64 1
+#endif
+
static int
_sqlite3_open_v2(const char *filename, sqlite3 **ppDb, int flags, const char *zVfs) {
#ifdef SQLITE_OPEN_URI
@@ -596,10 +607,9 @@ func (c *SQLiteConn) RegisterAuthorizer(callback func(int, string, string, strin
// RegisterFunc makes a Go function available as a SQLite function.
//
// The Go function can have arguments of the following types: any
-// numeric type except complex, bool, []byte, string and
-// interface{}. interface{} arguments are given the direct translation
-// of the SQLite data type: int64 for INTEGER, float64 for FLOAT,
-// []byte for BLOB, string for TEXT.
+// numeric type except complex, bool, []byte, string and any.
+// any arguments are given the direct translation of the SQLite data type:
+// int64 for INTEGER, float64 for FLOAT, []byte for BLOB, string for TEXT.
//
// The function can additionally be variadic, as long as the type of
// the variadic argument is one of the above.
@@ -609,7 +619,7 @@ func (c *SQLiteConn) RegisterAuthorizer(callback func(int, string, string, strin
// optimizations in its queries.
//
// See _example/go_custom_funcs for a detailed example.
-func (c *SQLiteConn) RegisterFunc(name string, impl interface{}, pure bool) error {
+func (c *SQLiteConn) RegisterFunc(name string, impl any, pure bool) error {
var fi functionInfo
fi.f = reflect.ValueOf(impl)
t := fi.f.Type()
@@ -691,7 +701,7 @@ func sqlite3CreateFunction(db *C.sqlite3, zFunctionName *C.char, nArg C.int, eTe
// return an error in addition to their other return values.
//
// See _example/go_custom_funcs for a detailed example.
-func (c *SQLiteConn) RegisterAggregator(name string, impl interface{}, pure bool) error {
+func (c *SQLiteConn) RegisterAggregator(name string, impl any, pure bool) error {
var ai aggInfo
ai.constructor = reflect.ValueOf(impl)
t := ai.constructor.Type()
@@ -865,14 +875,16 @@ func (c *SQLiteConn) exec(ctx context.Context, query string, args []driver.Named
// consume the number of arguments used in the current
// statement and append all named arguments not
// contained therein
- stmtArgs = append(stmtArgs, args[start:start+na]...)
- for i := range args {
- if (i < start || i >= na) && args[i].Name != "" {
- stmtArgs = append(stmtArgs, args[i])
+ if len(args[start:start+na]) > 0 {
+ stmtArgs = append(stmtArgs, args[start:start+na]...)
+ for i := range args {
+ if (i < start || i >= na) && args[i].Name != "" {
+ stmtArgs = append(stmtArgs, args[i])
+ }
+ }
+ for i := range stmtArgs {
+ stmtArgs[i].Ordinal = i + 1
}
- }
- for i := range stmtArgs {
- stmtArgs[i].Ordinal = i + 1
}
res, err = s.(*SQLiteStmt).exec(ctx, stmtArgs)
if err != nil && err != driver.ErrSkip {
@@ -965,103 +977,104 @@ func (c *SQLiteConn) begin(ctx context.Context) (driver.Tx, error) {
// The argument is may be either in parentheses or it may be separated from
// the pragma name by an equal sign. The two syntaxes yield identical results.
// In many pragmas, the argument is a boolean. The boolean can be one of:
-// 1 yes true on
-// 0 no false off
+//
+// 1 yes true on
+// 0 no false off
//
// You can specify a DSN string using a URI as the filename.
-// test.db
-// file:test.db?cache=shared&mode=memory
-// :memory:
-// file::memory:
//
-// mode
-// Access mode of the database.
-// https://www.sqlite.org/c3ref/open.html
-// Values:
-// - ro
-// - rw
-// - rwc
-// - memory
+// test.db
+// file:test.db?cache=shared&mode=memory
+// :memory:
+// file::memory:
//
-// cache
-// SQLite Shared-Cache Mode
-// https://www.sqlite.org/sharedcache.html
-// Values:
-// - shared
-// - private
+// mode
+// Access mode of the database.
+// https://www.sqlite.org/c3ref/open.html
+// Values:
+// - ro
+// - rw
+// - rwc
+// - memory
//
-// immutable=Boolean
-// The immutable parameter is a boolean query parameter that indicates
-// that the database file is stored on read-only media. When immutable is set,
-// SQLite assumes that the database file cannot be changed,
-// even by a process with higher privilege,
-// and so the database is opened read-only and all locking and change detection is disabled.
-// Caution: Setting the immutable property on a database file that
-// does in fact change can result in incorrect query results and/or SQLITE_CORRUPT errors.
+// cache
+// SQLite Shared-Cache Mode
+// https://www.sqlite.org/sharedcache.html
+// Values:
+// - shared
+// - private
//
-// go-sqlite3 adds the following query parameters to those used by SQLite:
-// _loc=XXX
-// Specify location of time format. It's possible to specify "auto".
+// immutable=Boolean
+// The immutable parameter is a boolean query parameter that indicates
+// that the database file is stored on read-only media. When immutable is set,
+// SQLite assumes that the database file cannot be changed,
+// even by a process with higher privilege,
+// and so the database is opened read-only and all locking and change detection is disabled.
+// Caution: Setting the immutable property on a database file that
+// does in fact change can result in incorrect query results and/or SQLITE_CORRUPT errors.
//
-// _mutex=XXX
-// Specify mutex mode. XXX can be "no", "full".
+// go-sqlite3 adds the following query parameters to those used by SQLite:
//
-// _txlock=XXX
-// Specify locking behavior for transactions. XXX can be "immediate",
-// "deferred", "exclusive".
+// _loc=XXX
+// Specify location of time format. It's possible to specify "auto".
//
-// _auto_vacuum=X | _vacuum=X
-// 0 | none - Auto Vacuum disabled
-// 1 | full - Auto Vacuum FULL
-// 2 | incremental - Auto Vacuum Incremental
+// _mutex=XXX
+// Specify mutex mode. XXX can be "no", "full".
//
-// _busy_timeout=XXX"| _timeout=XXX
-// Specify value for sqlite3_busy_timeout.
+// _txlock=XXX
+// Specify locking behavior for transactions. XXX can be "immediate",
+// "deferred", "exclusive".
//
-// _case_sensitive_like=Boolean | _cslike=Boolean
-// https://www.sqlite.org/pragma.html#pragma_case_sensitive_like
-// Default or disabled the LIKE operation is case-insensitive.
-// When enabling this options behaviour of LIKE will become case-sensitive.
+// _auto_vacuum=X | _vacuum=X
+// 0 | none - Auto Vacuum disabled
+// 1 | full - Auto Vacuum FULL
+// 2 | incremental - Auto Vacuum Incremental
//
-// _defer_foreign_keys=Boolean | _defer_fk=Boolean
-// Defer Foreign Keys until outermost transaction is committed.
+// _busy_timeout=XXX"| _timeout=XXX
+// Specify value for sqlite3_busy_timeout.
//
-// _foreign_keys=Boolean | _fk=Boolean
-// Enable or disable enforcement of foreign keys.
+// _case_sensitive_like=Boolean | _cslike=Boolean
+// https://www.sqlite.org/pragma.html#pragma_case_sensitive_like
+// Default or disabled the LIKE operation is case-insensitive.
+// When enabling this options behaviour of LIKE will become case-sensitive.
//
-// _ignore_check_constraints=Boolean
-// This pragma enables or disables the enforcement of CHECK constraints.
-// The default setting is off, meaning that CHECK constraints are enforced by default.
+// _defer_foreign_keys=Boolean | _defer_fk=Boolean
+// Defer Foreign Keys until outermost transaction is committed.
//
-// _journal_mode=MODE | _journal=MODE
-// Set journal mode for the databases associated with the current connection.
-// https://www.sqlite.org/pragma.html#pragma_journal_mode
+// _foreign_keys=Boolean | _fk=Boolean
+// Enable or disable enforcement of foreign keys.
//
-// _locking_mode=X | _locking=X
-// Sets the database connection locking-mode.
-// The locking-mode is either NORMAL or EXCLUSIVE.
-// https://www.sqlite.org/pragma.html#pragma_locking_mode
+// _ignore_check_constraints=Boolean
+// This pragma enables or disables the enforcement of CHECK constraints.
+// The default setting is off, meaning that CHECK constraints are enforced by default.
//
-// _query_only=Boolean
-// The query_only pragma prevents all changes to database files when enabled.
+// _journal_mode=MODE | _journal=MODE
+// Set journal mode for the databases associated with the current connection.
+// https://www.sqlite.org/pragma.html#pragma_journal_mode
//
-// _recursive_triggers=Boolean | _rt=Boolean
-// Enable or disable recursive triggers.
+// _locking_mode=X | _locking=X
+// Sets the database connection locking-mode.
+// The locking-mode is either NORMAL or EXCLUSIVE.
+// https://www.sqlite.org/pragma.html#pragma_locking_mode
//
-// _secure_delete=Boolean|FAST
-// When secure_delete is on, SQLite overwrites deleted content with zeros.
-// https://www.sqlite.org/pragma.html#pragma_secure_delete
+// _query_only=Boolean
+// The query_only pragma prevents all changes to database files when enabled.
//
-// _synchronous=X | _sync=X
-// Change the setting of the "synchronous" flag.
-// https://www.sqlite.org/pragma.html#pragma_synchronous
+// _recursive_triggers=Boolean | _rt=Boolean
+// Enable or disable recursive triggers.
//
-// _writable_schema=Boolean
-// When this pragma is on, the SQLITE_MASTER tables in which database
-// can be changed using ordinary UPDATE, INSERT, and DELETE statements.
-// Warning: misuse of this pragma can easily result in a corrupt database file.
+// _secure_delete=Boolean|FAST
+// When secure_delete is on, SQLite overwrites deleted content with zeros.
+// https://www.sqlite.org/pragma.html#pragma_secure_delete
//
+// _synchronous=X | _sync=X
+// Change the setting of the "synchronous" flag.
+// https://www.sqlite.org/pragma.html#pragma_synchronous
//
+// _writable_schema=Boolean
+// When this pragma is on, the SQLITE_MASTER tables in which database
+// can be changed using ordinary UPDATE, INSERT, and DELETE statements.
+// Warning: misuse of this pragma can easily result in a corrupt database file.
func (d *SQLiteDriver) Open(dsn string) (driver.Conn, error) {
if C.sqlite3_threadsafe() == 0 {
return nil, errors.New("sqlite library was not compiled for thread-safe operation")
@@ -1895,6 +1908,7 @@ func (s *SQLiteStmt) Close() error {
if rv != C.SQLITE_OK {
return s.c.lastError()
}
+ s.c = nil
runtime.SetFinalizer(s, nil)
return nil
}
@@ -2000,6 +2014,7 @@ func (s *SQLiteStmt) query(ctx context.Context, args []driver.NamedValue) (drive
closed: false,
ctx: ctx,
}
+ runtime.SetFinalizer(rows, (*SQLiteRows).Close)
return rows, nil
}
@@ -2045,6 +2060,7 @@ func (s *SQLiteStmt) exec(ctx context.Context, args []driver.NamedValue) (driver
err error
}
resultCh := make(chan result)
+ defer close(resultCh)
go func() {
r, err := s.execSync(args)
resultCh <- result{r, err}
@@ -2111,6 +2127,8 @@ func (rc *SQLiteRows) Close() error {
return rc.s.c.lastError()
}
rc.s.mu.Unlock()
+ rc.s = nil
+ runtime.SetFinalizer(rc, nil)
return nil
}
@@ -2157,6 +2175,7 @@ func (rc *SQLiteRows) Next(dest []driver.Value) error {
return rc.nextSyncLocked(dest)
}
resultCh := make(chan error)
+ defer close(resultCh)
go func() {
resultCh <- rc.nextSyncLocked(dest)
}()
diff --git a/dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.17/sqlite3_context.go b/dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.22/sqlite3_context.go
index 7c7431d..7c7431d 100644
--- a/dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.17/sqlite3_context.go
+++ b/dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.22/sqlite3_context.go
diff --git a/dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.17/sqlite3_func_crypt.go b/dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.22/sqlite3_func_crypt.go
index afd9333..bd9a3bc 100644
--- a/dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.17/sqlite3_func_crypt.go
+++ b/dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.22/sqlite3_func_crypt.go
@@ -50,15 +50,15 @@ import (
// perhaps using a cryptographic hash function like SHA1.
// CryptEncoderSHA1 encodes a password with SHA1
-func CryptEncoderSHA1(pass []byte, hash interface{}) []byte {
+func CryptEncoderSHA1(pass []byte, hash any) []byte {
h := sha1.Sum(pass)
return h[:]
}
// CryptEncoderSSHA1 encodes a password with SHA1 with the
// configured salt.
-func CryptEncoderSSHA1(salt string) func(pass []byte, hash interface{}) []byte {
- return func(pass []byte, hash interface{}) []byte {
+func CryptEncoderSSHA1(salt string) func(pass []byte, hash any) []byte {
+ return func(pass []byte, hash any) []byte {
s := []byte(salt)
p := append(pass, s...)
h := sha1.Sum(p)
@@ -67,15 +67,15 @@ func CryptEncoderSSHA1(salt string) func(pass []byte, hash interface{}) []byte {
}
// CryptEncoderSHA256 encodes a password with SHA256
-func CryptEncoderSHA256(pass []byte, hash interface{}) []byte {
+func CryptEncoderSHA256(pass []byte, hash any) []byte {
h := sha256.Sum256(pass)
return h[:]
}
// CryptEncoderSSHA256 encodes a password with SHA256
// with the configured salt
-func CryptEncoderSSHA256(salt string) func(pass []byte, hash interface{}) []byte {
- return func(pass []byte, hash interface{}) []byte {
+func CryptEncoderSSHA256(salt string) func(pass []byte, hash any) []byte {
+ return func(pass []byte, hash any) []byte {
s := []byte(salt)
p := append(pass, s...)
h := sha256.Sum256(p)
@@ -84,15 +84,15 @@ func CryptEncoderSSHA256(salt string) func(pass []byte, hash interface{}) []byte
}
// CryptEncoderSHA384 encodes a password with SHA384
-func CryptEncoderSHA384(pass []byte, hash interface{}) []byte {
+func CryptEncoderSHA384(pass []byte, hash any) []byte {
h := sha512.Sum384(pass)
return h[:]
}
// CryptEncoderSSHA384 encodes a password with SHA384
// with the configured salt
-func CryptEncoderSSHA384(salt string) func(pass []byte, hash interface{}) []byte {
- return func(pass []byte, hash interface{}) []byte {
+func CryptEncoderSSHA384(salt string) func(pass []byte, hash any) []byte {
+ return func(pass []byte, hash any) []byte {
s := []byte(salt)
p := append(pass, s...)
h := sha512.Sum384(p)
@@ -101,15 +101,15 @@ func CryptEncoderSSHA384(salt string) func(pass []byte, hash interface{}) []byte
}
// CryptEncoderSHA512 encodes a password with SHA512
-func CryptEncoderSHA512(pass []byte, hash interface{}) []byte {
+func CryptEncoderSHA512(pass []byte, hash any) []byte {
h := sha512.Sum512(pass)
return h[:]
}
// CryptEncoderSSHA512 encodes a password with SHA512
// with the configured salt
-func CryptEncoderSSHA512(salt string) func(pass []byte, hash interface{}) []byte {
- return func(pass []byte, hash interface{}) []byte {
+func CryptEncoderSSHA512(salt string) func(pass []byte, hash any) []byte {
+ return func(pass []byte, hash any) []byte {
s := []byte(salt)
p := append(pass, s...)
h := sha512.Sum512(p)
diff --git a/dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.17/sqlite3_func_crypt_test.go b/dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.22/sqlite3_func_crypt_test.go
index 0329ca8..e37b467 100644
--- a/dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.17/sqlite3_func_crypt_test.go
+++ b/dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.22/sqlite3_func_crypt_test.go
@@ -29,7 +29,7 @@ func TestCryptEncoders(t *testing.T) {
}
for _, e := range tests {
- var fn func(pass []byte, hash interface{}) []byte
+ var fn func(pass []byte, hash any) []byte
switch e.enc {
case "sha1":
fn = CryptEncoderSHA1
diff --git a/dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.17/sqlite3_go113_test.go b/dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.22/sqlite3_go113_test.go
index a010cb7..f38d6d1 100644
--- a/dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.17/sqlite3_go113_test.go
+++ b/dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.22/sqlite3_go113_test.go
@@ -3,6 +3,7 @@
// Use of this source code is governed by an MIT-style
// license that can be found in the LICENSE file.
+//go:build go1.13 && cgo
// +build go1.13,cgo
package sqlite3
@@ -45,7 +46,7 @@ func TestBeginTxCancel(t *testing.T) {
}
}()
- err = conn.Raw(func(driverConn interface{}) error {
+ err = conn.Raw(func(driverConn any) error {
d, ok := driverConn.(driver.ConnBeginTx)
if !ok {
t.Fatal("unexpected: wrong type")
@@ -96,7 +97,7 @@ func TestStmtReadonly(t *testing.T) {
}
var ro bool
- c.Raw(func(dc interface{}) error {
+ c.Raw(func(dc any) error {
stmt, err := dc.(*SQLiteConn).Prepare(query)
if err != nil {
return err
diff --git a/dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.17/sqlite3_go18.go b/dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.22/sqlite3_go18.go
index 514fd7e..34cad08 100644
--- a/dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.17/sqlite3_go18.go
+++ b/dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.22/sqlite3_go18.go
@@ -3,8 +3,8 @@
// Use of this source code is governed by an MIT-style
// license that can be found in the LICENSE file.
-// +build cgo
-// +build go1.8
+//go:build cgo && go1.8
+// +build cgo,go1.8
package sqlite3
diff --git a/dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.17/sqlite3_go18_test.go b/dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.22/sqlite3_go18_test.go
index 8c8c451..eec7479 100644
--- a/dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.17/sqlite3_go18_test.go
+++ b/dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.22/sqlite3_go18_test.go
@@ -3,6 +3,7 @@
// Use of this source code is governed by an MIT-style
// license that can be found in the LICENSE file.
+//go:build go1.8 && cgo
// +build go1.8,cgo
package sqlite3
diff --git a/dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.17/sqlite3_libsqlite3.go b/dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.22/sqlite3_libsqlite3.go
index ac609c9..95cc7c0 100644
--- a/dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.17/sqlite3_libsqlite3.go
+++ b/dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.22/sqlite3_libsqlite3.go
@@ -3,6 +3,7 @@
// Use of this source code is governed by an MIT-style
// license that can be found in the LICENSE file.
+//go:build libsqlite3
// +build libsqlite3
package sqlite3
diff --git a/dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.17/sqlite3_load_extension.go b/dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.22/sqlite3_load_extension.go
index 9433fea..03cbc8b 100644
--- a/dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.17/sqlite3_load_extension.go
+++ b/dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.22/sqlite3_load_extension.go
@@ -3,6 +3,7 @@
// Use of this source code is governed by an MIT-style
// license that can be found in the LICENSE file.
+//go:build !sqlite_omit_load_extension
// +build !sqlite_omit_load_extension
package sqlite3
diff --git a/dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.17/sqlite3_load_extension_omit.go b/dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.22/sqlite3_load_extension_omit.go
index 8c75f9b..d4f8ce6 100644
--- a/dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.17/sqlite3_load_extension_omit.go
+++ b/dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.22/sqlite3_load_extension_omit.go
@@ -3,6 +3,7 @@
// Use of this source code is governed by an MIT-style
// license that can be found in the LICENSE file.
+//go:build sqlite_omit_load_extension
// +build sqlite_omit_load_extension
package sqlite3
diff --git a/dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.17/sqlite3_load_extension_test.go b/dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.22/sqlite3_load_extension_test.go
index 97b1123..c6c03bb 100644
--- a/dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.17/sqlite3_load_extension_test.go
+++ b/dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.22/sqlite3_load_extension_test.go
@@ -3,6 +3,7 @@
// Use of this source code is governed by an MIT-style
// license that can be found in the LICENSE file.
+//go:build !sqlite_omit_load_extension
// +build !sqlite_omit_load_extension
package sqlite3
diff --git a/dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.17/sqlite3_opt_allow_uri_authority.go b/dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.22/sqlite3_opt_allow_uri_authority.go
index 8c4d4d2..51240cb 100644
--- a/dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.17/sqlite3_opt_allow_uri_authority.go
+++ b/dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.22/sqlite3_opt_allow_uri_authority.go
@@ -4,6 +4,7 @@
// Use of this source code is governed by an MIT-style
// license that can be found in the LICENSE file.
+//go:build sqlite_allow_uri_authority
// +build sqlite_allow_uri_authority
package sqlite3
diff --git a/dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.17/sqlite3_opt_app_armor.go b/dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.22/sqlite3_opt_app_armor.go
index 63c80cf..565dbc2 100644
--- a/dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.17/sqlite3_opt_app_armor.go
+++ b/dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.22/sqlite3_opt_app_armor.go
@@ -4,8 +4,8 @@
// Use of this source code is governed by an MIT-style
// license that can be found in the LICENSE file.
-// +build !windows
-// +build sqlite_app_armor
+//go:build !windows && sqlite_app_armor
+// +build !windows,sqlite_app_armor
package sqlite3
diff --git a/dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.17/sqlite3_opt_column_metadata.go b/dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.22/sqlite3_opt_column_metadata.go
index c67fa82..63659b4 100644
--- a/dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.17/sqlite3_opt_column_metadata.go
+++ b/dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.22/sqlite3_opt_column_metadata.go
@@ -1,3 +1,4 @@
+//go:build sqlite_column_metadata
// +build sqlite_column_metadata
package sqlite3
diff --git a/dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.17/sqlite3_opt_column_metadata_test.go b/dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.22/sqlite3_opt_column_metadata_test.go
index 28767f1..0a9eec6 100644
--- a/dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.17/sqlite3_opt_column_metadata_test.go
+++ b/dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.22/sqlite3_opt_column_metadata_test.go
@@ -1,3 +1,4 @@
+//go:build sqlite_column_metadata
// +build sqlite_column_metadata
package sqlite3
diff --git a/dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.17/sqlite3_opt_foreign_keys.go b/dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.22/sqlite3_opt_foreign_keys.go
index a676e09..82c944e 100644
--- a/dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.17/sqlite3_opt_foreign_keys.go
+++ b/dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.22/sqlite3_opt_foreign_keys.go
@@ -4,6 +4,7 @@
// Use of this source code is governed by an MIT-style
// license that can be found in the LICENSE file.
+//go:build sqlite_foreign_keys
// +build sqlite_foreign_keys
package sqlite3
diff --git a/dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.17/sqlite3_opt_fts3_test.go b/dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.22/sqlite3_opt_fts3_test.go
index ce44474..a7b31a7 100644
--- a/dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.17/sqlite3_opt_fts3_test.go
+++ b/dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.22/sqlite3_opt_fts3_test.go
@@ -3,6 +3,7 @@
// Use of this source code is governed by an MIT-style
// license that can be found in the LICENSE file.
+//go:build cgo
// +build cgo
package sqlite3
diff --git a/dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.17/sqlite3_opt_fts5.go b/dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.22/sqlite3_opt_fts5.go
index 0f38df7..2645f28 100644
--- a/dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.17/sqlite3_opt_fts5.go
+++ b/dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.22/sqlite3_opt_fts5.go
@@ -3,6 +3,7 @@
// Use of this source code is governed by an MIT-style
// license that can be found in the LICENSE file.
+//go:build sqlite_fts5 || fts5
// +build sqlite_fts5 fts5
package sqlite3
diff --git a/dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.17/sqlite3_opt_icu.go b/dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.22/sqlite3_opt_icu.go
index f82bdd0..2d47827 100644
--- a/dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.17/sqlite3_opt_icu.go
+++ b/dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.22/sqlite3_opt_icu.go
@@ -3,6 +3,7 @@
// Use of this source code is governed by an MIT-style
// license that can be found in the LICENSE file.
+//go:build sqlite_icu || icu
// +build sqlite_icu icu
package sqlite3
diff --git a/dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.17/sqlite3_opt_introspect.go b/dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.22/sqlite3_opt_introspect.go
index 6512b2b..cd2e540 100644
--- a/dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.17/sqlite3_opt_introspect.go
+++ b/dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.22/sqlite3_opt_introspect.go
@@ -4,6 +4,7 @@
// Use of this source code is governed by an MIT-style
// license that can be found in the LICENSE file.
+//go:build sqlite_introspect
// +build sqlite_introspect
package sqlite3
diff --git a/dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.17/sqlite3_opt_math_functions.go b/dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.22/sqlite3_opt_math_functions.go
index 7cd68d3..bd62d9a 100644
--- a/dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.17/sqlite3_opt_math_functions.go
+++ b/dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.22/sqlite3_opt_math_functions.go
@@ -3,6 +3,7 @@
// Use of this source code is governed by an MIT-style
// license that can be found in the LICENSE file.
+//go:build sqlite_math_functions
// +build sqlite_math_functions
package sqlite3
diff --git a/dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.17/sqlite3_opt_math_functions_test.go b/dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.22/sqlite3_opt_math_functions_test.go
index 6ff076b..09dbd8d 100644
--- a/dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.17/sqlite3_opt_math_functions_test.go
+++ b/dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.22/sqlite3_opt_math_functions_test.go
@@ -1,3 +1,4 @@
+//go:build sqlite_math_functions
// +build sqlite_math_functions
package sqlite3
diff --git a/dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.17/sqlite3_opt_os_trace.go b/dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.22/sqlite3_opt_os_trace.go
index 9a30566..9a30566 100644
--- a/dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.17/sqlite3_opt_os_trace.go
+++ b/dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.22/sqlite3_opt_os_trace.go
diff --git a/dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.17/sqlite3_opt_preupdate.go b/dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.22/sqlite3_opt_preupdate.go
index cea032e..ed725ee 100644
--- a/dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.17/sqlite3_opt_preupdate.go
+++ b/dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.22/sqlite3_opt_preupdate.go
@@ -4,6 +4,7 @@
// Use of this source code is governed by an MIT-style
// license that can be found in the LICENSE file.
+//go:build cgo
// +build cgo
package sqlite3
diff --git a/dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.17/sqlite3_opt_preupdate_hook.go b/dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.22/sqlite3_opt_preupdate_hook.go
index b43e482..8cce278 100644
--- a/dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.17/sqlite3_opt_preupdate_hook.go
+++ b/dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.22/sqlite3_opt_preupdate_hook.go
@@ -4,6 +4,7 @@
// Use of this source code is governed by an MIT-style
// license that can be found in the LICENSE file.
+//go:build sqlite_preupdate_hook
// +build sqlite_preupdate_hook
package sqlite3
@@ -54,10 +55,10 @@ func (d *SQLitePreUpdateData) Count() int {
return int(C.sqlite3_preupdate_count(d.Conn.db))
}
-func (d *SQLitePreUpdateData) row(dest []interface{}, new bool) error {
+func (d *SQLitePreUpdateData) row(dest []any, new bool) error {
for i := 0; i < d.Count() && i < len(dest); i++ {
var val *C.sqlite3_value
- var src interface{}
+ var src any
// Initially I tried making this just a function pointer argument, but
// it's absurdly complicated to pass C function pointers.
@@ -95,7 +96,7 @@ func (d *SQLitePreUpdateData) row(dest []interface{}, new bool) error {
// Old populates dest with the row data to be replaced. This works similar to
// database/sql's Rows.Scan()
-func (d *SQLitePreUpdateData) Old(dest ...interface{}) error {
+func (d *SQLitePreUpdateData) Old(dest ...any) error {
if d.Op == SQLITE_INSERT {
return errors.New("There is no old row for INSERT operations")
}
@@ -104,7 +105,7 @@ func (d *SQLitePreUpdateData) Old(dest ...interface{}) error {
// New populates dest with the replacement row data. This works similar to
// database/sql's Rows.Scan()
-func (d *SQLitePreUpdateData) New(dest ...interface{}) error {
+func (d *SQLitePreUpdateData) New(dest ...any) error {
if d.Op == SQLITE_DELETE {
return errors.New("There is no new row for DELETE operations")
}
diff --git a/dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.17/sqlite3_opt_preupdate_hook_test.go b/dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.22/sqlite3_opt_preupdate_hook_test.go
index 20c8766..4892602 100644
--- a/dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.17/sqlite3_opt_preupdate_hook_test.go
+++ b/dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.22/sqlite3_opt_preupdate_hook_test.go
@@ -4,6 +4,7 @@
// Use of this source code is governed by an MIT-style
// license that can be found in the LICENSE file.
+//go:build sqlite_preupdate_hook
// +build sqlite_preupdate_hook
package sqlite3
@@ -18,8 +19,8 @@ type preUpdateHookDataForTest struct {
tableName string
count int
op int
- oldRow []interface{}
- newRow []interface{}
+ oldRow []any
+ newRow []any
}
func TestPreUpdateHook(t *testing.T) {
@@ -29,7 +30,7 @@ func TestPreUpdateHook(t *testing.T) {
ConnectHook: func(conn *SQLiteConn) error {
conn.RegisterPreUpdateHook(func(data SQLitePreUpdateData) {
eval := -1
- oldRow := []interface{}{eval}
+ oldRow := []any{eval}
if data.Op != SQLITE_INSERT {
err := data.Old(oldRow...)
if err != nil {
@@ -38,7 +39,7 @@ func TestPreUpdateHook(t *testing.T) {
}
eval2 := -1
- newRow := []interface{}{eval2}
+ newRow := []any{eval2}
if data.Op != SQLITE_DELETE {
err := data.New(newRow...)
if err != nil {
@@ -47,7 +48,7 @@ func TestPreUpdateHook(t *testing.T) {
}
// tests dest bound checks in loop
- var tooSmallRow []interface{}
+ var tooSmallRow []any
if data.Op != SQLITE_INSERT {
err := data.Old(tooSmallRow...)
if err != nil {
diff --git a/dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.17/sqlite3_opt_preupdate_omit.go b/dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.22/sqlite3_opt_preupdate_omit.go
index c510a15..f60da6c 100644
--- a/dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.17/sqlite3_opt_preupdate_omit.go
+++ b/dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.22/sqlite3_opt_preupdate_omit.go
@@ -4,6 +4,7 @@
// Use of this source code is governed by an MIT-style
// license that can be found in the LICENSE file.
+//go:build !sqlite_preupdate_hook && cgo
// +build !sqlite_preupdate_hook,cgo
package sqlite3
diff --git a/dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.17/sqlite3_opt_secure_delete.go b/dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.22/sqlite3_opt_secure_delete.go
index 934fa6b..6bb05b8 100644
--- a/dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.17/sqlite3_opt_secure_delete.go
+++ b/dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.22/sqlite3_opt_secure_delete.go
@@ -4,6 +4,7 @@
// Use of this source code is governed by an MIT-style
// license that can be found in the LICENSE file.
+//go:build sqlite_secure_delete
// +build sqlite_secure_delete
package sqlite3
diff --git a/dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.17/sqlite3_opt_secure_delete_fast.go b/dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.22/sqlite3_opt_secure_delete_fast.go
index b0de130..982020a 100644
--- a/dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.17/sqlite3_opt_secure_delete_fast.go
+++ b/dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.22/sqlite3_opt_secure_delete_fast.go
@@ -4,6 +4,7 @@
// Use of this source code is governed by an MIT-style
// license that can be found in the LICENSE file.
+//go:build sqlite_secure_delete_fast
// +build sqlite_secure_delete_fast
package sqlite3
diff --git a/dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.17/sqlite3_opt_serialize.go b/dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.22/sqlite3_opt_serialize.go
index 2560c43..f1710c1 100644
--- a/dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.17/sqlite3_opt_serialize.go
+++ b/dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.22/sqlite3_opt_serialize.go
@@ -1,3 +1,4 @@
+//go:build !libsqlite3 || sqlite_serialize
// +build !libsqlite3 sqlite_serialize
package sqlite3
diff --git a/dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.17/sqlite3_opt_serialize_omit.go b/dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.22/sqlite3_opt_serialize_omit.go
index b154dd3..d00ead0 100644
--- a/dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.17/sqlite3_opt_serialize_omit.go
+++ b/dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.22/sqlite3_opt_serialize_omit.go
@@ -1,3 +1,4 @@
+//go:build libsqlite3 && !sqlite_serialize
// +build libsqlite3,!sqlite_serialize
package sqlite3
diff --git a/dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.17/sqlite3_opt_serialize_test.go b/dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.22/sqlite3_opt_serialize_test.go
index 624c5a9..5c7efec 100644
--- a/dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.17/sqlite3_opt_serialize_test.go
+++ b/dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.22/sqlite3_opt_serialize_test.go
@@ -1,3 +1,4 @@
+//go:build !libsqlite3 || sqlite_serialize
// +build !libsqlite3 sqlite_serialize
package sqlite3
@@ -54,7 +55,7 @@ func TestSerializeDeserialize(t *testing.T) {
defer srcConn.Close()
var serialized []byte
- if err := srcConn.Raw(func(raw interface{}) error {
+ if err := srcConn.Raw(func(raw any) error {
var err error
serialized, err = raw.(*SQLiteConn).Serialize("")
return err
@@ -80,7 +81,7 @@ func TestSerializeDeserialize(t *testing.T) {
}
defer destConn.Close()
- if err := destConn.Raw(func(raw interface{}) error {
+ if err := destConn.Raw(func(raw any) error {
return raw.(*SQLiteConn).Deserialize(serialized, "")
}); err != nil {
t.Fatal("Failed to deserialize source database:", err)
diff --git a/dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.17/sqlite3_opt_stat4.go b/dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.22/sqlite3_opt_stat4.go
index d4d30f0..799fbb0 100644
--- a/dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.17/sqlite3_opt_stat4.go
+++ b/dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.22/sqlite3_opt_stat4.go
@@ -4,6 +4,7 @@
// Use of this source code is governed by an MIT-style
// license that can be found in the LICENSE file.
+//go:build sqlite_stat4
// +build sqlite_stat4
package sqlite3
diff --git a/dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.17/sqlite3_opt_unlock_notify.c b/dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.22/sqlite3_opt_unlock_notify.c
index fc37b33..fc37b33 100644
--- a/dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.17/sqlite3_opt_unlock_notify.c
+++ b/dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.22/sqlite3_opt_unlock_notify.c
diff --git a/dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.17/sqlite3_opt_unlock_notify.go b/dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.22/sqlite3_opt_unlock_notify.go
index adfa26c..76f7bbf 100644
--- a/dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.17/sqlite3_opt_unlock_notify.go
+++ b/dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.22/sqlite3_opt_unlock_notify.go
@@ -3,8 +3,8 @@
// Use of this source code is governed by an MIT-style
// license that can be found in the LICENSE file.
-// +build cgo
-// +build sqlite_unlock_notify
+//go:build cgo && sqlite_unlock_notify
+// +build cgo,sqlite_unlock_notify
package sqlite3
diff --git a/dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.17/sqlite3_opt_unlock_notify_test.go b/dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.22/sqlite3_opt_unlock_notify_test.go
index 95db938..3a9168c 100644
--- a/dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.17/sqlite3_opt_unlock_notify_test.go
+++ b/dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.22/sqlite3_opt_unlock_notify_test.go
@@ -3,6 +3,7 @@
// Use of this source code is governed by an MIT-style
// license that can be found in the LICENSE file.
+//go:build sqlite_unlock_notify
// +build sqlite_unlock_notify
package sqlite3
diff --git a/dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.17/sqlite3_opt_userauth.go b/dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.22/sqlite3_opt_userauth.go
index b62b608..de9630c 100644
--- a/dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.17/sqlite3_opt_userauth.go
+++ b/dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.22/sqlite3_opt_userauth.go
@@ -3,6 +3,7 @@
// Use of this source code is governed by an MIT-style
// license that can be found in the LICENSE file.
+//go:build sqlite_userauth
// +build sqlite_userauth
package sqlite3
@@ -79,7 +80,7 @@ var (
// If a database contains the SQLITE_USER table, then the
// call to Authenticate must be invoked with an
// appropriate username and password prior to enable read and write
-//access to the database.
+// access to the database.
//
// Return SQLITE_OK on success or SQLITE_ERROR if the username/password
// combination is incorrect or unknown.
@@ -103,9 +104,10 @@ func (c *SQLiteConn) Authenticate(username, password string) error {
// It is however exported for usage within SQL by the user.
//
// Returns:
-// C.SQLITE_OK (0)
-// C.SQLITE_ERROR (1)
-// C.SQLITE_AUTH (23)
+//
+// C.SQLITE_OK (0)
+// C.SQLITE_ERROR (1)
+// C.SQLITE_AUTH (23)
func (c *SQLiteConn) authenticate(username, password string) int {
// Allocate C Variables
cuser := C.CString(username)
@@ -155,9 +157,10 @@ func (c *SQLiteConn) AuthUserAdd(username, password string, admin bool) error {
// It is however exported for usage within SQL by the user.
//
// Returns:
-// C.SQLITE_OK (0)
-// C.SQLITE_ERROR (1)
-// C.SQLITE_AUTH (23)
+//
+// C.SQLITE_OK (0)
+// C.SQLITE_ERROR (1)
+// C.SQLITE_AUTH (23)
func (c *SQLiteConn) authUserAdd(username, password string, admin int) int {
// Allocate C Variables
cuser := C.CString(username)
@@ -207,9 +210,10 @@ func (c *SQLiteConn) AuthUserChange(username, password string, admin bool) error
// It is however exported for usage within SQL by the user.
//
// Returns:
-// C.SQLITE_OK (0)
-// C.SQLITE_ERROR (1)
-// C.SQLITE_AUTH (23)
+//
+// C.SQLITE_OK (0)
+// C.SQLITE_ERROR (1)
+// C.SQLITE_AUTH (23)
func (c *SQLiteConn) authUserChange(username, password string, admin int) int {
// Allocate C Variables
cuser := C.CString(username)
@@ -249,9 +253,10 @@ func (c *SQLiteConn) AuthUserDelete(username string) error {
// It is however exported for usage within SQL by the user.
//
// Returns:
-// C.SQLITE_OK (0)
-// C.SQLITE_ERROR (1)
-// C.SQLITE_AUTH (23)
+//
+// C.SQLITE_OK (0)
+// C.SQLITE_ERROR (1)
+// C.SQLITE_AUTH (23)
func (c *SQLiteConn) authUserDelete(username string) int {
// Allocate C Variables
cuser := C.CString(username)
@@ -280,8 +285,9 @@ func (c *SQLiteConn) AuthEnabled() (exists bool) {
// It is however exported for usage within SQL by the user.
//
// Returns:
-// 0 - Disabled
-// 1 - Enabled
+//
+// 0 - Disabled
+// 1 - Enabled
func (c *SQLiteConn) authEnabled() int {
return int(C._sqlite3_auth_enabled(c.db))
}
diff --git a/dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.17/sqlite3_opt_userauth_omit.go b/dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.22/sqlite3_opt_userauth_omit.go
index 302cd57..15370df 100644
--- a/dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.17/sqlite3_opt_userauth_omit.go
+++ b/dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.22/sqlite3_opt_userauth_omit.go
@@ -3,6 +3,7 @@
// Use of this source code is governed by an MIT-style
// license that can be found in the LICENSE file.
+//go:build !sqlite_userauth
// +build !sqlite_userauth
package sqlite3
@@ -17,7 +18,7 @@ import (
// If a database contains the SQLITE_USER table, then the
// call to Authenticate must be invoked with an
// appropriate username and password prior to enable read and write
-//access to the database.
+// access to the database.
//
// Return SQLITE_OK on success or SQLITE_ERROR if the username/password
// combination is incorrect or unknown.
@@ -34,9 +35,10 @@ func (c *SQLiteConn) Authenticate(username, password string) error {
// It is however exported for usage within SQL by the user.
//
// Returns:
-// C.SQLITE_OK (0)
-// C.SQLITE_ERROR (1)
-// C.SQLITE_AUTH (23)
+//
+// C.SQLITE_OK (0)
+// C.SQLITE_ERROR (1)
+// C.SQLITE_AUTH (23)
func (c *SQLiteConn) authenticate(username, password string) int {
// NOOP
return 0
@@ -65,9 +67,10 @@ func (c *SQLiteConn) AuthUserAdd(username, password string, admin bool) error {
// It is however exported for usage within SQL by the user.
//
// Returns:
-// C.SQLITE_OK (0)
-// C.SQLITE_ERROR (1)
-// C.SQLITE_AUTH (23)
+//
+// C.SQLITE_OK (0)
+// C.SQLITE_ERROR (1)
+// C.SQLITE_AUTH (23)
func (c *SQLiteConn) authUserAdd(username, password string, admin int) int {
// NOOP
return 0
@@ -96,9 +99,10 @@ func (c *SQLiteConn) AuthUserChange(username, password string, admin bool) error
// It is however exported for usage within SQL by the user.
//
// Returns:
-// C.SQLITE_OK (0)
-// C.SQLITE_ERROR (1)
-// C.SQLITE_AUTH (23)
+//
+// C.SQLITE_OK (0)
+// C.SQLITE_ERROR (1)
+// C.SQLITE_AUTH (23)
func (c *SQLiteConn) authUserChange(username, password string, admin int) int {
// NOOP
return 0
@@ -122,9 +126,10 @@ func (c *SQLiteConn) AuthUserDelete(username string) error {
// It is however exported for usage within SQL by the user.
//
// Returns:
-// C.SQLITE_OK (0)
-// C.SQLITE_ERROR (1)
-// C.SQLITE_AUTH (23)
+//
+// C.SQLITE_OK (0)
+// C.SQLITE_ERROR (1)
+// C.SQLITE_AUTH (23)
func (c *SQLiteConn) authUserDelete(username string) int {
// NOOP
return 0
@@ -142,8 +147,9 @@ func (c *SQLiteConn) AuthEnabled() (exists bool) {
// It is however exported for usage within SQL by the user.
//
// Returns:
-// 0 - Disabled
-// 1 - Enabled
+//
+// 0 - Disabled
+// 1 - Enabled
func (c *SQLiteConn) authEnabled() int {
// NOOP
return 0
diff --git a/dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.17/sqlite3_opt_userauth_test.go b/dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.22/sqlite3_opt_userauth_test.go
index 543f48e..12e1151 100644
--- a/dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.17/sqlite3_opt_userauth_test.go
+++ b/dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.22/sqlite3_opt_userauth_test.go
@@ -3,6 +3,7 @@
// Use of this source code is governed by an MIT-style
// license that can be found in the LICENSE file.
+//go:build sqlite_userauth
// +build sqlite_userauth
package sqlite3
diff --git a/dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.17/sqlite3_opt_vacuum_full.go b/dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.22/sqlite3_opt_vacuum_full.go
index 5185a96..df13c9d 100644
--- a/dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.17/sqlite3_opt_vacuum_full.go
+++ b/dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.22/sqlite3_opt_vacuum_full.go
@@ -4,6 +4,7 @@
// Use of this source code is governed by an MIT-style
// license that can be found in the LICENSE file.
+//go:build sqlite_vacuum_full
// +build sqlite_vacuum_full
package sqlite3
diff --git a/dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.17/sqlite3_opt_vacuum_incr.go b/dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.22/sqlite3_opt_vacuum_incr.go
index a9d8a18..a2e4881 100644
--- a/dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.17/sqlite3_opt_vacuum_incr.go
+++ b/dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.22/sqlite3_opt_vacuum_incr.go
@@ -4,6 +4,7 @@
// Use of this source code is governed by an MIT-style
// license that can be found in the LICENSE file.
+//go:build sqlite_vacuum_incr
// +build sqlite_vacuum_incr
package sqlite3
diff --git a/dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.17/sqlite3_opt_vtable.go b/dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.22/sqlite3_opt_vtable.go
index 4a93c46..9b164b3 100644
--- a/dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.17/sqlite3_opt_vtable.go
+++ b/dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.22/sqlite3_opt_vtable.go
@@ -3,6 +3,7 @@
// Use of this source code is governed by an MIT-style
// license that can be found in the LICENSE file.
+//go:build sqlite_vtable || vtable
// +build sqlite_vtable vtable
package sqlite3
@@ -516,7 +517,7 @@ func goMDestroy(pClientData unsafe.Pointer) {
func goVFilter(pCursor unsafe.Pointer, idxNum C.int, idxName *C.char, argc C.int, argv **C.sqlite3_value) *C.char {
vtc := lookupHandle(pCursor).(*sqliteVTabCursor)
args := (*[(math.MaxInt32 - 1) / unsafe.Sizeof((*C.sqlite3_value)(nil))]*C.sqlite3_value)(unsafe.Pointer(argv))[:argc:argc]
- vals := make([]interface{}, 0, argc)
+ vals := make([]any, 0, argc)
for _, v := range args {
conv, err := callbackArgGeneric(v)
if err != nil {
@@ -588,7 +589,7 @@ func goVUpdate(pVTab unsafe.Pointer, argc C.int, argv **C.sqlite3_value, pRowid
if v, ok := vt.vTab.(VTabUpdater); ok {
// convert argv
args := (*[(math.MaxInt32 - 1) / unsafe.Sizeof((*C.sqlite3_value)(nil))]*C.sqlite3_value)(unsafe.Pointer(argv))[:argc:argc]
- vals := make([]interface{}, 0, argc)
+ vals := make([]any, 0, argc)
for _, v := range args {
conv, err := callbackArgGeneric(v)
if err != nil {
@@ -662,9 +663,9 @@ type VTab interface {
// deleted.
// See: https://sqlite.org/vtab.html#xupdate
type VTabUpdater interface {
- Delete(interface{}) error
- Insert(interface{}, []interface{}) (int64, error)
- Update(interface{}, []interface{}) error
+ Delete(any) error
+ Insert(any, []any) (int64, error)
+ Update(any, []any) error
}
// VTabCursor describes cursors that point into the virtual table and are used
@@ -673,7 +674,7 @@ type VTabCursor interface {
// http://sqlite.org/vtab.html#xclose
Close() error
// http://sqlite.org/vtab.html#xfilter
- Filter(idxNum int, idxStr string, vals []interface{}) error
+ Filter(idxNum int, idxStr string, vals []any) error
// http://sqlite.org/vtab.html#xnext
Next() error
// http://sqlite.org/vtab.html#xeof
diff --git a/dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.17/sqlite3_opt_vtable_test.go b/dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.22/sqlite3_opt_vtable_test.go
index aae646d..64511e2 100644
--- a/dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.17/sqlite3_opt_vtable_test.go
+++ b/dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.22/sqlite3_opt_vtable_test.go
@@ -98,7 +98,7 @@ func (vc *testVTabCursor) Close() error {
return nil
}
-func (vc *testVTabCursor) Filter(idxNum int, idxStr string, vals []interface{}) error {
+func (vc *testVTabCursor) Filter(idxNum int, idxStr string, vals []any) error {
vc.index = 0
return nil
}
@@ -236,10 +236,10 @@ func TestVUpdate(t *testing.T) {
if len(vt.data) != 2 {
t.Fatalf("expected table vt to have exactly 2 rows, got: %d", len(vt.data))
}
- if !reflect.DeepEqual(vt.data[0], []interface{}{int64(115), "b", "c"}) {
+ if !reflect.DeepEqual(vt.data[0], []any{int64(115), "b", "c"}) {
t.Fatalf("expected table vt entry 0 to be [115 b c], instead: %v", vt.data[0])
}
- if !reflect.DeepEqual(vt.data[1], []interface{}{int64(116), "d", "e"}) {
+ if !reflect.DeepEqual(vt.data[1], []any{int64(116), "d", "e"}) {
t.Fatalf("expected table vt entry 1 to be [116 d e], instead: %v", vt.data[1])
}
@@ -273,10 +273,10 @@ func TestVUpdate(t *testing.T) {
if len(vt.data) != 2 {
t.Fatalf("expected table vt to have exactly 2 rows, got: %d", len(vt.data))
}
- if !reflect.DeepEqual(vt.data[0], []interface{}{int64(115), "b", "c"}) {
+ if !reflect.DeepEqual(vt.data[0], []any{int64(115), "b", "c"}) {
t.Fatalf("expected table vt entry 0 to be [115 b c], instead: %v", vt.data[0])
}
- if !reflect.DeepEqual(vt.data[1], []interface{}{int64(117), "f", "e"}) {
+ if !reflect.DeepEqual(vt.data[1], []any{int64(117), "f", "e"}) {
t.Fatalf("expected table vt entry 1 to be [117 f e], instead: %v", vt.data[1])
}
@@ -297,7 +297,7 @@ func TestVUpdate(t *testing.T) {
if len(vt.data) != 1 {
t.Fatalf("expected table vt to have exactly 1 row, got: %d", len(vt.data))
}
- if !reflect.DeepEqual(vt.data[0], []interface{}{int64(115), "b", "c"}) {
+ if !reflect.DeepEqual(vt.data[0], []any{int64(115), "b", "c"}) {
t.Fatalf("expected table vt entry 0 to be [115 b c], instead: %v", vt.data[0])
}
@@ -353,7 +353,7 @@ func (m *vtabUpdateModule) Create(c *SQLiteConn, args []string) (VTab, error) {
}
// create table
- vtab := &vtabUpdateTable{m.t, dbname, tname, cols, typs, make([][]interface{}, 0)}
+ vtab := &vtabUpdateTable{m.t, dbname, tname, cols, typs, make([][]any, 0)}
m.tables[tname] = vtab
return vtab, nil
}
@@ -370,7 +370,7 @@ type vtabUpdateTable struct {
name string
cols []string
typs []string
- data [][]interface{}
+ data [][]any
}
func (t *vtabUpdateTable) Open() (VTabCursor, error) {
@@ -389,7 +389,7 @@ func (t *vtabUpdateTable) Destroy() error {
return nil
}
-func (t *vtabUpdateTable) Insert(id interface{}, vals []interface{}) (int64, error) {
+func (t *vtabUpdateTable) Insert(id any, vals []any) (int64, error) {
var i int64
if id == nil {
i, t.data = int64(len(t.data)), append(t.data, vals)
@@ -407,7 +407,7 @@ func (t *vtabUpdateTable) Insert(id interface{}, vals []interface{}) (int64, err
return i, nil
}
-func (t *vtabUpdateTable) Update(id interface{}, vals []interface{}) error {
+func (t *vtabUpdateTable) Update(id any, vals []any) error {
i, ok := id.(int64)
if !ok {
return fmt.Errorf("id is invalid type: %T", id)
@@ -422,7 +422,7 @@ func (t *vtabUpdateTable) Update(id interface{}, vals []interface{}) error {
return nil
}
-func (t *vtabUpdateTable) Delete(id interface{}) error {
+func (t *vtabUpdateTable) Delete(id any) error {
i, ok := id.(int64)
if !ok {
return fmt.Errorf("id is invalid type: %T", id)
@@ -465,7 +465,7 @@ func (c *vtabUpdateCursor) Column(ctxt *SQLiteContext, col int) error {
return nil
}
-func (c *vtabUpdateCursor) Filter(ixNum int, ixName string, vals []interface{}) error {
+func (c *vtabUpdateCursor) Filter(ixNum int, ixName string, vals []any) error {
return nil
}
@@ -547,7 +547,7 @@ func (vc *testVTabCursorEponymousOnly) Close() error {
return nil
}
-func (vc *testVTabCursorEponymousOnly) Filter(idxNum int, idxStr string, vals []interface{}) error {
+func (vc *testVTabCursorEponymousOnly) Filter(idxNum int, idxStr string, vals []any) error {
vc.index = 0
return nil
}
diff --git a/dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.17/sqlite3_other.go b/dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.22/sqlite3_other.go
index 077d3c6..1f9a755 100644
--- a/dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.17/sqlite3_other.go
+++ b/dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.22/sqlite3_other.go
@@ -3,6 +3,7 @@
// Use of this source code is governed by an MIT-style
// license that can be found in the LICENSE file.
+//go:build !windows
// +build !windows
package sqlite3
diff --git a/dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.17/sqlite3_solaris.go b/dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.22/sqlite3_solaris.go
index 102f90c..fb4d325 100644
--- a/dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.17/sqlite3_solaris.go
+++ b/dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.22/sqlite3_solaris.go
@@ -3,6 +3,7 @@
// Use of this source code is governed by an MIT-style
// license that can be found in the LICENSE file.
+//go:build solaris
// +build solaris
package sqlite3
diff --git a/dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.17/sqlite3_test.go b/dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.22/sqlite3_test.go
index 326361e..63c939d 100644
--- a/dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.17/sqlite3_test.go
+++ b/dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.22/sqlite3_test.go
@@ -625,7 +625,7 @@ func TestTimestamp(t *testing.T) {
timestamp3 := time.Date(2012, time.November, 4, 0, 0, 0, 0, time.UTC)
tzTest := time.FixedZone("TEST", -9*3600-13*60)
tests := []struct {
- value interface{}
+ value any
expected time.Time
}{
{"nonsense", time.Time{}},
@@ -827,7 +827,7 @@ func TestFloat32(t *testing.T) {
t.Fatal("Unable to query results:", err)
}
- var id interface{}
+ var id any
if err := rows.Scan(&id); err != nil {
t.Fatal("Unable to scan results:", err)
}
@@ -854,7 +854,7 @@ func TestNull(t *testing.T) {
t.Fatal("Unable to query results:", err)
}
- var v interface{}
+ var v any
if err := rows.Scan(&v); err != nil {
t.Fatal("Unable to scan results:", err)
}
@@ -998,7 +998,7 @@ func TestTimezoneConversion(t *testing.T) {
timestamp2 := time.Date(2006, time.January, 2, 15, 4, 5, 123456789, time.UTC)
timestamp3 := time.Date(2012, time.November, 4, 0, 0, 0, 0, time.UTC)
tests := []struct {
- value interface{}
+ value any
expected time.Time
}{
{"nonsense", time.Time{}.In(loc)},
@@ -1128,7 +1128,7 @@ func TestQueryer(t *testing.T) {
if err != nil {
t.Error("Failed to db.Query:", err)
}
- if id != n + 1 {
+ if id != n+1 {
t.Error("Failed to db.Query: not matched results")
}
n = n + 1
@@ -1291,7 +1291,7 @@ const CurrentTimeStamp = "2006-01-02 15:04:05"
type TimeStamp struct{ *time.Time }
-func (t TimeStamp) Scan(value interface{}) error {
+func (t TimeStamp) Scan(value any) error {
var err error
switch v := value.(type) {
case string:
@@ -1335,7 +1335,7 @@ func TestFunctionRegistration(t *testing.T) {
regex := func(re, s string) (bool, error) {
return regexp.MatchString(re, s)
}
- generic := func(a interface{}) int64 {
+ generic := func(a any) int64 {
switch a.(type) {
case int64:
return 1
@@ -1356,7 +1356,7 @@ func TestFunctionRegistration(t *testing.T) {
}
return ret
}
- variadicGeneric := func(a ...interface{}) int64 {
+ variadicGeneric := func(a ...any) int64 {
return int64(len(a))
}
@@ -1406,7 +1406,7 @@ func TestFunctionRegistration(t *testing.T) {
ops := []struct {
query string
- expected interface{}
+ expected any
}{
{"SELECT addi8_16_32(1,2)", int32(3)},
{"SELECT addi64(1,2)", int64(3)},
@@ -1497,28 +1497,28 @@ func TestAggregatorRegistration(t *testing.T) {
}
type mode struct {
- counts map[interface{}]int
- top interface{}
- topCount int
+ counts map[any]int
+ top any
+ topCount int
}
func newMode() *mode {
- return &mode{
- counts: map[interface{}]int{},
- }
+ return &mode{
+ counts: map[any]int{},
+ }
}
-func (m *mode) Step(x interface{}) {
- m.counts[x]++
- c := m.counts[x]
- if c > m.topCount {
- m.top = x
- m.topCount = c
- }
+func (m *mode) Step(x any) {
+ m.counts[x]++
+ c := m.counts[x]
+ if c > m.topCount {
+ m.top = x
+ m.topCount = c
+ }
}
-func (m *mode) Done() interface{} {
- return m.top
+func (m *mode) Done() any {
+ return m.top
}
func TestAggregatorRegistration_GenericReturn(t *testing.T) {
@@ -1534,19 +1534,19 @@ func TestAggregatorRegistration_GenericReturn(t *testing.T) {
defer db.Close()
_, err = db.Exec("create table foo (department integer, profits integer)")
- if err != nil {
- t.Fatal("Failed to create table:", err)
- }
- _, err = db.Exec("insert into foo values (1, 10), (1, 20), (1, 45), (2, 42), (2, 115), (2, 20)")
- if err != nil {
- t.Fatal("Failed to insert records:", err)
- }
+ if err != nil {
+ t.Fatal("Failed to create table:", err)
+ }
+ _, err = db.Exec("insert into foo values (1, 10), (1, 20), (1, 45), (2, 42), (2, 115), (2, 20)")
+ if err != nil {
+ t.Fatal("Failed to insert records:", err)
+ }
var mode int
- err = db.QueryRow("select mode(profits) from foo").Scan(&mode)
- if err != nil {
- t.Fatal("MODE query error:", err)
- }
+ err = db.QueryRow("select mode(profits) from foo").Scan(&mode)
+ if err != nil {
+ t.Fatal("MODE query error:", err)
+ }
if mode != 20 {
t.Fatal("Got incorrect mode. Wanted 20, got: ", mode)
@@ -1871,7 +1871,7 @@ func TestNonColumnString(t *testing.T) {
}
defer db.Close()
- var x interface{}
+ var x any
if err := db.QueryRow("SELECT 'hello'").Scan(&x); err != nil {
t.Fatal(err)
}
@@ -2113,7 +2113,7 @@ var benchmarks = []testing.InternalBenchmark{
{Name: "BenchmarkStmtRows", F: benchmarkStmtRows},
}
-func (db *TestDB) mustExec(sql string, args ...interface{}) sql.Result {
+func (db *TestDB) mustExec(sql string, args ...any) sql.Result {
res, err := db.Exec(sql, args...)
if err != nil {
db.Fatalf("Error running %q: %v", sql, err)
diff --git a/dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.17/sqlite3_trace.go b/dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.22/sqlite3_trace.go
index 56bb914..6c47cce 100644
--- a/dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.17/sqlite3_trace.go
+++ b/dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.22/sqlite3_trace.go
@@ -3,6 +3,7 @@
// Use of this source code is governed by an MIT-style
// license that can be found in the LICENSE file.
+//go:build sqlite_trace || trace
// +build sqlite_trace trace
package sqlite3
diff --git a/dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.17/sqlite3_type.go b/dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.22/sqlite3_type.go
index 0fd8210..20537a0 100644
--- a/dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.17/sqlite3_type.go
+++ b/dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.22/sqlite3_type.go
@@ -74,7 +74,7 @@ func scanType(cdt string) reflect.Type {
case SQLITE_TIME:
return reflect.TypeOf(sql.NullTime{})
}
- return reflect.TypeOf(new(interface{}))
+ return reflect.TypeOf(new(any))
}
func databaseTypeConvSqlite(t string) int {
diff --git a/dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.17/sqlite3_usleep_windows.go b/dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.22/sqlite3_usleep_windows.go
index b6739bf..6527f6f 100644
--- a/dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.17/sqlite3_usleep_windows.go
+++ b/dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.22/sqlite3_usleep_windows.go
@@ -3,6 +3,7 @@
// Use of this source code is governed by an MIT-style
// license that can be found in the LICENSE file.
+//go:build cgo
// +build cgo
package sqlite3
diff --git a/dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.17/sqlite3_windows.go b/dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.22/sqlite3_windows.go
index 81aa2ab..f863bcd 100644
--- a/dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.17/sqlite3_windows.go
+++ b/dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.22/sqlite3_windows.go
@@ -3,6 +3,7 @@
// Use of this source code is governed by an MIT-style
// license that can be found in the LICENSE file.
+//go:build windows
// +build windows
package sqlite3
diff --git a/dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.17/sqlite3ext.h b/dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.22/sqlite3ext.h
index 819e2e3..935437b 100644
--- a/dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.17/sqlite3ext.h
+++ b/dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.22/sqlite3ext.h
@@ -366,6 +366,11 @@ struct sqlite3_api_routines {
int (*value_encoding)(sqlite3_value*);
/* Version 3.41.0 and later */
int (*is_interrupted)(sqlite3*);
+ /* Version 3.43.0 and later */
+ int (*stmt_explain)(sqlite3_stmt*,int);
+ /* Version 3.44.0 and later */
+ void *(*get_clientdata)(sqlite3*,const char*);
+ int (*set_clientdata)(sqlite3*, const char*, void*, void(*)(void*));
};
/*
@@ -694,6 +699,11 @@ typedef int (*sqlite3_loadext_entry)(
#define sqlite3_value_encoding sqlite3_api->value_encoding
/* Version 3.41.0 and later */
#define sqlite3_is_interrupted sqlite3_api->is_interrupted
+/* Version 3.43.0 and later */
+#define sqlite3_stmt_explain sqlite3_api->stmt_explain
+/* Version 3.44.0 and later */
+#define sqlite3_get_clientdata sqlite3_api->get_clientdata
+#define sqlite3_set_clientdata sqlite3_api->set_clientdata
#endif /* !defined(SQLITE_CORE) && !defined(SQLITE_OMIT_LOAD_EXTENSION) */
#if !defined(SQLITE_CORE) && !defined(SQLITE_OMIT_LOAD_EXTENSION)
diff --git a/dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.17/static_mock.go b/dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.22/static_mock.go
index f19e842..d2c5a27 100644
--- a/dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.17/static_mock.go
+++ b/dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.22/static_mock.go
@@ -3,6 +3,7 @@
// Use of this source code is governed by an MIT-style
// license that can be found in the LICENSE file.
+//go:build !cgo
// +build !cgo
package sqlite3
@@ -28,10 +29,10 @@ type (
)
func (SQLiteDriver) Open(s string) (driver.Conn, error) { return nil, errorMsg }
-func (c *SQLiteConn) RegisterAggregator(string, interface{}, bool) error { return errorMsg }
+func (c *SQLiteConn) RegisterAggregator(string, any, bool) error { return errorMsg }
func (c *SQLiteConn) RegisterAuthorizer(func(int, string, string, string) int) {}
func (c *SQLiteConn) RegisterCollation(string, func(string, string) int) error { return errorMsg }
func (c *SQLiteConn) RegisterCommitHook(func() int) {}
-func (c *SQLiteConn) RegisterFunc(string, interface{}, bool) error { return errorMsg }
+func (c *SQLiteConn) RegisterFunc(string, any, bool) error { return errorMsg }
func (c *SQLiteConn) RegisterRollbackHook(func()) {}
func (c *SQLiteConn) RegisterUpdateHook(func(int, string, string, int64)) {}
diff --git a/dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.5.1/.github/CODEOWNERS b/dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.5.1/.github/CODEOWNERS
new file mode 100644
index 0000000..1af2323
--- /dev/null
+++ b/dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.5.1/.github/CODEOWNERS
@@ -0,0 +1 @@
+doctests/* @dmaier-redislabs
diff --git a/dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/.github/FUNDING.yml b/dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.5.1/.github/FUNDING.yml
index 707670d..707670d 100644
--- a/dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/.github/FUNDING.yml
+++ b/dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.5.1/.github/FUNDING.yml
diff --git a/dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/.github/ISSUE_TEMPLATE/bug_report.md b/dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.5.1/.github/ISSUE_TEMPLATE/bug_report.md
index 3f934f8..3f934f8 100644
--- a/dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/.github/ISSUE_TEMPLATE/bug_report.md
+++ b/dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.5.1/.github/ISSUE_TEMPLATE/bug_report.md
diff --git a/dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/.github/ISSUE_TEMPLATE/config.yml b/dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.5.1/.github/ISSUE_TEMPLATE/config.yml
index e86d7a6..e86d7a6 100644
--- a/dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/.github/ISSUE_TEMPLATE/config.yml
+++ b/dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.5.1/.github/ISSUE_TEMPLATE/config.yml
diff --git a/dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/.github/dependabot.yml b/dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.5.1/.github/dependabot.yml
index 77b7be5..77b7be5 100644
--- a/dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/.github/dependabot.yml
+++ b/dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.5.1/.github/dependabot.yml
diff --git a/dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.5.1/.github/release-drafter-config.yml b/dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.5.1/.github/release-drafter-config.yml
new file mode 100644
index 0000000..9ccb28a
--- /dev/null
+++ b/dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.5.1/.github/release-drafter-config.yml
@@ -0,0 +1,48 @@
+name-template: '$NEXT_MINOR_VERSION'
+tag-template: 'v$NEXT_MINOR_VERSION'
+autolabeler:
+ - label: 'maintenance'
+ files:
+ - '*.md'
+ - '.github/*'
+ - label: 'bug'
+ branch:
+ - '/bug-.+'
+ - label: 'maintenance'
+ branch:
+ - '/maintenance-.+'
+ - label: 'feature'
+ branch:
+ - '/feature-.+'
+categories:
+ - title: 'Breaking Changes'
+ labels:
+ - 'breakingchange'
+ - title: '🧪 Experimental Features'
+ labels:
+ - 'experimental'
+ - title: '🚀 New Features'
+ labels:
+ - 'feature'
+ - 'enhancement'
+ - title: '🐛 Bug Fixes'
+ labels:
+ - 'fix'
+ - 'bugfix'
+ - 'bug'
+ - 'BUG'
+ - title: '🧰 Maintenance'
+ label: 'maintenance'
+change-template: '- $TITLE (#$NUMBER)'
+exclude-labels:
+ - 'skip-changelog'
+template: |
+ # Changes
+
+ $CHANGES
+
+ ## Contributors
+ We'd like to thank all the contributors who worked on this release!
+
+ $CONTRIBUTORS
+
diff --git a/dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.5.1/.github/spellcheck-settings.yml b/dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.5.1/.github/spellcheck-settings.yml
new file mode 100644
index 0000000..b8ca6cc
--- /dev/null
+++ b/dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.5.1/.github/spellcheck-settings.yml
@@ -0,0 +1,29 @@
+matrix:
+- name: Markdown
+ expect_match: false
+ apsell:
+ lang: en
+ d: en_US
+ ignore-case: true
+ dictionary:
+ wordlists:
+ - .github/wordlist.txt
+ output: wordlist.dic
+ pipeline:
+ - pyspelling.filters.markdown:
+ markdown_extensions:
+ - markdown.extensions.extra:
+ - pyspelling.filters.html:
+ comments: false
+ attributes:
+ - alt
+ ignores:
+ - ':matches(code, pre)'
+ - code
+ - pre
+ - blockquote
+ - img
+ sources:
+ - 'README.md'
+ - 'FAQ.md'
+ - 'docs/**'
diff --git a/dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.5.1/.github/wordlist.txt b/dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.5.1/.github/wordlist.txt
new file mode 100644
index 0000000..52fdc1b
--- /dev/null
+++ b/dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.5.1/.github/wordlist.txt
@@ -0,0 +1,60 @@
+ACLs
+autoload
+autoloader
+autoloading
+analytics
+Autoloading
+backend
+backends
+behaviour
+CAS
+ClickHouse
+config
+customizable
+Customizable
+dataset
+de
+DisableIdentity
+ElastiCache
+extensibility
+FPM
+Golang
+IANA
+keyspace
+keyspaces
+Kvrocks
+localhost
+Lua
+MSSQL
+namespace
+NoSQL
+ORM
+Packagist
+PhpRedis
+pipelining
+pluggable
+Predis
+PSR
+Quickstart
+README
+rebalanced
+rebalancing
+redis
+Redis
+RocksDB
+runtime
+SHA
+sharding
+SETNAME
+SSL
+struct
+stunnel
+TCP
+TLS
+uri
+URI
+url
+variadic
+RedisStack
+RedisGears
+RedisTimeseries \ No newline at end of file
diff --git a/dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/.github/workflows/build.yml b/dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.5.1/.github/workflows/build.yml
index a574e2e..e788016 100644
--- a/dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/.github/workflows/build.yml
+++ b/dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.5.1/.github/workflows/build.yml
@@ -2,9 +2,12 @@ name: Go
on:
push:
- branches: [master]
+ branches: [master, v9]
pull_request:
- branches: [master]
+ branches: [master, v9]
+
+permissions:
+ contents: read
jobs:
build:
@@ -13,11 +16,11 @@ jobs:
strategy:
fail-fast: false
matrix:
- go-version: [1.16.x, 1.17.x]
+ go-version: [1.20.x, 1.21.x]
services:
redis:
- image: redis
+ image: redis/redis-stack-server:edge
options: >-
--health-cmd "redis-cli ping" --health-interval 10s --health-timeout 5s --health-retries 5
ports:
@@ -25,12 +28,12 @@ jobs:
steps:
- name: Set up ${{ matrix.go-version }}
- uses: actions/setup-go@v2
+ uses: actions/setup-go@v5
with:
go-version: ${{ matrix.go-version }}
- name: Checkout code
- uses: actions/checkout@v3
+ uses: actions/checkout@v4
- name: Test
run: make test
diff --git a/dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.5.1/.github/workflows/doctests.yaml b/dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.5.1/.github/workflows/doctests.yaml
new file mode 100644
index 0000000..6e49e64
--- /dev/null
+++ b/dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.5.1/.github/workflows/doctests.yaml
@@ -0,0 +1,41 @@
+name: Documentation Tests
+
+on:
+ push:
+ branches: [master, examples]
+ pull_request:
+ branches: [master, examples]
+
+permissions:
+ contents: read
+
+jobs:
+ doctests:
+ name: doctests
+ runs-on: ubuntu-latest
+
+ services:
+ redis-stack:
+ image: redis/redis-stack-server:latest
+ options: >-
+ --health-cmd "redis-cli ping" --health-interval 10s --health-timeout 5s --health-retries 5
+ ports:
+ - 6379:6379
+
+ strategy:
+ fail-fast: false
+ matrix:
+ go-version: [ "1.18", "1.19", "1.20", "1.21" ]
+
+ steps:
+ - name: Set up ${{ matrix.go-version }}
+ uses: actions/setup-go@v5
+ with:
+ go-version: ${{ matrix.go-version }}
+
+ - name: Checkout code
+ uses: actions/checkout@v4
+
+ - name: Test doc examples
+ working-directory: ./doctests
+ run: go test
diff --git a/dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.5.1/.github/workflows/golangci-lint.yml b/dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.5.1/.github/workflows/golangci-lint.yml
new file mode 100644
index 0000000..a139f5d
--- /dev/null
+++ b/dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.5.1/.github/workflows/golangci-lint.yml
@@ -0,0 +1,26 @@
+name: golangci-lint
+
+on:
+ push:
+ tags:
+ - v*
+ branches:
+ - master
+ - main
+ - v9
+ pull_request:
+
+permissions:
+ contents: read
+
+jobs:
+ golangci:
+ permissions:
+ contents: read # for actions/checkout to fetch code
+ pull-requests: read # for golangci/golangci-lint-action to fetch pull requests
+ name: lint
+ runs-on: ubuntu-latest
+ steps:
+ - uses: actions/checkout@v4
+ - name: golangci-lint
+ uses: golangci/golangci-lint-action@v4
diff --git a/dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.5.1/.github/workflows/release-drafter.yml b/dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.5.1/.github/workflows/release-drafter.yml
new file mode 100644
index 0000000..6695abf
--- /dev/null
+++ b/dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.5.1/.github/workflows/release-drafter.yml
@@ -0,0 +1,24 @@
+name: Release Drafter
+
+on:
+ push:
+ # branches to consider in the event; optional, defaults to all
+ branches:
+ - master
+
+permissions: {}
+jobs:
+ update_release_draft:
+ permissions:
+ pull-requests: write # to add label to PR (release-drafter/release-drafter)
+ contents: write # to create a github release (release-drafter/release-drafter)
+
+ runs-on: ubuntu-latest
+ steps:
+ # Drafts your next Release notes as Pull Requests are merged into "master"
+ - uses: release-drafter/release-drafter@v6
+ with:
+ # (Optional) specify config name to use, relative to .github/. Default: release-drafter.yml
+ config-name: release-drafter-config.yml
+ env:
+ GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
diff --git a/dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.5.1/.github/workflows/spellcheck.yml b/dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.5.1/.github/workflows/spellcheck.yml
new file mode 100644
index 0000000..f739a54
--- /dev/null
+++ b/dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.5.1/.github/workflows/spellcheck.yml
@@ -0,0 +1,14 @@
+name: spellcheck
+on:
+ pull_request:
+jobs:
+ check-spelling:
+ runs-on: ubuntu-latest
+ steps:
+ - name: Checkout
+ uses: actions/checkout@v4
+ - name: Check Spelling
+ uses: rojopolis/spellcheck-github-actions@0.36.0
+ with:
+ config_path: .github/spellcheck-settings.yml
+ task_name: Markdown
diff --git a/dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.5.1/.github/workflows/stale-issues.yml b/dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.5.1/.github/workflows/stale-issues.yml
new file mode 100644
index 0000000..445af1c
--- /dev/null
+++ b/dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.5.1/.github/workflows/stale-issues.yml
@@ -0,0 +1,25 @@
+name: "Close stale issues"
+on:
+ schedule:
+ - cron: "0 0 * * *"
+
+permissions: {}
+jobs:
+ stale:
+ permissions:
+ issues: write # to close stale issues (actions/stale)
+ pull-requests: write # to close stale PRs (actions/stale)
+
+ runs-on: ubuntu-latest
+ steps:
+ - uses: actions/stale@v9
+ with:
+ repo-token: ${{ secrets.GITHUB_TOKEN }}
+ stale-issue-message: 'This issue is marked stale. It will be closed in 30 days if it is not updated.'
+ stale-pr-message: 'This pull request is marked stale. It will be closed in 30 days if it is not updated.'
+ days-before-stale: 365
+ days-before-close: 30
+ stale-issue-label: "Stale"
+ stale-pr-label: "Stale"
+ operations-per-run: 10
+ remove-stale-when-updated: true
diff --git a/dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.5.1/.github/workflows/test-redis-enterprise.yml b/dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.5.1/.github/workflows/test-redis-enterprise.yml
new file mode 100644
index 0000000..82946dd
--- /dev/null
+++ b/dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.5.1/.github/workflows/test-redis-enterprise.yml
@@ -0,0 +1,59 @@
+name: RE Tests
+
+on:
+ push:
+ branches: [master]
+ pull_request:
+
+permissions:
+ contents: read
+
+jobs:
+ build:
+ name: build
+ runs-on: ubuntu-latest
+ strategy:
+ fail-fast: false
+ matrix:
+ go-version: [1.21.x]
+ re-build: ["7.2.4-108"]
+
+ steps:
+ - name: Checkout code
+ uses: actions/checkout@v4
+
+ - name: Clone Redis EE docker repository
+ uses: actions/checkout@v4
+ with:
+ repository: RedisLabs/redis-ee-docker
+ path: redis-ee
+
+ - name: Set up ${{ matrix.go-version }}
+ uses: actions/setup-go@v5
+ with:
+ go-version: ${{ matrix.go-version }}
+
+ - name: Build cluster
+ working-directory: redis-ee
+ env:
+ IMAGE: "redislabs/redis-internal:${{ matrix.re-build }}"
+ RE_USERNAME: ${{ secrets.RE_USERNAME }}
+ RE_PASS: ${{ secrets.RE_PASS }}
+ RE_CLUSTER_NAME: ${{ secrets.RE_CLUSTER_NAME }}
+ RE_USE_OSS_CLUSTER: false
+ RE_DB_PORT: ${{ secrets.RE_DB_PORT }}
+ DOCKER_ACCESS_TOKEN: ${{ secrets.DOCKER_ACCESS_TOKEN }}
+ DOCKER_USERNAME: ${{ secrets.DOCKER_USERNAME }}
+ run: ./build.sh
+
+ - name: Test
+ env:
+ RE_CLUSTER: "1"
+ run: |
+ go test \
+ --ginkgo.skip-file="ring_test.go" \
+ --ginkgo.skip-file="sentinel_test.go" \
+ --ginkgo.skip-file="osscluster_test.go" \
+ --ginkgo.skip-file="pubsub_test.go" \
+ --ginkgo.skip-file="gears_commands_test.go" \
+ --ginkgo.label-filter='!NonRedisEnterprise'
diff --git a/dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.5.1/.gitignore b/dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.5.1/.gitignore
new file mode 100644
index 0000000..6f86889
--- /dev/null
+++ b/dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.5.1/.gitignore
@@ -0,0 +1,6 @@
+*.rdb
+testdata/*
+.idea/
+.DS_Store
+*.tar.gz
+*.dic \ No newline at end of file
diff --git a/dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/.golangci.yml b/dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.5.1/.golangci.yml
index de51455..de51455 100644
--- a/dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/.golangci.yml
+++ b/dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.5.1/.golangci.yml
diff --git a/dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/.prettierrc.yml b/dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.5.1/.prettierrc.yml
index 8b7f044..8b7f044 100644
--- a/dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/.prettierrc.yml
+++ b/dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.5.1/.prettierrc.yml
diff --git a/dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.5.1/CHANGELOG.md b/dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.5.1/CHANGELOG.md
new file mode 100644
index 0000000..297438a
--- /dev/null
+++ b/dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.5.1/CHANGELOG.md
@@ -0,0 +1,124 @@
+## [9.0.5](https://github.com/redis/go-redis/compare/v9.0.4...v9.0.5) (2023-05-29)
+
+
+### Features
+
+* Add ACL LOG ([#2536](https://github.com/redis/go-redis/issues/2536)) ([31ba855](https://github.com/redis/go-redis/commit/31ba855ddebc38fbcc69a75d9d4fb769417cf602))
+* add field protocol to setupClusterQueryParams ([#2600](https://github.com/redis/go-redis/issues/2600)) ([840c25c](https://github.com/redis/go-redis/commit/840c25cb6f320501886a82a5e75f47b491e46fbe))
+* add protocol option ([#2598](https://github.com/redis/go-redis/issues/2598)) ([3917988](https://github.com/redis/go-redis/commit/391798880cfb915c4660f6c3ba63e0c1a459e2af))
+
+
+
+## [9.0.4](https://github.com/redis/go-redis/compare/v9.0.3...v9.0.4) (2023-05-01)
+
+
+### Bug Fixes
+
+* reader float parser ([#2513](https://github.com/redis/go-redis/issues/2513)) ([46f2450](https://github.com/redis/go-redis/commit/46f245075e6e3a8bd8471f9ca67ea95fd675e241))
+
+
+### Features
+
+* add client info command ([#2483](https://github.com/redis/go-redis/issues/2483)) ([b8c7317](https://github.com/redis/go-redis/commit/b8c7317cc6af444603731f7017c602347c0ba61e))
+* no longer verify HELLO error messages ([#2515](https://github.com/redis/go-redis/issues/2515)) ([7b4f217](https://github.com/redis/go-redis/commit/7b4f2179cb5dba3d3c6b0c6f10db52b837c912c8))
+* read the structure to increase the judgment of the omitempty op… ([#2529](https://github.com/redis/go-redis/issues/2529)) ([37c057b](https://github.com/redis/go-redis/commit/37c057b8e597c5e8a0e372337f6a8ad27f6030af))
+
+
+
+## [9.0.3](https://github.com/redis/go-redis/compare/v9.0.2...v9.0.3) (2023-04-02)
+
+### New Features
+
+- feat(scan): scan time.Time sets the default decoding (#2413)
+- Add support for CLUSTER LINKS command (#2504)
+- Add support for acl dryrun command (#2502)
+- Add support for COMMAND GETKEYS & COMMAND GETKEYSANDFLAGS (#2500)
+- Add support for LCS Command (#2480)
+- Add support for BZMPOP (#2456)
+- Adding support for ZMPOP command (#2408)
+- Add support for LMPOP (#2440)
+- feat: remove pool unused fields (#2438)
+- Expiretime and PExpireTime (#2426)
+- Implement `FUNCTION` group of commands (#2475)
+- feat(zadd): add ZAddLT and ZAddGT (#2429)
+- Add: Support for COMMAND LIST command (#2491)
+- Add support for BLMPOP (#2442)
+- feat: check pipeline.Do to prevent confusion with Exec (#2517)
+- Function stats, function kill, fcall and fcall_ro (#2486)
+- feat: Add support for CLUSTER SHARDS command (#2507)
+- feat(cmd): support for adding byte,bit parameters to the bitpos command (#2498)
+
+### Fixed
+
+- fix: eval api cmd.SetFirstKeyPos (#2501)
+- fix: limit the number of connections created (#2441)
+- fixed #2462 v9 continue support dragonfly, it's Hello command return "NOAUTH Authentication required" error (#2479)
+- Fix for internal/hscan/structmap.go:89:23: undefined: reflect.Pointer (#2458)
+- fix: group lag can be null (#2448)
+
+### Maintenance
+
+- Updating to the latest version of redis (#2508)
+- Allowing for running tests on a port other than the fixed 6380 (#2466)
+- redis 7.0.8 in tests (#2450)
+- docs: Update redisotel example for v9 (#2425)
+- chore: update go mod, Upgrade golang.org/x/net version to 0.7.0 (#2476)
+- chore: add Chinese translation (#2436)
+- chore(deps): bump github.com/bsm/gomega from 1.20.0 to 1.26.0 (#2421)
+- chore(deps): bump github.com/bsm/ginkgo/v2 from 2.5.0 to 2.7.0 (#2420)
+- chore(deps): bump actions/setup-go from 3 to 4 (#2495)
+- docs: add instructions for the HSet api (#2503)
+- docs: add reading lag field comment (#2451)
+- test: update go mod before testing(go mod tidy) (#2423)
+- docs: fix comment typo (#2505)
+- test: remove testify (#2463)
+- refactor: change ListElementCmd to KeyValuesCmd. (#2443)
+- fix(appendArg): appendArg case special type (#2489)
+
+## [9.0.2](https://github.com/redis/go-redis/compare/v9.0.1...v9.0.2) (2023-02-01)
+
+### Features
+
+* upgrade OpenTelemetry, use the new metrics API. ([#2410](https://github.com/redis/go-redis/issues/2410)) ([e29e42c](https://github.com/redis/go-redis/commit/e29e42cde2755ab910d04185025dc43ce6f59c65))
+
+## v9 2023-01-30
+
+### Breaking
+
+- Changed Pipelines to not be thread-safe any more.
+
+### Added
+
+- Added support for [RESP3](https://github.com/antirez/RESP3/blob/master/spec.md) protocol. It was
+ contributed by @monkey92t who has done the majority of work in this release.
+- Added `ContextTimeoutEnabled` option that controls whether the client respects context timeouts
+ and deadlines. See
+ [Redis Timeouts](https://redis.uptrace.dev/guide/go-redis-debugging.html#timeouts) for details.
+- Added `ParseClusterURL` to parse URLs into `ClusterOptions`, for example,
+ `redis://user:password@localhost:6789?dial_timeout=3&read_timeout=6s&addr=localhost:6790&addr=localhost:6791`.
+- Added metrics instrumentation using `redisotel.IstrumentMetrics`. See
+ [documentation](https://redis.uptrace.dev/guide/go-redis-monitoring.html)
+- Added `redis.HasErrorPrefix` to help working with errors.
+
+### Changed
+
+- Removed asynchronous cancellation based on the context timeout. It was racy in v8 and is
+ completely gone in v9.
+- Reworked hook interface and added `DialHook`.
+- Replaced `redisotel.NewTracingHook` with `redisotel.InstrumentTracing`. See
+ [example](example/otel) and
+ [documentation](https://redis.uptrace.dev/guide/go-redis-monitoring.html).
+- Replaced `*redis.Z` with `redis.Z` since it is small enough to be passed as value without making
+ an allocation.
+- Renamed the option `MaxConnAge` to `ConnMaxLifetime`.
+- Renamed the option `IdleTimeout` to `ConnMaxIdleTime`.
+- Removed connection reaper in favor of `MaxIdleConns`.
+- Removed `WithContext` since `context.Context` can be passed directly as an arg.
+- Removed `Pipeline.Close` since there is no real need to explicitly manage pipeline resources and
+ it can be safely reused via `sync.Pool` etc. `Pipeline.Discard` is still available if you want to
+ reset commands for some reason.
+
+### Fixed
+
+- Improved and fixed pipeline retries.
+- As usually, added support for more commands and fixed some bugs.
diff --git a/dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.5.1/CONTRIBUTING.md b/dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.5.1/CONTRIBUTING.md
new file mode 100644
index 0000000..90030b8
--- /dev/null
+++ b/dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.5.1/CONTRIBUTING.md
@@ -0,0 +1,101 @@
+# Contributing
+
+## Introduction
+
+We appreciate your interest in considering contributing to go-redis.
+Community contributions mean a lot to us.
+
+## Contributions we need
+
+You may already know how you'd like to contribute, whether it's a fix for a bug you
+encountered, or a new feature your team wants to use.
+
+If you don't know where to start, consider improving
+documentation, bug triaging, and writing tutorials are all examples of
+helpful contributions that mean less work for you.
+
+## Your First Contribution
+
+Unsure where to begin contributing? You can start by looking through
+[help-wanted
+issues](https://github.com/redis/go-redis/issues?q=is%3Aopen+is%3Aissue+label%3ahelp-wanted).
+
+Never contributed to open source before? Here are a couple of friendly
+tutorials:
+
+- <http://makeapullrequest.com/>
+- <http://www.firsttimersonly.com/>
+
+## Getting Started
+
+Here's how to get started with your code contribution:
+
+1. Create your own fork of go-redis
+2. Do the changes in your fork
+3. If you need a development environment, run `make test`. Note: this clones and builds the latest release of [redis](https://redis.io). You also need a redis-stack-server docker, in order to run the capabilities tests. This can be started by running:
+ ```docker run -p 6379:6379 -it redis/redis-stack-server:edge```
+4. While developing, make sure the tests pass by running `make tests`
+5. If you like the change and think the project could use it, send a
+ pull request
+
+To see what else is part of the automation, run `invoke -l`
+
+## Testing
+
+Call `make test` to run all tests, including linters.
+
+Continuous Integration uses these same wrappers to run all of these
+tests against multiple versions of python. Feel free to test your
+changes against all the go versions supported, as declared by the
+[build.yml](./.github/workflows/build.yml) file.
+
+### Troubleshooting
+
+If you get any errors when running `make test`, make sure
+that you are using supported versions of Docker and go.
+
+## How to Report a Bug
+
+### Security Vulnerabilities
+
+**NOTE**: If you find a security vulnerability, do NOT open an issue.
+Email [Redis Open Source (<oss@redis.com>)](mailto:oss@redis.com) instead.
+
+In order to determine whether you are dealing with a security issue, ask
+yourself these two questions:
+
+- Can I access something that's not mine, or something I shouldn't
+ have access to?
+- Can I disable something for other people?
+
+If the answer to either of those two questions are *yes*, then you're
+probably dealing with a security issue. Note that even if you answer
+*no* to both questions, you may still be dealing with a security
+issue, so if you're unsure, just email [us](mailto:oss@redis.com).
+
+### Everything Else
+
+When filing an issue, make sure to answer these five questions:
+
+1. What version of go-redis are you using?
+2. What version of redis are you using?
+3. What did you do?
+4. What did you expect to see?
+5. What did you see instead?
+
+## Suggest a feature or enhancement
+
+If you'd like to contribute a new feature, make sure you check our
+issue list to see if someone has already proposed it. Work may already
+be underway on the feature you want or we may have rejected a
+feature like it already.
+
+If you don't see anything, open a new issue that describes the feature
+you would like and how it should work.
+
+## Code review process
+
+The core team regularly looks at pull requests. We will provide
+feedback as soon as possible. After receiving our feedback, please respond
+within two weeks. After that time, we may close your PR if it isn't
+showing any activity.
diff --git a/dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/LICENSE b/dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.5.1/LICENSE
index 298bed9..f4967db 100644
--- a/dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/LICENSE
+++ b/dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.5.1/LICENSE
@@ -1,4 +1,4 @@
-Copyright (c) 2013 The github.com/go-redis/redis Authors.
+Copyright (c) 2013 The github.com/redis/go-redis Authors.
All rights reserved.
Redistribution and use in source and binary forms, with or without
diff --git a/dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.5.1/Makefile b/dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.5.1/Makefile
new file mode 100644
index 0000000..dc2fe78
--- /dev/null
+++ b/dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.5.1/Makefile
@@ -0,0 +1,44 @@
+GO_MOD_DIRS := $(shell find . -type f -name 'go.mod' -exec dirname {} \; | sort)
+
+test: testdeps
+ set -e; for dir in $(GO_MOD_DIRS); do \
+ echo "go test in $${dir}"; \
+ (cd "$${dir}" && \
+ go mod tidy -compat=1.18 && \
+ go test && \
+ go test ./... -short -race && \
+ go test ./... -run=NONE -bench=. -benchmem && \
+ env GOOS=linux GOARCH=386 go test && \
+ go vet); \
+ done
+ cd internal/customvet && go build .
+ go vet -vettool ./internal/customvet/customvet
+
+testdeps: testdata/redis/src/redis-server
+
+bench: testdeps
+ go test ./... -test.run=NONE -test.bench=. -test.benchmem
+
+.PHONY: all test testdeps bench fmt
+
+build:
+ go build .
+
+testdata/redis:
+ mkdir -p $@
+ wget -qO- https://download.redis.io/releases/redis-7.2.1.tar.gz | tar xvz --strip-components=1 -C $@
+
+testdata/redis/src/redis-server: testdata/redis
+ cd $< && make all
+
+fmt:
+ gofumpt -w ./
+ goimports -w -local github.com/redis/go-redis ./
+
+go_mod_tidy:
+ set -e; for dir in $(GO_MOD_DIRS); do \
+ echo "go mod tidy in $${dir}"; \
+ (cd "$${dir}" && \
+ go get -u ./... && \
+ go mod tidy -compat=1.18); \
+ done
diff --git a/dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.5.1/README.md b/dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.5.1/README.md
new file mode 100644
index 0000000..043d3f0
--- /dev/null
+++ b/dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.5.1/README.md
@@ -0,0 +1,274 @@
+# Redis client for Go
+
+[![build workflow](https://github.com/redis/go-redis/actions/workflows/build.yml/badge.svg)](https://github.com/redis/go-redis/actions)
+[![PkgGoDev](https://pkg.go.dev/badge/github.com/redis/go-redis/v9)](https://pkg.go.dev/github.com/redis/go-redis/v9?tab=doc)
+[![Documentation](https://img.shields.io/badge/redis-documentation-informational)](https://redis.uptrace.dev/)
+[![Chat](https://discordapp.com/api/guilds/752070105847955518/widget.png)](https://discord.gg/rWtp5Aj)
+
+> go-redis is brought to you by :star: [**uptrace/uptrace**](https://github.com/uptrace/uptrace).
+> Uptrace is an open-source APM tool that supports distributed tracing, metrics, and logs. You can
+> use it to monitor applications and set up automatic alerts to receive notifications via email,
+> Slack, Telegram, and others.
+>
+> See [OpenTelemetry](https://github.com/redis/go-redis/tree/master/example/otel) example which
+> demonstrates how you can use Uptrace to monitor go-redis.
+
+## How do I Redis?
+
+[Learn for free at Redis University](https://university.redis.com/)
+
+[Build faster with the Redis Launchpad](https://launchpad.redis.com/)
+
+[Try the Redis Cloud](https://redis.com/try-free/)
+
+[Dive in developer tutorials](https://developer.redis.com/)
+
+[Join the Redis community](https://redis.com/community/)
+
+[Work at Redis](https://redis.com/company/careers/jobs/)
+
+## Documentation
+
+- [English](https://redis.uptrace.dev)
+- [简体中文](https://redis.uptrace.dev/zh/)
+
+## Resources
+
+- [Discussions](https://github.com/redis/go-redis/discussions)
+- [Chat](https://discord.gg/rWtp5Aj)
+- [Reference](https://pkg.go.dev/github.com/redis/go-redis/v9)
+- [Examples](https://pkg.go.dev/github.com/redis/go-redis/v9#pkg-examples)
+
+## Ecosystem
+
+- [Redis Mock](https://github.com/go-redis/redismock)
+- [Distributed Locks](https://github.com/bsm/redislock)
+- [Redis Cache](https://github.com/go-redis/cache)
+- [Rate limiting](https://github.com/go-redis/redis_rate)
+
+This client also works with [Kvrocks](https://github.com/apache/incubator-kvrocks), a distributed
+key value NoSQL database that uses RocksDB as storage engine and is compatible with Redis protocol.
+
+## Features
+
+- Redis commands except QUIT and SYNC.
+- Automatic connection pooling.
+- [Pub/Sub](https://redis.uptrace.dev/guide/go-redis-pubsub.html).
+- [Pipelines and transactions](https://redis.uptrace.dev/guide/go-redis-pipelines.html).
+- [Scripting](https://redis.uptrace.dev/guide/lua-scripting.html).
+- [Redis Sentinel](https://redis.uptrace.dev/guide/go-redis-sentinel.html).
+- [Redis Cluster](https://redis.uptrace.dev/guide/go-redis-cluster.html).
+- [Redis Ring](https://redis.uptrace.dev/guide/ring.html).
+- [Redis Performance Monitoring](https://redis.uptrace.dev/guide/redis-performance-monitoring.html).
+- [Redis Probabilistic [RedisStack]](https://redis.io/docs/data-types/probabilistic/)
+
+## Installation
+
+go-redis supports 2 last Go versions and requires a Go version with
+[modules](https://github.com/golang/go/wiki/Modules) support. So make sure to initialize a Go
+module:
+
+```shell
+go mod init github.com/my/repo
+```
+
+Then install go-redis/**v9**:
+
+```shell
+go get github.com/redis/go-redis/v9
+```
+
+## Quickstart
+
+```go
+import (
+ "context"
+ "fmt"
+
+ "github.com/redis/go-redis/v9"
+)
+
+var ctx = context.Background()
+
+func ExampleClient() {
+ rdb := redis.NewClient(&redis.Options{
+ Addr: "localhost:6379",
+ Password: "", // no password set
+ DB: 0, // use default DB
+ })
+
+ err := rdb.Set(ctx, "key", "value", 0).Err()
+ if err != nil {
+ panic(err)
+ }
+
+ val, err := rdb.Get(ctx, "key").Result()
+ if err != nil {
+ panic(err)
+ }
+ fmt.Println("key", val)
+
+ val2, err := rdb.Get(ctx, "key2").Result()
+ if err == redis.Nil {
+ fmt.Println("key2 does not exist")
+ } else if err != nil {
+ panic(err)
+ } else {
+ fmt.Println("key2", val2)
+ }
+ // Output: key value
+ // key2 does not exist
+}
+```
+
+The above can be modified to specify the version of the RESP protocol by adding the `protocol`
+option to the `Options` struct:
+
+```go
+ rdb := redis.NewClient(&redis.Options{
+ Addr: "localhost:6379",
+ Password: "", // no password set
+ DB: 0, // use default DB
+ Protocol: 3, // specify 2 for RESP 2 or 3 for RESP 3
+ })
+
+```
+
+### Connecting via a redis url
+
+go-redis also supports connecting via the
+[redis uri specification](https://github.com/redis/redis-specifications/tree/master/uri/redis.txt).
+The example below demonstrates how the connection can easily be configured using a string, adhering
+to this specification.
+
+```go
+import (
+ "context"
+ "fmt"
+
+ "github.com/redis/go-redis/v9"
+)
+
+func ExampleClient() *redis.Client {
+ url := "redis://user:password@localhost:6379/0?protocol=3"
+ opts, err := redis.ParseURL(url)
+ if err != nil {
+ panic(err)
+ }
+
+ return redis.NewClient(opts)
+}
+
+```
+
+
+### Advanced Configuration
+
+go-redis supports extending the client identification phase to allow projects to send their own custom client identification.
+
+#### Default Client Identification
+
+By default, go-redis automatically sends the client library name and version during the connection process. This feature is available in redis-server as of version 7.2. As a result, the command is "fire and forget", meaning it should fail silently, in the case that the redis server does not support this feature.
+
+#### Disabling Identity Verification
+
+When connection identity verification is not required or needs to be explicitly disabled, a `DisableIndentity` configuration option exists. In V10 of this library, `DisableIndentity` will become `DisableIdentity` in order to fix the associated typo.
+
+To disable verification, set the `DisableIndentity` option to `true` in the Redis client options:
+
+```go
+rdb := redis.NewClient(&redis.Options{
+ Addr: "localhost:6379",
+ Password: "",
+ DB: 0,
+ DisableIndentity: true, // Disable set-info on connect
+})
+```
+
+## Contributing
+
+Please see [out contributing guidelines](CONTRIBUTING.md) to help us improve this library!
+
+## Look and feel
+
+Some corner cases:
+
+```go
+// SET key value EX 10 NX
+set, err := rdb.SetNX(ctx, "key", "value", 10*time.Second).Result()
+
+// SET key value keepttl NX
+set, err := rdb.SetNX(ctx, "key", "value", redis.KeepTTL).Result()
+
+// SORT list LIMIT 0 2 ASC
+vals, err := rdb.Sort(ctx, "list", &redis.Sort{Offset: 0, Count: 2, Order: "ASC"}).Result()
+
+// ZRANGEBYSCORE zset -inf +inf WITHSCORES LIMIT 0 2
+vals, err := rdb.ZRangeByScoreWithScores(ctx, "zset", &redis.ZRangeBy{
+ Min: "-inf",
+ Max: "+inf",
+ Offset: 0,
+ Count: 2,
+}).Result()
+
+// ZINTERSTORE out 2 zset1 zset2 WEIGHTS 2 3 AGGREGATE SUM
+vals, err := rdb.ZInterStore(ctx, "out", &redis.ZStore{
+ Keys: []string{"zset1", "zset2"},
+ Weights: []int64{2, 3}
+}).Result()
+
+// EVAL "return {KEYS[1],ARGV[1]}" 1 "key" "hello"
+vals, err := rdb.Eval(ctx, "return {KEYS[1],ARGV[1]}", []string{"key"}, "hello").Result()
+
+// custom command
+res, err := rdb.Do(ctx, "set", "key", "value").Result()
+```
+
+## Run the test
+
+go-redis will start a redis-server and run the test cases.
+
+The paths of redis-server bin file and redis config file are defined in `main_test.go`:
+
+```go
+var (
+ redisServerBin, _ = filepath.Abs(filepath.Join("testdata", "redis", "src", "redis-server"))
+ redisServerConf, _ = filepath.Abs(filepath.Join("testdata", "redis", "redis.conf"))
+)
+```
+
+For local testing, you can change the variables to refer to your local files, or create a soft link
+to the corresponding folder for redis-server and copy the config file to `testdata/redis/`:
+
+```shell
+ln -s /usr/bin/redis-server ./go-redis/testdata/redis/src
+cp ./go-redis/testdata/redis.conf ./go-redis/testdata/redis/
+```
+
+Lastly, run:
+
+```shell
+go test
+```
+
+Another option is to run your specific tests with an already running redis. The example below, tests
+against a redis running on port 9999.:
+
+```shell
+REDIS_PORT=9999 go test <your options>
+```
+
+## See also
+
+- [Golang ORM](https://bun.uptrace.dev) for PostgreSQL, MySQL, MSSQL, and SQLite
+- [Golang PostgreSQL](https://bun.uptrace.dev/postgres/)
+- [Golang HTTP router](https://bunrouter.uptrace.dev/)
+- [Golang ClickHouse ORM](https://github.com/uptrace/go-clickhouse)
+
+## Contributors
+
+Thanks to all the people who already contributed!
+
+<a href="https://github.com/redis/go-redis/graphs/contributors">
+ <img src="https://contributors-img.web.app/image?repo=redis/go-redis" />
+</a>
diff --git a/dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/RELEASING.md b/dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.5.1/RELEASING.md
index 1115db4..1115db4 100644
--- a/dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/RELEASING.md
+++ b/dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.5.1/RELEASING.md
diff --git a/dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.5.1/acl_commands.go b/dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.5.1/acl_commands.go
new file mode 100644
index 0000000..06847be
--- /dev/null
+++ b/dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.5.1/acl_commands.go
@@ -0,0 +1,35 @@
+package redis
+
+import "context"
+
+type ACLCmdable interface {
+ ACLDryRun(ctx context.Context, username string, command ...interface{}) *StringCmd
+ ACLLog(ctx context.Context, count int64) *ACLLogCmd
+ ACLLogReset(ctx context.Context) *StatusCmd
+}
+
+func (c cmdable) ACLDryRun(ctx context.Context, username string, command ...interface{}) *StringCmd {
+ args := make([]interface{}, 0, 3+len(command))
+ args = append(args, "acl", "dryrun", username)
+ args = append(args, command...)
+ cmd := NewStringCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) ACLLog(ctx context.Context, count int64) *ACLLogCmd {
+ args := make([]interface{}, 0, 3)
+ args = append(args, "acl", "log")
+ if count > 0 {
+ args = append(args, count)
+ }
+ cmd := NewACLLogCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) ACLLogReset(ctx context.Context) *StatusCmd {
+ cmd := NewStatusCmd(ctx, "acl", "log", "reset")
+ _ = c(ctx, cmd)
+ return cmd
+}
diff --git a/dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/bench_decode_test.go b/dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.5.1/bench_decode_test.go
index 8382806..16bdf2c 100644
--- a/dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/bench_decode_test.go
+++ b/dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.5.1/bench_decode_test.go
@@ -8,7 +8,7 @@ import (
"testing"
"time"
- "github.com/go-redis/redis/v8/internal/proto"
+ "github.com/redis/go-redis/v9/internal/proto"
)
var ctx = context.TODO()
@@ -18,15 +18,19 @@ type ClientStub struct {
resp []byte
}
+var initHello = []byte("%1\r\n+proto\r\n:3\r\n")
+
func NewClientStub(resp []byte) *ClientStub {
stub := &ClientStub{
resp: resp,
}
+
stub.Cmdable = NewClient(&Options{
PoolSize: 128,
Dialer: func(ctx context.Context, network, addr string) (net.Conn, error) {
- return stub.stubConn(), nil
+ return stub.stubConn(initHello), nil
},
+ DisableIndentity: true,
})
return stub
}
@@ -38,10 +42,12 @@ func NewClusterClientStub(resp []byte) *ClientStub {
client := NewClusterClient(&ClusterOptions{
PoolSize: 128,
- Addrs: []string{"127.0.0.1:6379"},
+ Addrs: []string{":6379"},
Dialer: func(ctx context.Context, network, addr string) (net.Conn, error) {
- return stub.stubConn(), nil
+ return stub.stubConn(initHello), nil
},
+ DisableIndentity: true,
+
ClusterSlots: func(_ context.Context) ([]ClusterSlot, error) {
return []ClusterSlot{
{
@@ -53,30 +59,31 @@ func NewClusterClientStub(resp []byte) *ClientStub {
},
})
- // init command.
- tmpClient := NewClient(&Options{Addr: ":6379"})
- cmdsInfo, err := tmpClient.Command(ctx).Result()
- _ = tmpClient.Close()
- client.cmdsInfoCache = newCmdsInfoCache(func(_ context.Context) (map[string]*CommandInfo, error) {
- return cmdsInfo, err
- })
-
stub.Cmdable = client
return stub
}
-func (c *ClientStub) stubConn() *ConnStub {
+func (c *ClientStub) stubConn(init []byte) *ConnStub {
return &ConnStub{
+ init: init,
resp: c.resp,
}
}
type ConnStub struct {
+ init []byte
resp []byte
pos int
}
func (c *ConnStub) Read(b []byte) (n int, err error) {
+ // Return conn.init()
+ if len(c.init) > 0 {
+ n = copy(b, c.init)
+ c.init = c.init[n:]
+ return n, nil
+ }
+
if len(c.resp) == 0 {
return 0, io.EOF
}
@@ -106,7 +113,7 @@ func BenchmarkDecode(b *testing.B) {
}
benchmarks := []Benchmark{
- {"single", NewClientStub},
+ {"server", NewClientStub},
{"cluster", NewClusterClientStub},
}
diff --git a/dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/bench_test.go b/dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.5.1/bench_test.go
index ba81ce8..8e23303 100644
--- a/dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/bench_test.go
+++ b/dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.5.1/bench_test.go
@@ -10,7 +10,7 @@ import (
"testing"
"time"
- "github.com/go-redis/redis/v8"
+ "github.com/redis/go-redis/v9"
)
func benchmarkRedisClient(ctx context.Context, poolSize int) *redis.Client {
@@ -223,7 +223,7 @@ func BenchmarkZAdd(b *testing.B) {
b.RunParallel(func(pb *testing.PB) {
for pb.Next() {
- err := client.ZAdd(ctx, "key", &redis.Z{
+ err := client.ZAdd(ctx, "key", redis.Z{
Score: float64(1),
Member: "hello",
}).Err()
@@ -273,36 +273,6 @@ func BenchmarkXRead(b *testing.B) {
})
}
-var clientSink *redis.Client
-
-func BenchmarkWithContext(b *testing.B) {
- ctx := context.Background()
- rdb := benchmarkRedisClient(ctx, 10)
- defer rdb.Close()
-
- b.ResetTimer()
- b.ReportAllocs()
-
- for i := 0; i < b.N; i++ {
- clientSink = rdb.WithContext(ctx)
- }
-}
-
-var ringSink *redis.Ring
-
-func BenchmarkRingWithContext(b *testing.B) {
- ctx := context.Background()
- rdb := redis.NewRing(&redis.RingOptions{})
- defer rdb.Close()
-
- b.ResetTimer()
- b.ReportAllocs()
-
- for i := 0; i < b.N; i++ {
- ringSink = rdb.WithContext(ctx)
- }
-}
-
//------------------------------------------------------------------------------
func newClusterScenario() *clusterScenario {
@@ -396,17 +366,77 @@ func BenchmarkClusterSetString(b *testing.B) {
})
}
-var clusterSink *redis.ClusterClient
+func BenchmarkExecRingSetAddrsCmd(b *testing.B) {
+ const (
+ ringShard1Name = "ringShardOne"
+ ringShard2Name = "ringShardTwo"
+ )
-func BenchmarkClusterWithContext(b *testing.B) {
- ctx := context.Background()
- rdb := redis.NewClusterClient(&redis.ClusterOptions{})
- defer rdb.Close()
+ for _, port := range []string{ringShard1Port, ringShard2Port} {
+ if _, err := startRedis(port); err != nil {
+ b.Fatal(err)
+ }
+ }
- b.ResetTimer()
- b.ReportAllocs()
+ b.Cleanup(func() {
+ for _, p := range processes {
+ if err := p.Close(); err != nil {
+ b.Errorf("Failed to stop redis process: %v", err)
+ }
+ }
+ processes = nil
+ })
+
+ ring := redis.NewRing(&redis.RingOptions{
+ Addrs: map[string]string{
+ "ringShardOne": ":" + ringShard1Port,
+ },
+ NewClient: func(opt *redis.Options) *redis.Client {
+ // Simulate slow shard creation
+ time.Sleep(100 * time.Millisecond)
+ return redis.NewClient(opt)
+ },
+ })
+ defer ring.Close()
+ if _, err := ring.Ping(context.Background()).Result(); err != nil {
+ b.Fatal(err)
+ }
+
+ // Continuously update addresses by adding and removing one address
+ updatesDone := make(chan struct{})
+ defer func() { close(updatesDone) }()
+ go func() {
+ ticker := time.NewTicker(10 * time.Millisecond)
+ defer ticker.Stop()
+ for i := 0; ; i++ {
+ select {
+ case <-ticker.C:
+ if i%2 == 0 {
+ ring.SetAddrs(map[string]string{
+ ringShard1Name: ":" + ringShard1Port,
+ })
+ } else {
+ ring.SetAddrs(map[string]string{
+ ringShard1Name: ":" + ringShard1Port,
+ ringShard2Name: ":" + ringShard2Port,
+ })
+ }
+ case <-updatesDone:
+ return
+ }
+ }
+ }()
+
+ b.ResetTimer()
for i := 0; i < b.N; i++ {
- clusterSink = rdb.WithContext(ctx)
+ if _, err := ring.Ping(context.Background()).Result(); err != nil {
+ if err == redis.ErrClosed {
+ // The shard client could be closed while ping command is in progress
+ continue
+ } else {
+ b.Fatal(err)
+ }
+ }
}
}
diff --git a/dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.5.1/bitmap_commands.go b/dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.5.1/bitmap_commands.go
new file mode 100644
index 0000000..d9fc50d
--- /dev/null
+++ b/dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.5.1/bitmap_commands.go
@@ -0,0 +1,163 @@
+package redis
+
+import (
+ "context"
+ "errors"
+)
+
+type BitMapCmdable interface {
+ GetBit(ctx context.Context, key string, offset int64) *IntCmd
+ SetBit(ctx context.Context, key string, offset int64, value int) *IntCmd
+ BitCount(ctx context.Context, key string, bitCount *BitCount) *IntCmd
+ BitOpAnd(ctx context.Context, destKey string, keys ...string) *IntCmd
+ BitOpOr(ctx context.Context, destKey string, keys ...string) *IntCmd
+ BitOpXor(ctx context.Context, destKey string, keys ...string) *IntCmd
+ BitOpNot(ctx context.Context, destKey string, key string) *IntCmd
+ BitPos(ctx context.Context, key string, bit int64, pos ...int64) *IntCmd
+ BitPosSpan(ctx context.Context, key string, bit int8, start, end int64, span string) *IntCmd
+ BitField(ctx context.Context, key string, values ...interface{}) *IntSliceCmd
+}
+
+func (c cmdable) GetBit(ctx context.Context, key string, offset int64) *IntCmd {
+ cmd := NewIntCmd(ctx, "getbit", key, offset)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) SetBit(ctx context.Context, key string, offset int64, value int) *IntCmd {
+ cmd := NewIntCmd(
+ ctx,
+ "setbit",
+ key,
+ offset,
+ value,
+ )
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+type BitCount struct {
+ Start, End int64
+ Unit string // BYTE(default) | BIT
+}
+
+const BitCountIndexByte string = "BYTE"
+const BitCountIndexBit string = "BIT"
+
+func (c cmdable) BitCount(ctx context.Context, key string, bitCount *BitCount) *IntCmd {
+ args := []interface{}{"bitcount", key}
+ if bitCount != nil {
+ if bitCount.Unit == "" {
+ bitCount.Unit = "BYTE"
+ }
+ if bitCount.Unit != BitCountIndexByte && bitCount.Unit != BitCountIndexBit {
+ cmd := NewIntCmd(ctx)
+ cmd.SetErr(errors.New("redis: invalid bitcount index"))
+ return cmd
+ }
+ args = append(
+ args,
+ bitCount.Start,
+ bitCount.End,
+ string(bitCount.Unit),
+ )
+ }
+ cmd := NewIntCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) bitOp(ctx context.Context, op, destKey string, keys ...string) *IntCmd {
+ args := make([]interface{}, 3+len(keys))
+ args[0] = "bitop"
+ args[1] = op
+ args[2] = destKey
+ for i, key := range keys {
+ args[3+i] = key
+ }
+ cmd := NewIntCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) BitOpAnd(ctx context.Context, destKey string, keys ...string) *IntCmd {
+ return c.bitOp(ctx, "and", destKey, keys...)
+}
+
+func (c cmdable) BitOpOr(ctx context.Context, destKey string, keys ...string) *IntCmd {
+ return c.bitOp(ctx, "or", destKey, keys...)
+}
+
+func (c cmdable) BitOpXor(ctx context.Context, destKey string, keys ...string) *IntCmd {
+ return c.bitOp(ctx, "xor", destKey, keys...)
+}
+
+func (c cmdable) BitOpNot(ctx context.Context, destKey string, key string) *IntCmd {
+ return c.bitOp(ctx, "not", destKey, key)
+}
+
+// BitPos is an API before Redis version 7.0, cmd: bitpos key bit start end
+// if you need the `byte | bit` parameter, please use `BitPosSpan`.
+func (c cmdable) BitPos(ctx context.Context, key string, bit int64, pos ...int64) *IntCmd {
+ args := make([]interface{}, 3+len(pos))
+ args[0] = "bitpos"
+ args[1] = key
+ args[2] = bit
+ switch len(pos) {
+ case 0:
+ case 1:
+ args[3] = pos[0]
+ case 2:
+ args[3] = pos[0]
+ args[4] = pos[1]
+ default:
+ panic("too many arguments")
+ }
+ cmd := NewIntCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+// BitPosSpan supports the `byte | bit` parameters in redis version 7.0,
+// the bitpos command defaults to using byte type for the `start-end` range,
+// which means it counts in bytes from start to end. you can set the value
+// of "span" to determine the type of `start-end`.
+// span = "bit", cmd: bitpos key bit start end bit
+// span = "byte", cmd: bitpos key bit start end byte
+func (c cmdable) BitPosSpan(ctx context.Context, key string, bit int8, start, end int64, span string) *IntCmd {
+ cmd := NewIntCmd(ctx, "bitpos", key, bit, start, end, span)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+// BitField accepts multiple values:
+// - BitField("set", "i1", "offset1", "value1","cmd2", "type2", "offset2", "value2")
+// - BitField([]string{"cmd1", "type1", "offset1", "value1","cmd2", "type2", "offset2", "value2"})
+// - BitField([]interface{}{"cmd1", "type1", "offset1", "value1","cmd2", "type2", "offset2", "value2"})
+func (c cmdable) BitField(ctx context.Context, key string, values ...interface{}) *IntSliceCmd {
+ args := make([]interface{}, 2, 2+len(values))
+ args[0] = "bitfield"
+ args[1] = key
+ args = appendArgs(args, values)
+ cmd := NewIntSliceCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+// BitFieldRO - Read-only variant of the BITFIELD command.
+// It is like the original BITFIELD but only accepts GET subcommand and can safely be used in read-only replicas.
+// - BitFieldRO(ctx, key, "<Encoding0>", "<Offset0>", "<Encoding1>","<Offset1>")
+func (c cmdable) BitFieldRO(ctx context.Context, key string, values ...interface{}) *IntSliceCmd {
+ args := make([]interface{}, 2, 2+len(values))
+ args[0] = "BITFIELD_RO"
+ args[1] = key
+ if len(values)%2 != 0 {
+ panic("BitFieldRO: invalid number of arguments, must be even")
+ }
+ for i := 0; i < len(values); i += 2 {
+ args = append(args, "GET", values[i], values[i+1])
+ }
+ cmd := NewIntSliceCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
diff --git a/dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.5.1/bitmap_commands_test.go b/dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.5.1/bitmap_commands_test.go
new file mode 100644
index 0000000..f3cc320
--- /dev/null
+++ b/dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.5.1/bitmap_commands_test.go
@@ -0,0 +1,98 @@
+package redis_test
+
+import (
+ . "github.com/bsm/ginkgo/v2"
+ . "github.com/bsm/gomega"
+ "github.com/redis/go-redis/v9"
+)
+
+type bitCountExpected struct {
+ Start int64
+ End int64
+ Expected int64
+}
+
+var _ = Describe("BitCountBite", func() {
+ var client *redis.Client
+ key := "bit_count_test"
+
+ BeforeEach(func() {
+ client = redis.NewClient(redisOptions())
+ Expect(client.FlushDB(ctx).Err()).NotTo(HaveOccurred())
+ values := []int{0, 1, 0, 0, 1, 0, 1, 0, 1, 1}
+ for i, v := range values {
+ cmd := client.SetBit(ctx, key, int64(i), v)
+ Expect(cmd.Err()).NotTo(HaveOccurred())
+ }
+ })
+
+ AfterEach(func() {
+ Expect(client.Close()).NotTo(HaveOccurred())
+ })
+
+ It("bit count bite", func() {
+ var expected = []bitCountExpected{
+ {0, 0, 0},
+ {0, 1, 1},
+ {0, 2, 1},
+ {0, 3, 1},
+ {0, 4, 2},
+ {0, 5, 2},
+ {0, 6, 3},
+ {0, 7, 3},
+ {0, 8, 4},
+ {0, 9, 5},
+ }
+
+ for _, e := range expected {
+ cmd := client.BitCount(ctx, key, &redis.BitCount{Start: e.Start, End: e.End, Unit: redis.BitCountIndexBit})
+ Expect(cmd.Err()).NotTo(HaveOccurred())
+ Expect(cmd.Val()).To(Equal(e.Expected))
+ }
+ })
+})
+
+var _ = Describe("BitCountByte", func() {
+ var client *redis.Client
+ key := "bit_count_test"
+
+ BeforeEach(func() {
+ client = redis.NewClient(redisOptions())
+ Expect(client.FlushDB(ctx).Err()).NotTo(HaveOccurred())
+ values := []int{0, 0, 0, 0, 0, 0, 0, 1, 1, 1}
+ for i, v := range values {
+ cmd := client.SetBit(ctx, key, int64(i), v)
+ Expect(cmd.Err()).NotTo(HaveOccurred())
+ }
+ })
+
+ AfterEach(func() {
+ Expect(client.Close()).NotTo(HaveOccurred())
+ })
+
+ It("bit count byte", func() {
+ var expected = []bitCountExpected{
+ {0, 0, 1},
+ {0, 1, 3},
+ }
+
+ for _, e := range expected {
+ cmd := client.BitCount(ctx, key, &redis.BitCount{Start: e.Start, End: e.End, Unit: redis.BitCountIndexByte})
+ Expect(cmd.Err()).NotTo(HaveOccurred())
+ Expect(cmd.Val()).To(Equal(e.Expected))
+ }
+ })
+
+ It("bit count byte with no unit specified", func() {
+ var expected = []bitCountExpected{
+ {0, 0, 1},
+ {0, 1, 3},
+ }
+
+ for _, e := range expected {
+ cmd := client.BitCount(ctx, key, &redis.BitCount{Start: e.Start, End: e.End})
+ Expect(cmd.Err()).NotTo(HaveOccurred())
+ Expect(cmd.Val()).To(Equal(e.Expected))
+ }
+ })
+})
diff --git a/dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.5.1/cluster_commands.go b/dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.5.1/cluster_commands.go
new file mode 100644
index 0000000..0caf097
--- /dev/null
+++ b/dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.5.1/cluster_commands.go
@@ -0,0 +1,192 @@
+package redis
+
+import "context"
+
+type ClusterCmdable interface {
+ ClusterMyShardID(ctx context.Context) *StringCmd
+ ClusterSlots(ctx context.Context) *ClusterSlotsCmd
+ ClusterShards(ctx context.Context) *ClusterShardsCmd
+ ClusterLinks(ctx context.Context) *ClusterLinksCmd
+ ClusterNodes(ctx context.Context) *StringCmd
+ ClusterMeet(ctx context.Context, host, port string) *StatusCmd
+ ClusterForget(ctx context.Context, nodeID string) *StatusCmd
+ ClusterReplicate(ctx context.Context, nodeID string) *StatusCmd
+ ClusterResetSoft(ctx context.Context) *StatusCmd
+ ClusterResetHard(ctx context.Context) *StatusCmd
+ ClusterInfo(ctx context.Context) *StringCmd
+ ClusterKeySlot(ctx context.Context, key string) *IntCmd
+ ClusterGetKeysInSlot(ctx context.Context, slot int, count int) *StringSliceCmd
+ ClusterCountFailureReports(ctx context.Context, nodeID string) *IntCmd
+ ClusterCountKeysInSlot(ctx context.Context, slot int) *IntCmd
+ ClusterDelSlots(ctx context.Context, slots ...int) *StatusCmd
+ ClusterDelSlotsRange(ctx context.Context, min, max int) *StatusCmd
+ ClusterSaveConfig(ctx context.Context) *StatusCmd
+ ClusterSlaves(ctx context.Context, nodeID string) *StringSliceCmd
+ ClusterFailover(ctx context.Context) *StatusCmd
+ ClusterAddSlots(ctx context.Context, slots ...int) *StatusCmd
+ ClusterAddSlotsRange(ctx context.Context, min, max int) *StatusCmd
+ ReadOnly(ctx context.Context) *StatusCmd
+ ReadWrite(ctx context.Context) *StatusCmd
+}
+
+func (c cmdable) ClusterMyShardID(ctx context.Context) *StringCmd {
+ cmd := NewStringCmd(ctx, "cluster", "myshardid")
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) ClusterSlots(ctx context.Context) *ClusterSlotsCmd {
+ cmd := NewClusterSlotsCmd(ctx, "cluster", "slots")
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) ClusterShards(ctx context.Context) *ClusterShardsCmd {
+ cmd := NewClusterShardsCmd(ctx, "cluster", "shards")
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) ClusterLinks(ctx context.Context) *ClusterLinksCmd {
+ cmd := NewClusterLinksCmd(ctx, "cluster", "links")
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) ClusterNodes(ctx context.Context) *StringCmd {
+ cmd := NewStringCmd(ctx, "cluster", "nodes")
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) ClusterMeet(ctx context.Context, host, port string) *StatusCmd {
+ cmd := NewStatusCmd(ctx, "cluster", "meet", host, port)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) ClusterForget(ctx context.Context, nodeID string) *StatusCmd {
+ cmd := NewStatusCmd(ctx, "cluster", "forget", nodeID)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) ClusterReplicate(ctx context.Context, nodeID string) *StatusCmd {
+ cmd := NewStatusCmd(ctx, "cluster", "replicate", nodeID)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) ClusterResetSoft(ctx context.Context) *StatusCmd {
+ cmd := NewStatusCmd(ctx, "cluster", "reset", "soft")
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) ClusterResetHard(ctx context.Context) *StatusCmd {
+ cmd := NewStatusCmd(ctx, "cluster", "reset", "hard")
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) ClusterInfo(ctx context.Context) *StringCmd {
+ cmd := NewStringCmd(ctx, "cluster", "info")
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) ClusterKeySlot(ctx context.Context, key string) *IntCmd {
+ cmd := NewIntCmd(ctx, "cluster", "keyslot", key)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) ClusterGetKeysInSlot(ctx context.Context, slot int, count int) *StringSliceCmd {
+ cmd := NewStringSliceCmd(ctx, "cluster", "getkeysinslot", slot, count)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) ClusterCountFailureReports(ctx context.Context, nodeID string) *IntCmd {
+ cmd := NewIntCmd(ctx, "cluster", "count-failure-reports", nodeID)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) ClusterCountKeysInSlot(ctx context.Context, slot int) *IntCmd {
+ cmd := NewIntCmd(ctx, "cluster", "countkeysinslot", slot)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) ClusterDelSlots(ctx context.Context, slots ...int) *StatusCmd {
+ args := make([]interface{}, 2+len(slots))
+ args[0] = "cluster"
+ args[1] = "delslots"
+ for i, slot := range slots {
+ args[2+i] = slot
+ }
+ cmd := NewStatusCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) ClusterDelSlotsRange(ctx context.Context, min, max int) *StatusCmd {
+ size := max - min + 1
+ slots := make([]int, size)
+ for i := 0; i < size; i++ {
+ slots[i] = min + i
+ }
+ return c.ClusterDelSlots(ctx, slots...)
+}
+
+func (c cmdable) ClusterSaveConfig(ctx context.Context) *StatusCmd {
+ cmd := NewStatusCmd(ctx, "cluster", "saveconfig")
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) ClusterSlaves(ctx context.Context, nodeID string) *StringSliceCmd {
+ cmd := NewStringSliceCmd(ctx, "cluster", "slaves", nodeID)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) ClusterFailover(ctx context.Context) *StatusCmd {
+ cmd := NewStatusCmd(ctx, "cluster", "failover")
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) ClusterAddSlots(ctx context.Context, slots ...int) *StatusCmd {
+ args := make([]interface{}, 2+len(slots))
+ args[0] = "cluster"
+ args[1] = "addslots"
+ for i, num := range slots {
+ args[2+i] = num
+ }
+ cmd := NewStatusCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) ClusterAddSlotsRange(ctx context.Context, min, max int) *StatusCmd {
+ size := max - min + 1
+ slots := make([]int, size)
+ for i := 0; i < size; i++ {
+ slots[i] = min + i
+ }
+ return c.ClusterAddSlots(ctx, slots...)
+}
+
+func (c cmdable) ReadOnly(ctx context.Context) *StatusCmd {
+ cmd := NewStatusCmd(ctx, "readonly")
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) ReadWrite(ctx context.Context) *StatusCmd {
+ cmd := NewStatusCmd(ctx, "readwrite")
+ _ = c(ctx, cmd)
+ return cmd
+}
diff --git a/dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.5.1/command.go b/dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.5.1/command.go
new file mode 100644
index 0000000..9fb9a83
--- /dev/null
+++ b/dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.5.1/command.go
@@ -0,0 +1,5483 @@
+package redis
+
+import (
+ "bufio"
+ "context"
+ "fmt"
+ "net"
+ "regexp"
+ "strconv"
+ "strings"
+ "sync"
+ "time"
+
+ "github.com/redis/go-redis/v9/internal"
+ "github.com/redis/go-redis/v9/internal/hscan"
+ "github.com/redis/go-redis/v9/internal/proto"
+ "github.com/redis/go-redis/v9/internal/util"
+)
+
+type Cmder interface {
+ // command name.
+ // e.g. "set k v ex 10" -> "set", "cluster info" -> "cluster".
+ Name() string
+
+ // full command name.
+ // e.g. "set k v ex 10" -> "set", "cluster info" -> "cluster info".
+ FullName() string
+
+ // all args of the command.
+ // e.g. "set k v ex 10" -> "[set k v ex 10]".
+ Args() []interface{}
+
+ // format request and response string.
+ // e.g. "set k v ex 10" -> "set k v ex 10: OK", "get k" -> "get k: v".
+ String() string
+
+ stringArg(int) string
+ firstKeyPos() int8
+ SetFirstKeyPos(int8)
+
+ readTimeout() *time.Duration
+ readReply(rd *proto.Reader) error
+
+ SetErr(error)
+ Err() error
+}
+
+func setCmdsErr(cmds []Cmder, e error) {
+ for _, cmd := range cmds {
+ if cmd.Err() == nil {
+ cmd.SetErr(e)
+ }
+ }
+}
+
+func cmdsFirstErr(cmds []Cmder) error {
+ for _, cmd := range cmds {
+ if err := cmd.Err(); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+func writeCmds(wr *proto.Writer, cmds []Cmder) error {
+ for _, cmd := range cmds {
+ if err := writeCmd(wr, cmd); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+func writeCmd(wr *proto.Writer, cmd Cmder) error {
+ return wr.WriteArgs(cmd.Args())
+}
+
+func cmdFirstKeyPos(cmd Cmder) int {
+ if pos := cmd.firstKeyPos(); pos != 0 {
+ return int(pos)
+ }
+
+ switch cmd.Name() {
+ case "eval", "evalsha", "eval_ro", "evalsha_ro":
+ if cmd.stringArg(2) != "0" {
+ return 3
+ }
+
+ return 0
+ case "publish":
+ return 1
+ case "memory":
+ // https://github.com/redis/redis/issues/7493
+ if cmd.stringArg(1) == "usage" {
+ return 2
+ }
+ }
+ return 1
+}
+
+func cmdString(cmd Cmder, val interface{}) string {
+ b := make([]byte, 0, 64)
+
+ for i, arg := range cmd.Args() {
+ if i > 0 {
+ b = append(b, ' ')
+ }
+ b = internal.AppendArg(b, arg)
+ }
+
+ if err := cmd.Err(); err != nil {
+ b = append(b, ": "...)
+ b = append(b, err.Error()...)
+ } else if val != nil {
+ b = append(b, ": "...)
+ b = internal.AppendArg(b, val)
+ }
+
+ return util.BytesToString(b)
+}
+
+//------------------------------------------------------------------------------
+
+type baseCmd struct {
+ ctx context.Context
+ args []interface{}
+ err error
+ keyPos int8
+
+ _readTimeout *time.Duration
+}
+
+var _ Cmder = (*Cmd)(nil)
+
+func (cmd *baseCmd) Name() string {
+ if len(cmd.args) == 0 {
+ return ""
+ }
+ // Cmd name must be lower cased.
+ return internal.ToLower(cmd.stringArg(0))
+}
+
+func (cmd *baseCmd) FullName() string {
+ switch name := cmd.Name(); name {
+ case "cluster", "command":
+ if len(cmd.args) == 1 {
+ return name
+ }
+ if s2, ok := cmd.args[1].(string); ok {
+ return name + " " + s2
+ }
+ return name
+ default:
+ return name
+ }
+}
+
+func (cmd *baseCmd) Args() []interface{} {
+ return cmd.args
+}
+
+func (cmd *baseCmd) stringArg(pos int) string {
+ if pos < 0 || pos >= len(cmd.args) {
+ return ""
+ }
+ arg := cmd.args[pos]
+ switch v := arg.(type) {
+ case string:
+ return v
+ default:
+ // TODO: consider using appendArg
+ return fmt.Sprint(v)
+ }
+}
+
+func (cmd *baseCmd) firstKeyPos() int8 {
+ return cmd.keyPos
+}
+
+func (cmd *baseCmd) SetFirstKeyPos(keyPos int8) {
+ cmd.keyPos = keyPos
+}
+
+func (cmd *baseCmd) SetErr(e error) {
+ cmd.err = e
+}
+
+func (cmd *baseCmd) Err() error {
+ return cmd.err
+}
+
+func (cmd *baseCmd) readTimeout() *time.Duration {
+ return cmd._readTimeout
+}
+
+func (cmd *baseCmd) setReadTimeout(d time.Duration) {
+ cmd._readTimeout = &d
+}
+
+//------------------------------------------------------------------------------
+
+type Cmd struct {
+ baseCmd
+
+ val interface{}
+}
+
+func NewCmd(ctx context.Context, args ...interface{}) *Cmd {
+ return &Cmd{
+ baseCmd: baseCmd{
+ ctx: ctx,
+ args: args,
+ },
+ }
+}
+
+func (cmd *Cmd) String() string {
+ return cmdString(cmd, cmd.val)
+}
+
+func (cmd *Cmd) SetVal(val interface{}) {
+ cmd.val = val
+}
+
+func (cmd *Cmd) Val() interface{} {
+ return cmd.val
+}
+
+func (cmd *Cmd) Result() (interface{}, error) {
+ return cmd.val, cmd.err
+}
+
+func (cmd *Cmd) Text() (string, error) {
+ if cmd.err != nil {
+ return "", cmd.err
+ }
+ return toString(cmd.val)
+}
+
+func toString(val interface{}) (string, error) {
+ switch val := val.(type) {
+ case string:
+ return val, nil
+ default:
+ err := fmt.Errorf("redis: unexpected type=%T for String", val)
+ return "", err
+ }
+}
+
+func (cmd *Cmd) Int() (int, error) {
+ if cmd.err != nil {
+ return 0, cmd.err
+ }
+ switch val := cmd.val.(type) {
+ case int64:
+ return int(val), nil
+ case string:
+ return strconv.Atoi(val)
+ default:
+ err := fmt.Errorf("redis: unexpected type=%T for Int", val)
+ return 0, err
+ }
+}
+
+func (cmd *Cmd) Int64() (int64, error) {
+ if cmd.err != nil {
+ return 0, cmd.err
+ }
+ return toInt64(cmd.val)
+}
+
+func toInt64(val interface{}) (int64, error) {
+ switch val := val.(type) {
+ case int64:
+ return val, nil
+ case string:
+ return strconv.ParseInt(val, 10, 64)
+ default:
+ err := fmt.Errorf("redis: unexpected type=%T for Int64", val)
+ return 0, err
+ }
+}
+
+func (cmd *Cmd) Uint64() (uint64, error) {
+ if cmd.err != nil {
+ return 0, cmd.err
+ }
+ return toUint64(cmd.val)
+}
+
+func toUint64(val interface{}) (uint64, error) {
+ switch val := val.(type) {
+ case int64:
+ return uint64(val), nil
+ case string:
+ return strconv.ParseUint(val, 10, 64)
+ default:
+ err := fmt.Errorf("redis: unexpected type=%T for Uint64", val)
+ return 0, err
+ }
+}
+
+func (cmd *Cmd) Float32() (float32, error) {
+ if cmd.err != nil {
+ return 0, cmd.err
+ }
+ return toFloat32(cmd.val)
+}
+
+func toFloat32(val interface{}) (float32, error) {
+ switch val := val.(type) {
+ case int64:
+ return float32(val), nil
+ case string:
+ f, err := strconv.ParseFloat(val, 32)
+ if err != nil {
+ return 0, err
+ }
+ return float32(f), nil
+ default:
+ err := fmt.Errorf("redis: unexpected type=%T for Float32", val)
+ return 0, err
+ }
+}
+
+func (cmd *Cmd) Float64() (float64, error) {
+ if cmd.err != nil {
+ return 0, cmd.err
+ }
+ return toFloat64(cmd.val)
+}
+
+func toFloat64(val interface{}) (float64, error) {
+ switch val := val.(type) {
+ case int64:
+ return float64(val), nil
+ case string:
+ return strconv.ParseFloat(val, 64)
+ default:
+ err := fmt.Errorf("redis: unexpected type=%T for Float64", val)
+ return 0, err
+ }
+}
+
+func (cmd *Cmd) Bool() (bool, error) {
+ if cmd.err != nil {
+ return false, cmd.err
+ }
+ return toBool(cmd.val)
+}
+
+func toBool(val interface{}) (bool, error) {
+ switch val := val.(type) {
+ case bool:
+ return val, nil
+ case int64:
+ return val != 0, nil
+ case string:
+ return strconv.ParseBool(val)
+ default:
+ err := fmt.Errorf("redis: unexpected type=%T for Bool", val)
+ return false, err
+ }
+}
+
+func (cmd *Cmd) Slice() ([]interface{}, error) {
+ if cmd.err != nil {
+ return nil, cmd.err
+ }
+ switch val := cmd.val.(type) {
+ case []interface{}:
+ return val, nil
+ default:
+ return nil, fmt.Errorf("redis: unexpected type=%T for Slice", val)
+ }
+}
+
+func (cmd *Cmd) StringSlice() ([]string, error) {
+ slice, err := cmd.Slice()
+ if err != nil {
+ return nil, err
+ }
+
+ ss := make([]string, len(slice))
+ for i, iface := range slice {
+ val, err := toString(iface)
+ if err != nil {
+ return nil, err
+ }
+ ss[i] = val
+ }
+ return ss, nil
+}
+
+func (cmd *Cmd) Int64Slice() ([]int64, error) {
+ slice, err := cmd.Slice()
+ if err != nil {
+ return nil, err
+ }
+
+ nums := make([]int64, len(slice))
+ for i, iface := range slice {
+ val, err := toInt64(iface)
+ if err != nil {
+ return nil, err
+ }
+ nums[i] = val
+ }
+ return nums, nil
+}
+
+func (cmd *Cmd) Uint64Slice() ([]uint64, error) {
+ slice, err := cmd.Slice()
+ if err != nil {
+ return nil, err
+ }
+
+ nums := make([]uint64, len(slice))
+ for i, iface := range slice {
+ val, err := toUint64(iface)
+ if err != nil {
+ return nil, err
+ }
+ nums[i] = val
+ }
+ return nums, nil
+}
+
+func (cmd *Cmd) Float32Slice() ([]float32, error) {
+ slice, err := cmd.Slice()
+ if err != nil {
+ return nil, err
+ }
+
+ floats := make([]float32, len(slice))
+ for i, iface := range slice {
+ val, err := toFloat32(iface)
+ if err != nil {
+ return nil, err
+ }
+ floats[i] = val
+ }
+ return floats, nil
+}
+
+func (cmd *Cmd) Float64Slice() ([]float64, error) {
+ slice, err := cmd.Slice()
+ if err != nil {
+ return nil, err
+ }
+
+ floats := make([]float64, len(slice))
+ for i, iface := range slice {
+ val, err := toFloat64(iface)
+ if err != nil {
+ return nil, err
+ }
+ floats[i] = val
+ }
+ return floats, nil
+}
+
+func (cmd *Cmd) BoolSlice() ([]bool, error) {
+ slice, err := cmd.Slice()
+ if err != nil {
+ return nil, err
+ }
+
+ bools := make([]bool, len(slice))
+ for i, iface := range slice {
+ val, err := toBool(iface)
+ if err != nil {
+ return nil, err
+ }
+ bools[i] = val
+ }
+ return bools, nil
+}
+
+func (cmd *Cmd) readReply(rd *proto.Reader) (err error) {
+ cmd.val, err = rd.ReadReply()
+ return err
+}
+
+//------------------------------------------------------------------------------
+
+type SliceCmd struct {
+ baseCmd
+
+ val []interface{}
+}
+
+var _ Cmder = (*SliceCmd)(nil)
+
+func NewSliceCmd(ctx context.Context, args ...interface{}) *SliceCmd {
+ return &SliceCmd{
+ baseCmd: baseCmd{
+ ctx: ctx,
+ args: args,
+ },
+ }
+}
+
+func (cmd *SliceCmd) SetVal(val []interface{}) {
+ cmd.val = val
+}
+
+func (cmd *SliceCmd) Val() []interface{} {
+ return cmd.val
+}
+
+func (cmd *SliceCmd) Result() ([]interface{}, error) {
+ return cmd.val, cmd.err
+}
+
+func (cmd *SliceCmd) String() string {
+ return cmdString(cmd, cmd.val)
+}
+
+// Scan scans the results from the map into a destination struct. The map keys
+// are matched in the Redis struct fields by the `redis:"field"` tag.
+func (cmd *SliceCmd) Scan(dst interface{}) error {
+ if cmd.err != nil {
+ return cmd.err
+ }
+
+ // Pass the list of keys and values.
+ // Skip the first two args for: HMGET key
+ var args []interface{}
+ if cmd.args[0] == "hmget" {
+ args = cmd.args[2:]
+ } else {
+ // Otherwise, it's: MGET field field ...
+ args = cmd.args[1:]
+ }
+
+ return hscan.Scan(dst, args, cmd.val)
+}
+
+func (cmd *SliceCmd) readReply(rd *proto.Reader) (err error) {
+ cmd.val, err = rd.ReadSlice()
+ return err
+}
+
+//------------------------------------------------------------------------------
+
+type StatusCmd struct {
+ baseCmd
+
+ val string
+}
+
+var _ Cmder = (*StatusCmd)(nil)
+
+func NewStatusCmd(ctx context.Context, args ...interface{}) *StatusCmd {
+ return &StatusCmd{
+ baseCmd: baseCmd{
+ ctx: ctx,
+ args: args,
+ },
+ }
+}
+
+func (cmd *StatusCmd) SetVal(val string) {
+ cmd.val = val
+}
+
+func (cmd *StatusCmd) Val() string {
+ return cmd.val
+}
+
+func (cmd *StatusCmd) Result() (string, error) {
+ return cmd.val, cmd.err
+}
+
+func (cmd *StatusCmd) String() string {
+ return cmdString(cmd, cmd.val)
+}
+
+func (cmd *StatusCmd) readReply(rd *proto.Reader) (err error) {
+ cmd.val, err = rd.ReadString()
+ return err
+}
+
+//------------------------------------------------------------------------------
+
+type IntCmd struct {
+ baseCmd
+
+ val int64
+}
+
+var _ Cmder = (*IntCmd)(nil)
+
+func NewIntCmd(ctx context.Context, args ...interface{}) *IntCmd {
+ return &IntCmd{
+ baseCmd: baseCmd{
+ ctx: ctx,
+ args: args,
+ },
+ }
+}
+
+func (cmd *IntCmd) SetVal(val int64) {
+ cmd.val = val
+}
+
+func (cmd *IntCmd) Val() int64 {
+ return cmd.val
+}
+
+func (cmd *IntCmd) Result() (int64, error) {
+ return cmd.val, cmd.err
+}
+
+func (cmd *IntCmd) Uint64() (uint64, error) {
+ return uint64(cmd.val), cmd.err
+}
+
+func (cmd *IntCmd) String() string {
+ return cmdString(cmd, cmd.val)
+}
+
+func (cmd *IntCmd) readReply(rd *proto.Reader) (err error) {
+ cmd.val, err = rd.ReadInt()
+ return err
+}
+
+//------------------------------------------------------------------------------
+
+type IntSliceCmd struct {
+ baseCmd
+
+ val []int64
+}
+
+var _ Cmder = (*IntSliceCmd)(nil)
+
+func NewIntSliceCmd(ctx context.Context, args ...interface{}) *IntSliceCmd {
+ return &IntSliceCmd{
+ baseCmd: baseCmd{
+ ctx: ctx,
+ args: args,
+ },
+ }
+}
+
+func (cmd *IntSliceCmd) SetVal(val []int64) {
+ cmd.val = val
+}
+
+func (cmd *IntSliceCmd) Val() []int64 {
+ return cmd.val
+}
+
+func (cmd *IntSliceCmd) Result() ([]int64, error) {
+ return cmd.val, cmd.err
+}
+
+func (cmd *IntSliceCmd) String() string {
+ return cmdString(cmd, cmd.val)
+}
+
+func (cmd *IntSliceCmd) readReply(rd *proto.Reader) error {
+ n, err := rd.ReadArrayLen()
+ if err != nil {
+ return err
+ }
+ cmd.val = make([]int64, n)
+ for i := 0; i < len(cmd.val); i++ {
+ if cmd.val[i], err = rd.ReadInt(); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+//------------------------------------------------------------------------------
+
+type DurationCmd struct {
+ baseCmd
+
+ val time.Duration
+ precision time.Duration
+}
+
+var _ Cmder = (*DurationCmd)(nil)
+
+func NewDurationCmd(ctx context.Context, precision time.Duration, args ...interface{}) *DurationCmd {
+ return &DurationCmd{
+ baseCmd: baseCmd{
+ ctx: ctx,
+ args: args,
+ },
+ precision: precision,
+ }
+}
+
+func (cmd *DurationCmd) SetVal(val time.Duration) {
+ cmd.val = val
+}
+
+func (cmd *DurationCmd) Val() time.Duration {
+ return cmd.val
+}
+
+func (cmd *DurationCmd) Result() (time.Duration, error) {
+ return cmd.val, cmd.err
+}
+
+func (cmd *DurationCmd) String() string {
+ return cmdString(cmd, cmd.val)
+}
+
+func (cmd *DurationCmd) readReply(rd *proto.Reader) error {
+ n, err := rd.ReadInt()
+ if err != nil {
+ return err
+ }
+ switch n {
+ // -2 if the key does not exist
+ // -1 if the key exists but has no associated expire
+ case -2, -1:
+ cmd.val = time.Duration(n)
+ default:
+ cmd.val = time.Duration(n) * cmd.precision
+ }
+ return nil
+}
+
+//------------------------------------------------------------------------------
+
+type TimeCmd struct {
+ baseCmd
+
+ val time.Time
+}
+
+var _ Cmder = (*TimeCmd)(nil)
+
+func NewTimeCmd(ctx context.Context, args ...interface{}) *TimeCmd {
+ return &TimeCmd{
+ baseCmd: baseCmd{
+ ctx: ctx,
+ args: args,
+ },
+ }
+}
+
+func (cmd *TimeCmd) SetVal(val time.Time) {
+ cmd.val = val
+}
+
+func (cmd *TimeCmd) Val() time.Time {
+ return cmd.val
+}
+
+func (cmd *TimeCmd) Result() (time.Time, error) {
+ return cmd.val, cmd.err
+}
+
+func (cmd *TimeCmd) String() string {
+ return cmdString(cmd, cmd.val)
+}
+
+func (cmd *TimeCmd) readReply(rd *proto.Reader) error {
+ if err := rd.ReadFixedArrayLen(2); err != nil {
+ return err
+ }
+ second, err := rd.ReadInt()
+ if err != nil {
+ return err
+ }
+ microsecond, err := rd.ReadInt()
+ if err != nil {
+ return err
+ }
+ cmd.val = time.Unix(second, microsecond*1000)
+ return nil
+}
+
+//------------------------------------------------------------------------------
+
+type BoolCmd struct {
+ baseCmd
+
+ val bool
+}
+
+var _ Cmder = (*BoolCmd)(nil)
+
+func NewBoolCmd(ctx context.Context, args ...interface{}) *BoolCmd {
+ return &BoolCmd{
+ baseCmd: baseCmd{
+ ctx: ctx,
+ args: args,
+ },
+ }
+}
+
+func (cmd *BoolCmd) SetVal(val bool) {
+ cmd.val = val
+}
+
+func (cmd *BoolCmd) Val() bool {
+ return cmd.val
+}
+
+func (cmd *BoolCmd) Result() (bool, error) {
+ return cmd.val, cmd.err
+}
+
+func (cmd *BoolCmd) String() string {
+ return cmdString(cmd, cmd.val)
+}
+
+func (cmd *BoolCmd) readReply(rd *proto.Reader) (err error) {
+ cmd.val, err = rd.ReadBool()
+
+ // `SET key value NX` returns nil when key already exists. But
+ // `SETNX key value` returns bool (0/1). So convert nil to bool.
+ if err == Nil {
+ cmd.val = false
+ err = nil
+ }
+ return err
+}
+
+//------------------------------------------------------------------------------
+
+type StringCmd struct {
+ baseCmd
+
+ val string
+}
+
+var _ Cmder = (*StringCmd)(nil)
+
+func NewStringCmd(ctx context.Context, args ...interface{}) *StringCmd {
+ return &StringCmd{
+ baseCmd: baseCmd{
+ ctx: ctx,
+ args: args,
+ },
+ }
+}
+
+func (cmd *StringCmd) SetVal(val string) {
+ cmd.val = val
+}
+
+func (cmd *StringCmd) Val() string {
+ return cmd.val
+}
+
+func (cmd *StringCmd) Result() (string, error) {
+ return cmd.val, cmd.err
+}
+
+func (cmd *StringCmd) Bytes() ([]byte, error) {
+ return util.StringToBytes(cmd.val), cmd.err
+}
+
+func (cmd *StringCmd) Bool() (bool, error) {
+ if cmd.err != nil {
+ return false, cmd.err
+ }
+ return strconv.ParseBool(cmd.val)
+}
+
+func (cmd *StringCmd) Int() (int, error) {
+ if cmd.err != nil {
+ return 0, cmd.err
+ }
+ return strconv.Atoi(cmd.Val())
+}
+
+func (cmd *StringCmd) Int64() (int64, error) {
+ if cmd.err != nil {
+ return 0, cmd.err
+ }
+ return strconv.ParseInt(cmd.Val(), 10, 64)
+}
+
+func (cmd *StringCmd) Uint64() (uint64, error) {
+ if cmd.err != nil {
+ return 0, cmd.err
+ }
+ return strconv.ParseUint(cmd.Val(), 10, 64)
+}
+
+func (cmd *StringCmd) Float32() (float32, error) {
+ if cmd.err != nil {
+ return 0, cmd.err
+ }
+ f, err := strconv.ParseFloat(cmd.Val(), 32)
+ if err != nil {
+ return 0, err
+ }
+ return float32(f), nil
+}
+
+func (cmd *StringCmd) Float64() (float64, error) {
+ if cmd.err != nil {
+ return 0, cmd.err
+ }
+ return strconv.ParseFloat(cmd.Val(), 64)
+}
+
+func (cmd *StringCmd) Time() (time.Time, error) {
+ if cmd.err != nil {
+ return time.Time{}, cmd.err
+ }
+ return time.Parse(time.RFC3339Nano, cmd.Val())
+}
+
+func (cmd *StringCmd) Scan(val interface{}) error {
+ if cmd.err != nil {
+ return cmd.err
+ }
+ return proto.Scan([]byte(cmd.val), val)
+}
+
+func (cmd *StringCmd) String() string {
+ return cmdString(cmd, cmd.val)
+}
+
+func (cmd *StringCmd) readReply(rd *proto.Reader) (err error) {
+ cmd.val, err = rd.ReadString()
+ return err
+}
+
+//------------------------------------------------------------------------------
+
+type FloatCmd struct {
+ baseCmd
+
+ val float64
+}
+
+var _ Cmder = (*FloatCmd)(nil)
+
+func NewFloatCmd(ctx context.Context, args ...interface{}) *FloatCmd {
+ return &FloatCmd{
+ baseCmd: baseCmd{
+ ctx: ctx,
+ args: args,
+ },
+ }
+}
+
+func (cmd *FloatCmd) SetVal(val float64) {
+ cmd.val = val
+}
+
+func (cmd *FloatCmd) Val() float64 {
+ return cmd.val
+}
+
+func (cmd *FloatCmd) Result() (float64, error) {
+ return cmd.val, cmd.err
+}
+
+func (cmd *FloatCmd) String() string {
+ return cmdString(cmd, cmd.val)
+}
+
+func (cmd *FloatCmd) readReply(rd *proto.Reader) (err error) {
+ cmd.val, err = rd.ReadFloat()
+ return err
+}
+
+//------------------------------------------------------------------------------
+
+type FloatSliceCmd struct {
+ baseCmd
+
+ val []float64
+}
+
+var _ Cmder = (*FloatSliceCmd)(nil)
+
+func NewFloatSliceCmd(ctx context.Context, args ...interface{}) *FloatSliceCmd {
+ return &FloatSliceCmd{
+ baseCmd: baseCmd{
+ ctx: ctx,
+ args: args,
+ },
+ }
+}
+
+func (cmd *FloatSliceCmd) SetVal(val []float64) {
+ cmd.val = val
+}
+
+func (cmd *FloatSliceCmd) Val() []float64 {
+ return cmd.val
+}
+
+func (cmd *FloatSliceCmd) Result() ([]float64, error) {
+ return cmd.val, cmd.err
+}
+
+func (cmd *FloatSliceCmd) String() string {
+ return cmdString(cmd, cmd.val)
+}
+
+func (cmd *FloatSliceCmd) readReply(rd *proto.Reader) error {
+ n, err := rd.ReadArrayLen()
+ if err != nil {
+ return err
+ }
+
+ cmd.val = make([]float64, n)
+ for i := 0; i < len(cmd.val); i++ {
+ switch num, err := rd.ReadFloat(); {
+ case err == Nil:
+ cmd.val[i] = 0
+ case err != nil:
+ return err
+ default:
+ cmd.val[i] = num
+ }
+ }
+ return nil
+}
+
+//------------------------------------------------------------------------------
+
+type StringSliceCmd struct {
+ baseCmd
+
+ val []string
+}
+
+var _ Cmder = (*StringSliceCmd)(nil)
+
+func NewStringSliceCmd(ctx context.Context, args ...interface{}) *StringSliceCmd {
+ return &StringSliceCmd{
+ baseCmd: baseCmd{
+ ctx: ctx,
+ args: args,
+ },
+ }
+}
+
+func (cmd *StringSliceCmd) SetVal(val []string) {
+ cmd.val = val
+}
+
+func (cmd *StringSliceCmd) Val() []string {
+ return cmd.val
+}
+
+func (cmd *StringSliceCmd) Result() ([]string, error) {
+ return cmd.val, cmd.err
+}
+
+func (cmd *StringSliceCmd) String() string {
+ return cmdString(cmd, cmd.val)
+}
+
+func (cmd *StringSliceCmd) ScanSlice(container interface{}) error {
+ return proto.ScanSlice(cmd.Val(), container)
+}
+
+func (cmd *StringSliceCmd) readReply(rd *proto.Reader) error {
+ n, err := rd.ReadArrayLen()
+ if err != nil {
+ return err
+ }
+ cmd.val = make([]string, n)
+ for i := 0; i < len(cmd.val); i++ {
+ switch s, err := rd.ReadString(); {
+ case err == Nil:
+ cmd.val[i] = ""
+ case err != nil:
+ return err
+ default:
+ cmd.val[i] = s
+ }
+ }
+ return nil
+}
+
+//------------------------------------------------------------------------------
+
+type KeyValue struct {
+ Key string
+ Value string
+}
+
+type KeyValueSliceCmd struct {
+ baseCmd
+
+ val []KeyValue
+}
+
+var _ Cmder = (*KeyValueSliceCmd)(nil)
+
+func NewKeyValueSliceCmd(ctx context.Context, args ...interface{}) *KeyValueSliceCmd {
+ return &KeyValueSliceCmd{
+ baseCmd: baseCmd{
+ ctx: ctx,
+ args: args,
+ },
+ }
+}
+
+func (cmd *KeyValueSliceCmd) SetVal(val []KeyValue) {
+ cmd.val = val
+}
+
+func (cmd *KeyValueSliceCmd) Val() []KeyValue {
+ return cmd.val
+}
+
+func (cmd *KeyValueSliceCmd) Result() ([]KeyValue, error) {
+ return cmd.val, cmd.err
+}
+
+func (cmd *KeyValueSliceCmd) String() string {
+ return cmdString(cmd, cmd.val)
+}
+
+// Many commands will respond to two formats:
+// 1. 1) "one"
+// 2. (double) 1
+// 2. 1) "two"
+// 2. (double) 2
+//
+// OR:
+// 1. "two"
+// 2. (double) 2
+// 3. "one"
+// 4. (double) 1
+func (cmd *KeyValueSliceCmd) readReply(rd *proto.Reader) error { // nolint:dupl
+ n, err := rd.ReadArrayLen()
+ if err != nil {
+ return err
+ }
+
+ // If the n is 0, can't continue reading.
+ if n == 0 {
+ cmd.val = make([]KeyValue, 0)
+ return nil
+ }
+
+ typ, err := rd.PeekReplyType()
+ if err != nil {
+ return err
+ }
+ array := typ == proto.RespArray
+
+ if array {
+ cmd.val = make([]KeyValue, n)
+ } else {
+ cmd.val = make([]KeyValue, n/2)
+ }
+
+ for i := 0; i < len(cmd.val); i++ {
+ if array {
+ if err = rd.ReadFixedArrayLen(2); err != nil {
+ return err
+ }
+ }
+
+ if cmd.val[i].Key, err = rd.ReadString(); err != nil {
+ return err
+ }
+
+ if cmd.val[i].Value, err = rd.ReadString(); err != nil {
+ return err
+ }
+ }
+
+ return nil
+}
+
+//------------------------------------------------------------------------------
+
+type BoolSliceCmd struct {
+ baseCmd
+
+ val []bool
+}
+
+var _ Cmder = (*BoolSliceCmd)(nil)
+
+func NewBoolSliceCmd(ctx context.Context, args ...interface{}) *BoolSliceCmd {
+ return &BoolSliceCmd{
+ baseCmd: baseCmd{
+ ctx: ctx,
+ args: args,
+ },
+ }
+}
+
+func (cmd *BoolSliceCmd) SetVal(val []bool) {
+ cmd.val = val
+}
+
+func (cmd *BoolSliceCmd) Val() []bool {
+ return cmd.val
+}
+
+func (cmd *BoolSliceCmd) Result() ([]bool, error) {
+ return cmd.val, cmd.err
+}
+
+func (cmd *BoolSliceCmd) String() string {
+ return cmdString(cmd, cmd.val)
+}
+
+func (cmd *BoolSliceCmd) readReply(rd *proto.Reader) error {
+ n, err := rd.ReadArrayLen()
+ if err != nil {
+ return err
+ }
+ cmd.val = make([]bool, n)
+ for i := 0; i < len(cmd.val); i++ {
+ if cmd.val[i], err = rd.ReadBool(); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+//------------------------------------------------------------------------------
+
+type MapStringStringCmd struct {
+ baseCmd
+
+ val map[string]string
+}
+
+var _ Cmder = (*MapStringStringCmd)(nil)
+
+func NewMapStringStringCmd(ctx context.Context, args ...interface{}) *MapStringStringCmd {
+ return &MapStringStringCmd{
+ baseCmd: baseCmd{
+ ctx: ctx,
+ args: args,
+ },
+ }
+}
+
+func (cmd *MapStringStringCmd) Val() map[string]string {
+ return cmd.val
+}
+
+func (cmd *MapStringStringCmd) SetVal(val map[string]string) {
+ cmd.val = val
+}
+
+func (cmd *MapStringStringCmd) Result() (map[string]string, error) {
+ return cmd.val, cmd.err
+}
+
+func (cmd *MapStringStringCmd) String() string {
+ return cmdString(cmd, cmd.val)
+}
+
+// Scan scans the results from the map into a destination struct. The map keys
+// are matched in the Redis struct fields by the `redis:"field"` tag.
+func (cmd *MapStringStringCmd) Scan(dest interface{}) error {
+ if cmd.err != nil {
+ return cmd.err
+ }
+
+ strct, err := hscan.Struct(dest)
+ if err != nil {
+ return err
+ }
+
+ for k, v := range cmd.val {
+ if err := strct.Scan(k, v); err != nil {
+ return err
+ }
+ }
+
+ return nil
+}
+
+func (cmd *MapStringStringCmd) readReply(rd *proto.Reader) error {
+ n, err := rd.ReadMapLen()
+ if err != nil {
+ return err
+ }
+
+ cmd.val = make(map[string]string, n)
+ for i := 0; i < n; i++ {
+ key, err := rd.ReadString()
+ if err != nil {
+ return err
+ }
+
+ value, err := rd.ReadString()
+ if err != nil {
+ return err
+ }
+
+ cmd.val[key] = value
+ }
+ return nil
+}
+
+//------------------------------------------------------------------------------
+
+type MapStringIntCmd struct {
+ baseCmd
+
+ val map[string]int64
+}
+
+var _ Cmder = (*MapStringIntCmd)(nil)
+
+func NewMapStringIntCmd(ctx context.Context, args ...interface{}) *MapStringIntCmd {
+ return &MapStringIntCmd{
+ baseCmd: baseCmd{
+ ctx: ctx,
+ args: args,
+ },
+ }
+}
+
+func (cmd *MapStringIntCmd) SetVal(val map[string]int64) {
+ cmd.val = val
+}
+
+func (cmd *MapStringIntCmd) Val() map[string]int64 {
+ return cmd.val
+}
+
+func (cmd *MapStringIntCmd) Result() (map[string]int64, error) {
+ return cmd.val, cmd.err
+}
+
+func (cmd *MapStringIntCmd) String() string {
+ return cmdString(cmd, cmd.val)
+}
+
+func (cmd *MapStringIntCmd) readReply(rd *proto.Reader) error {
+ n, err := rd.ReadMapLen()
+ if err != nil {
+ return err
+ }
+
+ cmd.val = make(map[string]int64, n)
+ for i := 0; i < n; i++ {
+ key, err := rd.ReadString()
+ if err != nil {
+ return err
+ }
+
+ nn, err := rd.ReadInt()
+ if err != nil {
+ return err
+ }
+ cmd.val[key] = nn
+ }
+ return nil
+}
+
+// ------------------------------------------------------------------------------
+type MapStringSliceInterfaceCmd struct {
+ baseCmd
+ val map[string][]interface{}
+}
+
+func NewMapStringSliceInterfaceCmd(ctx context.Context, args ...interface{}) *MapStringSliceInterfaceCmd {
+ return &MapStringSliceInterfaceCmd{
+ baseCmd: baseCmd{
+ ctx: ctx,
+ args: args,
+ },
+ }
+}
+
+func (cmd *MapStringSliceInterfaceCmd) String() string {
+ return cmdString(cmd, cmd.val)
+}
+
+func (cmd *MapStringSliceInterfaceCmd) SetVal(val map[string][]interface{}) {
+ cmd.val = val
+}
+
+func (cmd *MapStringSliceInterfaceCmd) Result() (map[string][]interface{}, error) {
+ return cmd.val, cmd.err
+}
+
+func (cmd *MapStringSliceInterfaceCmd) Val() map[string][]interface{} {
+ return cmd.val
+}
+
+func (cmd *MapStringSliceInterfaceCmd) readReply(rd *proto.Reader) (err error) {
+ n, err := rd.ReadMapLen()
+ if err != nil {
+ return err
+ }
+ cmd.val = make(map[string][]interface{}, n)
+ for i := 0; i < n; i++ {
+ k, err := rd.ReadString()
+ if err != nil {
+ return err
+ }
+ nn, err := rd.ReadArrayLen()
+ if err != nil {
+ return err
+ }
+ cmd.val[k] = make([]interface{}, nn)
+ for j := 0; j < nn; j++ {
+ value, err := rd.ReadReply()
+ if err != nil {
+ return err
+ }
+ cmd.val[k][j] = value
+ }
+ }
+
+ return nil
+}
+
+//------------------------------------------------------------------------------
+
+type StringStructMapCmd struct {
+ baseCmd
+
+ val map[string]struct{}
+}
+
+var _ Cmder = (*StringStructMapCmd)(nil)
+
+func NewStringStructMapCmd(ctx context.Context, args ...interface{}) *StringStructMapCmd {
+ return &StringStructMapCmd{
+ baseCmd: baseCmd{
+ ctx: ctx,
+ args: args,
+ },
+ }
+}
+
+func (cmd *StringStructMapCmd) SetVal(val map[string]struct{}) {
+ cmd.val = val
+}
+
+func (cmd *StringStructMapCmd) Val() map[string]struct{} {
+ return cmd.val
+}
+
+func (cmd *StringStructMapCmd) Result() (map[string]struct{}, error) {
+ return cmd.val, cmd.err
+}
+
+func (cmd *StringStructMapCmd) String() string {
+ return cmdString(cmd, cmd.val)
+}
+
+func (cmd *StringStructMapCmd) readReply(rd *proto.Reader) error {
+ n, err := rd.ReadArrayLen()
+ if err != nil {
+ return err
+ }
+
+ cmd.val = make(map[string]struct{}, n)
+ for i := 0; i < n; i++ {
+ key, err := rd.ReadString()
+ if err != nil {
+ return err
+ }
+ cmd.val[key] = struct{}{}
+ }
+ return nil
+}
+
+//------------------------------------------------------------------------------
+
+type XMessage struct {
+ ID string
+ Values map[string]interface{}
+}
+
+type XMessageSliceCmd struct {
+ baseCmd
+
+ val []XMessage
+}
+
+var _ Cmder = (*XMessageSliceCmd)(nil)
+
+func NewXMessageSliceCmd(ctx context.Context, args ...interface{}) *XMessageSliceCmd {
+ return &XMessageSliceCmd{
+ baseCmd: baseCmd{
+ ctx: ctx,
+ args: args,
+ },
+ }
+}
+
+func (cmd *XMessageSliceCmd) SetVal(val []XMessage) {
+ cmd.val = val
+}
+
+func (cmd *XMessageSliceCmd) Val() []XMessage {
+ return cmd.val
+}
+
+func (cmd *XMessageSliceCmd) Result() ([]XMessage, error) {
+ return cmd.val, cmd.err
+}
+
+func (cmd *XMessageSliceCmd) String() string {
+ return cmdString(cmd, cmd.val)
+}
+
+func (cmd *XMessageSliceCmd) readReply(rd *proto.Reader) (err error) {
+ cmd.val, err = readXMessageSlice(rd)
+ return err
+}
+
+func readXMessageSlice(rd *proto.Reader) ([]XMessage, error) {
+ n, err := rd.ReadArrayLen()
+ if err != nil {
+ return nil, err
+ }
+
+ msgs := make([]XMessage, n)
+ for i := 0; i < len(msgs); i++ {
+ if msgs[i], err = readXMessage(rd); err != nil {
+ return nil, err
+ }
+ }
+ return msgs, nil
+}
+
+func readXMessage(rd *proto.Reader) (XMessage, error) {
+ if err := rd.ReadFixedArrayLen(2); err != nil {
+ return XMessage{}, err
+ }
+
+ id, err := rd.ReadString()
+ if err != nil {
+ return XMessage{}, err
+ }
+
+ v, err := stringInterfaceMapParser(rd)
+ if err != nil {
+ if err != proto.Nil {
+ return XMessage{}, err
+ }
+ }
+
+ return XMessage{
+ ID: id,
+ Values: v,
+ }, nil
+}
+
+func stringInterfaceMapParser(rd *proto.Reader) (map[string]interface{}, error) {
+ n, err := rd.ReadMapLen()
+ if err != nil {
+ return nil, err
+ }
+
+ m := make(map[string]interface{}, n)
+ for i := 0; i < n; i++ {
+ key, err := rd.ReadString()
+ if err != nil {
+ return nil, err
+ }
+
+ value, err := rd.ReadString()
+ if err != nil {
+ return nil, err
+ }
+
+ m[key] = value
+ }
+ return m, nil
+}
+
+//------------------------------------------------------------------------------
+
+type XStream struct {
+ Stream string
+ Messages []XMessage
+}
+
+type XStreamSliceCmd struct {
+ baseCmd
+
+ val []XStream
+}
+
+var _ Cmder = (*XStreamSliceCmd)(nil)
+
+func NewXStreamSliceCmd(ctx context.Context, args ...interface{}) *XStreamSliceCmd {
+ return &XStreamSliceCmd{
+ baseCmd: baseCmd{
+ ctx: ctx,
+ args: args,
+ },
+ }
+}
+
+func (cmd *XStreamSliceCmd) SetVal(val []XStream) {
+ cmd.val = val
+}
+
+func (cmd *XStreamSliceCmd) Val() []XStream {
+ return cmd.val
+}
+
+func (cmd *XStreamSliceCmd) Result() ([]XStream, error) {
+ return cmd.val, cmd.err
+}
+
+func (cmd *XStreamSliceCmd) String() string {
+ return cmdString(cmd, cmd.val)
+}
+
+func (cmd *XStreamSliceCmd) readReply(rd *proto.Reader) error {
+ typ, err := rd.PeekReplyType()
+ if err != nil {
+ return err
+ }
+
+ var n int
+ if typ == proto.RespMap {
+ n, err = rd.ReadMapLen()
+ } else {
+ n, err = rd.ReadArrayLen()
+ }
+ if err != nil {
+ return err
+ }
+ cmd.val = make([]XStream, n)
+ for i := 0; i < len(cmd.val); i++ {
+ if typ != proto.RespMap {
+ if err = rd.ReadFixedArrayLen(2); err != nil {
+ return err
+ }
+ }
+ if cmd.val[i].Stream, err = rd.ReadString(); err != nil {
+ return err
+ }
+ if cmd.val[i].Messages, err = readXMessageSlice(rd); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+//------------------------------------------------------------------------------
+
+type XPending struct {
+ Count int64
+ Lower string
+ Higher string
+ Consumers map[string]int64
+}
+
+type XPendingCmd struct {
+ baseCmd
+ val *XPending
+}
+
+var _ Cmder = (*XPendingCmd)(nil)
+
+func NewXPendingCmd(ctx context.Context, args ...interface{}) *XPendingCmd {
+ return &XPendingCmd{
+ baseCmd: baseCmd{
+ ctx: ctx,
+ args: args,
+ },
+ }
+}
+
+func (cmd *XPendingCmd) SetVal(val *XPending) {
+ cmd.val = val
+}
+
+func (cmd *XPendingCmd) Val() *XPending {
+ return cmd.val
+}
+
+func (cmd *XPendingCmd) Result() (*XPending, error) {
+ return cmd.val, cmd.err
+}
+
+func (cmd *XPendingCmd) String() string {
+ return cmdString(cmd, cmd.val)
+}
+
+func (cmd *XPendingCmd) readReply(rd *proto.Reader) error {
+ var err error
+ if err = rd.ReadFixedArrayLen(4); err != nil {
+ return err
+ }
+ cmd.val = &XPending{}
+
+ if cmd.val.Count, err = rd.ReadInt(); err != nil {
+ return err
+ }
+
+ if cmd.val.Lower, err = rd.ReadString(); err != nil && err != Nil {
+ return err
+ }
+
+ if cmd.val.Higher, err = rd.ReadString(); err != nil && err != Nil {
+ return err
+ }
+
+ n, err := rd.ReadArrayLen()
+ if err != nil && err != Nil {
+ return err
+ }
+ cmd.val.Consumers = make(map[string]int64, n)
+ for i := 0; i < n; i++ {
+ if err = rd.ReadFixedArrayLen(2); err != nil {
+ return err
+ }
+
+ consumerName, err := rd.ReadString()
+ if err != nil {
+ return err
+ }
+ consumerPending, err := rd.ReadInt()
+ if err != nil {
+ return err
+ }
+ cmd.val.Consumers[consumerName] = consumerPending
+ }
+ return nil
+}
+
+//------------------------------------------------------------------------------
+
+type XPendingExt struct {
+ ID string
+ Consumer string
+ Idle time.Duration
+ RetryCount int64
+}
+
+type XPendingExtCmd struct {
+ baseCmd
+ val []XPendingExt
+}
+
+var _ Cmder = (*XPendingExtCmd)(nil)
+
+func NewXPendingExtCmd(ctx context.Context, args ...interface{}) *XPendingExtCmd {
+ return &XPendingExtCmd{
+ baseCmd: baseCmd{
+ ctx: ctx,
+ args: args,
+ },
+ }
+}
+
+func (cmd *XPendingExtCmd) SetVal(val []XPendingExt) {
+ cmd.val = val
+}
+
+func (cmd *XPendingExtCmd) Val() []XPendingExt {
+ return cmd.val
+}
+
+func (cmd *XPendingExtCmd) Result() ([]XPendingExt, error) {
+ return cmd.val, cmd.err
+}
+
+func (cmd *XPendingExtCmd) String() string {
+ return cmdString(cmd, cmd.val)
+}
+
+func (cmd *XPendingExtCmd) readReply(rd *proto.Reader) error {
+ n, err := rd.ReadArrayLen()
+ if err != nil {
+ return err
+ }
+ cmd.val = make([]XPendingExt, n)
+
+ for i := 0; i < len(cmd.val); i++ {
+ if err = rd.ReadFixedArrayLen(4); err != nil {
+ return err
+ }
+
+ if cmd.val[i].ID, err = rd.ReadString(); err != nil {
+ return err
+ }
+
+ if cmd.val[i].Consumer, err = rd.ReadString(); err != nil && err != Nil {
+ return err
+ }
+
+ idle, err := rd.ReadInt()
+ if err != nil && err != Nil {
+ return err
+ }
+ cmd.val[i].Idle = time.Duration(idle) * time.Millisecond
+
+ if cmd.val[i].RetryCount, err = rd.ReadInt(); err != nil && err != Nil {
+ return err
+ }
+ }
+
+ return nil
+}
+
+//------------------------------------------------------------------------------
+
+type XAutoClaimCmd struct {
+ baseCmd
+
+ start string
+ val []XMessage
+}
+
+var _ Cmder = (*XAutoClaimCmd)(nil)
+
+func NewXAutoClaimCmd(ctx context.Context, args ...interface{}) *XAutoClaimCmd {
+ return &XAutoClaimCmd{
+ baseCmd: baseCmd{
+ ctx: ctx,
+ args: args,
+ },
+ }
+}
+
+func (cmd *XAutoClaimCmd) SetVal(val []XMessage, start string) {
+ cmd.val = val
+ cmd.start = start
+}
+
+func (cmd *XAutoClaimCmd) Val() (messages []XMessage, start string) {
+ return cmd.val, cmd.start
+}
+
+func (cmd *XAutoClaimCmd) Result() (messages []XMessage, start string, err error) {
+ return cmd.val, cmd.start, cmd.err
+}
+
+func (cmd *XAutoClaimCmd) String() string {
+ return cmdString(cmd, cmd.val)
+}
+
+func (cmd *XAutoClaimCmd) readReply(rd *proto.Reader) error {
+ n, err := rd.ReadArrayLen()
+ if err != nil {
+ return err
+ }
+
+ switch n {
+ case 2, // Redis 6
+ 3: // Redis 7:
+ // ok
+ default:
+ return fmt.Errorf("redis: got %d elements in XAutoClaim reply, wanted 2/3", n)
+ }
+
+ cmd.start, err = rd.ReadString()
+ if err != nil {
+ return err
+ }
+
+ cmd.val, err = readXMessageSlice(rd)
+ if err != nil {
+ return err
+ }
+
+ if n >= 3 {
+ if err := rd.DiscardNext(); err != nil {
+ return err
+ }
+ }
+
+ return nil
+}
+
+//------------------------------------------------------------------------------
+
+type XAutoClaimJustIDCmd struct {
+ baseCmd
+
+ start string
+ val []string
+}
+
+var _ Cmder = (*XAutoClaimJustIDCmd)(nil)
+
+func NewXAutoClaimJustIDCmd(ctx context.Context, args ...interface{}) *XAutoClaimJustIDCmd {
+ return &XAutoClaimJustIDCmd{
+ baseCmd: baseCmd{
+ ctx: ctx,
+ args: args,
+ },
+ }
+}
+
+func (cmd *XAutoClaimJustIDCmd) SetVal(val []string, start string) {
+ cmd.val = val
+ cmd.start = start
+}
+
+func (cmd *XAutoClaimJustIDCmd) Val() (ids []string, start string) {
+ return cmd.val, cmd.start
+}
+
+func (cmd *XAutoClaimJustIDCmd) Result() (ids []string, start string, err error) {
+ return cmd.val, cmd.start, cmd.err
+}
+
+func (cmd *XAutoClaimJustIDCmd) String() string {
+ return cmdString(cmd, cmd.val)
+}
+
+func (cmd *XAutoClaimJustIDCmd) readReply(rd *proto.Reader) error {
+ n, err := rd.ReadArrayLen()
+ if err != nil {
+ return err
+ }
+
+ switch n {
+ case 2, // Redis 6
+ 3: // Redis 7:
+ // ok
+ default:
+ return fmt.Errorf("redis: got %d elements in XAutoClaimJustID reply, wanted 2/3", n)
+ }
+
+ cmd.start, err = rd.ReadString()
+ if err != nil {
+ return err
+ }
+
+ nn, err := rd.ReadArrayLen()
+ if err != nil {
+ return err
+ }
+
+ cmd.val = make([]string, nn)
+ for i := 0; i < nn; i++ {
+ cmd.val[i], err = rd.ReadString()
+ if err != nil {
+ return err
+ }
+ }
+
+ if n >= 3 {
+ if err := rd.DiscardNext(); err != nil {
+ return err
+ }
+ }
+
+ return nil
+}
+
+//------------------------------------------------------------------------------
+
+type XInfoConsumersCmd struct {
+ baseCmd
+ val []XInfoConsumer
+}
+
+type XInfoConsumer struct {
+ Name string
+ Pending int64
+ Idle time.Duration
+ Inactive time.Duration
+}
+
+var _ Cmder = (*XInfoConsumersCmd)(nil)
+
+func NewXInfoConsumersCmd(ctx context.Context, stream string, group string) *XInfoConsumersCmd {
+ return &XInfoConsumersCmd{
+ baseCmd: baseCmd{
+ ctx: ctx,
+ args: []interface{}{"xinfo", "consumers", stream, group},
+ },
+ }
+}
+
+func (cmd *XInfoConsumersCmd) SetVal(val []XInfoConsumer) {
+ cmd.val = val
+}
+
+func (cmd *XInfoConsumersCmd) Val() []XInfoConsumer {
+ return cmd.val
+}
+
+func (cmd *XInfoConsumersCmd) Result() ([]XInfoConsumer, error) {
+ return cmd.val, cmd.err
+}
+
+func (cmd *XInfoConsumersCmd) String() string {
+ return cmdString(cmd, cmd.val)
+}
+
+func (cmd *XInfoConsumersCmd) readReply(rd *proto.Reader) error {
+ n, err := rd.ReadArrayLen()
+ if err != nil {
+ return err
+ }
+ cmd.val = make([]XInfoConsumer, n)
+
+ for i := 0; i < len(cmd.val); i++ {
+ nn, err := rd.ReadMapLen()
+ if err != nil {
+ return err
+ }
+
+ var key string
+ for f := 0; f < nn; f++ {
+ key, err = rd.ReadString()
+ if err != nil {
+ return err
+ }
+
+ switch key {
+ case "name":
+ cmd.val[i].Name, err = rd.ReadString()
+ case "pending":
+ cmd.val[i].Pending, err = rd.ReadInt()
+ case "idle":
+ var idle int64
+ idle, err = rd.ReadInt()
+ cmd.val[i].Idle = time.Duration(idle) * time.Millisecond
+ case "inactive":
+ var inactive int64
+ inactive, err = rd.ReadInt()
+ cmd.val[i].Inactive = time.Duration(inactive) * time.Millisecond
+ default:
+ return fmt.Errorf("redis: unexpected content %s in XINFO CONSUMERS reply", key)
+ }
+ if err != nil {
+ return err
+ }
+ }
+ }
+
+ return nil
+}
+
+//------------------------------------------------------------------------------
+
+type XInfoGroupsCmd struct {
+ baseCmd
+ val []XInfoGroup
+}
+
+type XInfoGroup struct {
+ Name string
+ Consumers int64
+ Pending int64
+ LastDeliveredID string
+ EntriesRead int64
+ Lag int64
+}
+
+var _ Cmder = (*XInfoGroupsCmd)(nil)
+
+func NewXInfoGroupsCmd(ctx context.Context, stream string) *XInfoGroupsCmd {
+ return &XInfoGroupsCmd{
+ baseCmd: baseCmd{
+ ctx: ctx,
+ args: []interface{}{"xinfo", "groups", stream},
+ },
+ }
+}
+
+func (cmd *XInfoGroupsCmd) SetVal(val []XInfoGroup) {
+ cmd.val = val
+}
+
+func (cmd *XInfoGroupsCmd) Val() []XInfoGroup {
+ return cmd.val
+}
+
+func (cmd *XInfoGroupsCmd) Result() ([]XInfoGroup, error) {
+ return cmd.val, cmd.err
+}
+
+func (cmd *XInfoGroupsCmd) String() string {
+ return cmdString(cmd, cmd.val)
+}
+
+func (cmd *XInfoGroupsCmd) readReply(rd *proto.Reader) error {
+ n, err := rd.ReadArrayLen()
+ if err != nil {
+ return err
+ }
+ cmd.val = make([]XInfoGroup, n)
+
+ for i := 0; i < len(cmd.val); i++ {
+ group := &cmd.val[i]
+
+ nn, err := rd.ReadMapLen()
+ if err != nil {
+ return err
+ }
+
+ var key string
+ for j := 0; j < nn; j++ {
+ key, err = rd.ReadString()
+ if err != nil {
+ return err
+ }
+
+ switch key {
+ case "name":
+ group.Name, err = rd.ReadString()
+ if err != nil {
+ return err
+ }
+ case "consumers":
+ group.Consumers, err = rd.ReadInt()
+ if err != nil {
+ return err
+ }
+ case "pending":
+ group.Pending, err = rd.ReadInt()
+ if err != nil {
+ return err
+ }
+ case "last-delivered-id":
+ group.LastDeliveredID, err = rd.ReadString()
+ if err != nil {
+ return err
+ }
+ case "entries-read":
+ group.EntriesRead, err = rd.ReadInt()
+ if err != nil && err != Nil {
+ return err
+ }
+ case "lag":
+ group.Lag, err = rd.ReadInt()
+
+ // lag: the number of entries in the stream that are still waiting to be delivered
+ // to the group's consumers, or a NULL(Nil) when that number can't be determined.
+ if err != nil && err != Nil {
+ return err
+ }
+ default:
+ return fmt.Errorf("redis: unexpected key %q in XINFO GROUPS reply", key)
+ }
+ }
+ }
+
+ return nil
+}
+
+//------------------------------------------------------------------------------
+
+type XInfoStreamCmd struct {
+ baseCmd
+ val *XInfoStream
+}
+
+type XInfoStream struct {
+ Length int64
+ RadixTreeKeys int64
+ RadixTreeNodes int64
+ Groups int64
+ LastGeneratedID string
+ MaxDeletedEntryID string
+ EntriesAdded int64
+ FirstEntry XMessage
+ LastEntry XMessage
+ RecordedFirstEntryID string
+}
+
+var _ Cmder = (*XInfoStreamCmd)(nil)
+
+func NewXInfoStreamCmd(ctx context.Context, stream string) *XInfoStreamCmd {
+ return &XInfoStreamCmd{
+ baseCmd: baseCmd{
+ ctx: ctx,
+ args: []interface{}{"xinfo", "stream", stream},
+ },
+ }
+}
+
+func (cmd *XInfoStreamCmd) SetVal(val *XInfoStream) {
+ cmd.val = val
+}
+
+func (cmd *XInfoStreamCmd) Val() *XInfoStream {
+ return cmd.val
+}
+
+func (cmd *XInfoStreamCmd) Result() (*XInfoStream, error) {
+ return cmd.val, cmd.err
+}
+
+func (cmd *XInfoStreamCmd) String() string {
+ return cmdString(cmd, cmd.val)
+}
+
+func (cmd *XInfoStreamCmd) readReply(rd *proto.Reader) error {
+ n, err := rd.ReadMapLen()
+ if err != nil {
+ return err
+ }
+ cmd.val = &XInfoStream{}
+
+ for i := 0; i < n; i++ {
+ key, err := rd.ReadString()
+ if err != nil {
+ return err
+ }
+ switch key {
+ case "length":
+ cmd.val.Length, err = rd.ReadInt()
+ if err != nil {
+ return err
+ }
+ case "radix-tree-keys":
+ cmd.val.RadixTreeKeys, err = rd.ReadInt()
+ if err != nil {
+ return err
+ }
+ case "radix-tree-nodes":
+ cmd.val.RadixTreeNodes, err = rd.ReadInt()
+ if err != nil {
+ return err
+ }
+ case "groups":
+ cmd.val.Groups, err = rd.ReadInt()
+ if err != nil {
+ return err
+ }
+ case "last-generated-id":
+ cmd.val.LastGeneratedID, err = rd.ReadString()
+ if err != nil {
+ return err
+ }
+ case "max-deleted-entry-id":
+ cmd.val.MaxDeletedEntryID, err = rd.ReadString()
+ if err != nil {
+ return err
+ }
+ case "entries-added":
+ cmd.val.EntriesAdded, err = rd.ReadInt()
+ if err != nil {
+ return err
+ }
+ case "first-entry":
+ cmd.val.FirstEntry, err = readXMessage(rd)
+ if err != nil && err != Nil {
+ return err
+ }
+ case "last-entry":
+ cmd.val.LastEntry, err = readXMessage(rd)
+ if err != nil && err != Nil {
+ return err
+ }
+ case "recorded-first-entry-id":
+ cmd.val.RecordedFirstEntryID, err = rd.ReadString()
+ if err != nil {
+ return err
+ }
+ default:
+ return fmt.Errorf("redis: unexpected key %q in XINFO STREAM reply", key)
+ }
+ }
+ return nil
+}
+
+//------------------------------------------------------------------------------
+
+type XInfoStreamFullCmd struct {
+ baseCmd
+ val *XInfoStreamFull
+}
+
+type XInfoStreamFull struct {
+ Length int64
+ RadixTreeKeys int64
+ RadixTreeNodes int64
+ LastGeneratedID string
+ MaxDeletedEntryID string
+ EntriesAdded int64
+ Entries []XMessage
+ Groups []XInfoStreamGroup
+ RecordedFirstEntryID string
+}
+
+type XInfoStreamGroup struct {
+ Name string
+ LastDeliveredID string
+ EntriesRead int64
+ Lag int64
+ PelCount int64
+ Pending []XInfoStreamGroupPending
+ Consumers []XInfoStreamConsumer
+}
+
+type XInfoStreamGroupPending struct {
+ ID string
+ Consumer string
+ DeliveryTime time.Time
+ DeliveryCount int64
+}
+
+type XInfoStreamConsumer struct {
+ Name string
+ SeenTime time.Time
+ ActiveTime time.Time
+ PelCount int64
+ Pending []XInfoStreamConsumerPending
+}
+
+type XInfoStreamConsumerPending struct {
+ ID string
+ DeliveryTime time.Time
+ DeliveryCount int64
+}
+
+var _ Cmder = (*XInfoStreamFullCmd)(nil)
+
+func NewXInfoStreamFullCmd(ctx context.Context, args ...interface{}) *XInfoStreamFullCmd {
+ return &XInfoStreamFullCmd{
+ baseCmd: baseCmd{
+ ctx: ctx,
+ args: args,
+ },
+ }
+}
+
+func (cmd *XInfoStreamFullCmd) SetVal(val *XInfoStreamFull) {
+ cmd.val = val
+}
+
+func (cmd *XInfoStreamFullCmd) Val() *XInfoStreamFull {
+ return cmd.val
+}
+
+func (cmd *XInfoStreamFullCmd) Result() (*XInfoStreamFull, error) {
+ return cmd.val, cmd.err
+}
+
+func (cmd *XInfoStreamFullCmd) String() string {
+ return cmdString(cmd, cmd.val)
+}
+
+func (cmd *XInfoStreamFullCmd) readReply(rd *proto.Reader) error {
+ n, err := rd.ReadMapLen()
+ if err != nil {
+ return err
+ }
+
+ cmd.val = &XInfoStreamFull{}
+
+ for i := 0; i < n; i++ {
+ key, err := rd.ReadString()
+ if err != nil {
+ return err
+ }
+
+ switch key {
+ case "length":
+ cmd.val.Length, err = rd.ReadInt()
+ if err != nil {
+ return err
+ }
+ case "radix-tree-keys":
+ cmd.val.RadixTreeKeys, err = rd.ReadInt()
+ if err != nil {
+ return err
+ }
+ case "radix-tree-nodes":
+ cmd.val.RadixTreeNodes, err = rd.ReadInt()
+ if err != nil {
+ return err
+ }
+ case "last-generated-id":
+ cmd.val.LastGeneratedID, err = rd.ReadString()
+ if err != nil {
+ return err
+ }
+ case "entries-added":
+ cmd.val.EntriesAdded, err = rd.ReadInt()
+ if err != nil {
+ return err
+ }
+ case "entries":
+ cmd.val.Entries, err = readXMessageSlice(rd)
+ if err != nil {
+ return err
+ }
+ case "groups":
+ cmd.val.Groups, err = readStreamGroups(rd)
+ if err != nil {
+ return err
+ }
+ case "max-deleted-entry-id":
+ cmd.val.MaxDeletedEntryID, err = rd.ReadString()
+ if err != nil {
+ return err
+ }
+ case "recorded-first-entry-id":
+ cmd.val.RecordedFirstEntryID, err = rd.ReadString()
+ if err != nil {
+ return err
+ }
+ default:
+ return fmt.Errorf("redis: unexpected key %q in XINFO STREAM FULL reply", key)
+ }
+ }
+ return nil
+}
+
+func readStreamGroups(rd *proto.Reader) ([]XInfoStreamGroup, error) {
+ n, err := rd.ReadArrayLen()
+ if err != nil {
+ return nil, err
+ }
+ groups := make([]XInfoStreamGroup, 0, n)
+ for i := 0; i < n; i++ {
+ nn, err := rd.ReadMapLen()
+ if err != nil {
+ return nil, err
+ }
+
+ group := XInfoStreamGroup{}
+
+ for j := 0; j < nn; j++ {
+ key, err := rd.ReadString()
+ if err != nil {
+ return nil, err
+ }
+
+ switch key {
+ case "name":
+ group.Name, err = rd.ReadString()
+ if err != nil {
+ return nil, err
+ }
+ case "last-delivered-id":
+ group.LastDeliveredID, err = rd.ReadString()
+ if err != nil {
+ return nil, err
+ }
+ case "entries-read":
+ group.EntriesRead, err = rd.ReadInt()
+ if err != nil && err != Nil {
+ return nil, err
+ }
+ case "lag":
+ // lag: the number of entries in the stream that are still waiting to be delivered
+ // to the group's consumers, or a NULL(Nil) when that number can't be determined.
+ group.Lag, err = rd.ReadInt()
+ if err != nil && err != Nil {
+ return nil, err
+ }
+ case "pel-count":
+ group.PelCount, err = rd.ReadInt()
+ if err != nil {
+ return nil, err
+ }
+ case "pending":
+ group.Pending, err = readXInfoStreamGroupPending(rd)
+ if err != nil {
+ return nil, err
+ }
+ case "consumers":
+ group.Consumers, err = readXInfoStreamConsumers(rd)
+ if err != nil {
+ return nil, err
+ }
+ default:
+ return nil, fmt.Errorf("redis: unexpected key %q in XINFO STREAM FULL reply", key)
+ }
+ }
+
+ groups = append(groups, group)
+ }
+
+ return groups, nil
+}
+
+func readXInfoStreamGroupPending(rd *proto.Reader) ([]XInfoStreamGroupPending, error) {
+ n, err := rd.ReadArrayLen()
+ if err != nil {
+ return nil, err
+ }
+
+ pending := make([]XInfoStreamGroupPending, 0, n)
+
+ for i := 0; i < n; i++ {
+ if err = rd.ReadFixedArrayLen(4); err != nil {
+ return nil, err
+ }
+
+ p := XInfoStreamGroupPending{}
+
+ p.ID, err = rd.ReadString()
+ if err != nil {
+ return nil, err
+ }
+
+ p.Consumer, err = rd.ReadString()
+ if err != nil {
+ return nil, err
+ }
+
+ delivery, err := rd.ReadInt()
+ if err != nil {
+ return nil, err
+ }
+ p.DeliveryTime = time.Unix(delivery/1000, delivery%1000*int64(time.Millisecond))
+
+ p.DeliveryCount, err = rd.ReadInt()
+ if err != nil {
+ return nil, err
+ }
+
+ pending = append(pending, p)
+ }
+
+ return pending, nil
+}
+
+func readXInfoStreamConsumers(rd *proto.Reader) ([]XInfoStreamConsumer, error) {
+ n, err := rd.ReadArrayLen()
+ if err != nil {
+ return nil, err
+ }
+
+ consumers := make([]XInfoStreamConsumer, 0, n)
+
+ for i := 0; i < n; i++ {
+ nn, err := rd.ReadMapLen()
+ if err != nil {
+ return nil, err
+ }
+
+ c := XInfoStreamConsumer{}
+
+ for f := 0; f < nn; f++ {
+ cKey, err := rd.ReadString()
+ if err != nil {
+ return nil, err
+ }
+
+ switch cKey {
+ case "name":
+ c.Name, err = rd.ReadString()
+ case "seen-time":
+ seen, err := rd.ReadInt()
+ if err != nil {
+ return nil, err
+ }
+ c.SeenTime = time.UnixMilli(seen)
+ case "active-time":
+ active, err := rd.ReadInt()
+ if err != nil {
+ return nil, err
+ }
+ c.ActiveTime = time.UnixMilli(active)
+ case "pel-count":
+ c.PelCount, err = rd.ReadInt()
+ case "pending":
+ pendingNumber, err := rd.ReadArrayLen()
+ if err != nil {
+ return nil, err
+ }
+
+ c.Pending = make([]XInfoStreamConsumerPending, 0, pendingNumber)
+
+ for pn := 0; pn < pendingNumber; pn++ {
+ if err = rd.ReadFixedArrayLen(3); err != nil {
+ return nil, err
+ }
+
+ p := XInfoStreamConsumerPending{}
+
+ p.ID, err = rd.ReadString()
+ if err != nil {
+ return nil, err
+ }
+
+ delivery, err := rd.ReadInt()
+ if err != nil {
+ return nil, err
+ }
+ p.DeliveryTime = time.Unix(delivery/1000, delivery%1000*int64(time.Millisecond))
+
+ p.DeliveryCount, err = rd.ReadInt()
+ if err != nil {
+ return nil, err
+ }
+
+ c.Pending = append(c.Pending, p)
+ }
+ default:
+ return nil, fmt.Errorf("redis: unexpected content %s "+
+ "in XINFO STREAM FULL reply", cKey)
+ }
+ if err != nil {
+ return nil, err
+ }
+ }
+ consumers = append(consumers, c)
+ }
+
+ return consumers, nil
+}
+
+//------------------------------------------------------------------------------
+
+type ZSliceCmd struct {
+ baseCmd
+
+ val []Z
+}
+
+var _ Cmder = (*ZSliceCmd)(nil)
+
+func NewZSliceCmd(ctx context.Context, args ...interface{}) *ZSliceCmd {
+ return &ZSliceCmd{
+ baseCmd: baseCmd{
+ ctx: ctx,
+ args: args,
+ },
+ }
+}
+
+func (cmd *ZSliceCmd) SetVal(val []Z) {
+ cmd.val = val
+}
+
+func (cmd *ZSliceCmd) Val() []Z {
+ return cmd.val
+}
+
+func (cmd *ZSliceCmd) Result() ([]Z, error) {
+ return cmd.val, cmd.err
+}
+
+func (cmd *ZSliceCmd) String() string {
+ return cmdString(cmd, cmd.val)
+}
+
+func (cmd *ZSliceCmd) readReply(rd *proto.Reader) error { // nolint:dupl
+ n, err := rd.ReadArrayLen()
+ if err != nil {
+ return err
+ }
+
+ // If the n is 0, can't continue reading.
+ if n == 0 {
+ cmd.val = make([]Z, 0)
+ return nil
+ }
+
+ typ, err := rd.PeekReplyType()
+ if err != nil {
+ return err
+ }
+ array := typ == proto.RespArray
+
+ if array {
+ cmd.val = make([]Z, n)
+ } else {
+ cmd.val = make([]Z, n/2)
+ }
+
+ for i := 0; i < len(cmd.val); i++ {
+ if array {
+ if err = rd.ReadFixedArrayLen(2); err != nil {
+ return err
+ }
+ }
+
+ if cmd.val[i].Member, err = rd.ReadString(); err != nil {
+ return err
+ }
+
+ if cmd.val[i].Score, err = rd.ReadFloat(); err != nil {
+ return err
+ }
+ }
+
+ return nil
+}
+
+//------------------------------------------------------------------------------
+
+type ZWithKeyCmd struct {
+ baseCmd
+
+ val *ZWithKey
+}
+
+var _ Cmder = (*ZWithKeyCmd)(nil)
+
+func NewZWithKeyCmd(ctx context.Context, args ...interface{}) *ZWithKeyCmd {
+ return &ZWithKeyCmd{
+ baseCmd: baseCmd{
+ ctx: ctx,
+ args: args,
+ },
+ }
+}
+
+func (cmd *ZWithKeyCmd) SetVal(val *ZWithKey) {
+ cmd.val = val
+}
+
+func (cmd *ZWithKeyCmd) Val() *ZWithKey {
+ return cmd.val
+}
+
+func (cmd *ZWithKeyCmd) Result() (*ZWithKey, error) {
+ return cmd.val, cmd.err
+}
+
+func (cmd *ZWithKeyCmd) String() string {
+ return cmdString(cmd, cmd.val)
+}
+
+func (cmd *ZWithKeyCmd) readReply(rd *proto.Reader) (err error) {
+ if err = rd.ReadFixedArrayLen(3); err != nil {
+ return err
+ }
+ cmd.val = &ZWithKey{}
+
+ if cmd.val.Key, err = rd.ReadString(); err != nil {
+ return err
+ }
+ if cmd.val.Member, err = rd.ReadString(); err != nil {
+ return err
+ }
+ if cmd.val.Score, err = rd.ReadFloat(); err != nil {
+ return err
+ }
+
+ return nil
+}
+
+//------------------------------------------------------------------------------
+
+type ScanCmd struct {
+ baseCmd
+
+ page []string
+ cursor uint64
+
+ process cmdable
+}
+
+var _ Cmder = (*ScanCmd)(nil)
+
+func NewScanCmd(ctx context.Context, process cmdable, args ...interface{}) *ScanCmd {
+ return &ScanCmd{
+ baseCmd: baseCmd{
+ ctx: ctx,
+ args: args,
+ },
+ process: process,
+ }
+}
+
+func (cmd *ScanCmd) SetVal(page []string, cursor uint64) {
+ cmd.page = page
+ cmd.cursor = cursor
+}
+
+func (cmd *ScanCmd) Val() (keys []string, cursor uint64) {
+ return cmd.page, cmd.cursor
+}
+
+func (cmd *ScanCmd) Result() (keys []string, cursor uint64, err error) {
+ return cmd.page, cmd.cursor, cmd.err
+}
+
+func (cmd *ScanCmd) String() string {
+ return cmdString(cmd, cmd.page)
+}
+
+func (cmd *ScanCmd) readReply(rd *proto.Reader) error {
+ if err := rd.ReadFixedArrayLen(2); err != nil {
+ return err
+ }
+
+ cursor, err := rd.ReadUint()
+ if err != nil {
+ return err
+ }
+ cmd.cursor = cursor
+
+ n, err := rd.ReadArrayLen()
+ if err != nil {
+ return err
+ }
+ cmd.page = make([]string, n)
+
+ for i := 0; i < len(cmd.page); i++ {
+ if cmd.page[i], err = rd.ReadString(); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+// Iterator creates a new ScanIterator.
+func (cmd *ScanCmd) Iterator() *ScanIterator {
+ return &ScanIterator{
+ cmd: cmd,
+ }
+}
+
+//------------------------------------------------------------------------------
+
+type ClusterNode struct {
+ ID string
+ Addr string
+ NetworkingMetadata map[string]string
+}
+
+type ClusterSlot struct {
+ Start int
+ End int
+ Nodes []ClusterNode
+}
+
+type ClusterSlotsCmd struct {
+ baseCmd
+
+ val []ClusterSlot
+}
+
+var _ Cmder = (*ClusterSlotsCmd)(nil)
+
+func NewClusterSlotsCmd(ctx context.Context, args ...interface{}) *ClusterSlotsCmd {
+ return &ClusterSlotsCmd{
+ baseCmd: baseCmd{
+ ctx: ctx,
+ args: args,
+ },
+ }
+}
+
+func (cmd *ClusterSlotsCmd) SetVal(val []ClusterSlot) {
+ cmd.val = val
+}
+
+func (cmd *ClusterSlotsCmd) Val() []ClusterSlot {
+ return cmd.val
+}
+
+func (cmd *ClusterSlotsCmd) Result() ([]ClusterSlot, error) {
+ return cmd.val, cmd.err
+}
+
+func (cmd *ClusterSlotsCmd) String() string {
+ return cmdString(cmd, cmd.val)
+}
+
+func (cmd *ClusterSlotsCmd) readReply(rd *proto.Reader) error {
+ n, err := rd.ReadArrayLen()
+ if err != nil {
+ return err
+ }
+ cmd.val = make([]ClusterSlot, n)
+
+ for i := 0; i < len(cmd.val); i++ {
+ n, err = rd.ReadArrayLen()
+ if err != nil {
+ return err
+ }
+ if n < 2 {
+ return fmt.Errorf("redis: got %d elements in cluster info, expected at least 2", n)
+ }
+
+ start, err := rd.ReadInt()
+ if err != nil {
+ return err
+ }
+
+ end, err := rd.ReadInt()
+ if err != nil {
+ return err
+ }
+
+ // subtract start and end.
+ nodes := make([]ClusterNode, n-2)
+
+ for j := 0; j < len(nodes); j++ {
+ nn, err := rd.ReadArrayLen()
+ if err != nil {
+ return err
+ }
+ if nn < 2 || nn > 4 {
+ return fmt.Errorf("got %d elements in cluster info address, expected 2, 3, or 4", n)
+ }
+
+ ip, err := rd.ReadString()
+ if err != nil {
+ return err
+ }
+
+ port, err := rd.ReadString()
+ if err != nil {
+ return err
+ }
+
+ nodes[j].Addr = net.JoinHostPort(ip, port)
+
+ if nn >= 3 {
+ id, err := rd.ReadString()
+ if err != nil {
+ return err
+ }
+ nodes[j].ID = id
+ }
+
+ if nn >= 4 {
+ metadataLength, err := rd.ReadMapLen()
+ if err != nil {
+ return err
+ }
+
+ networkingMetadata := make(map[string]string, metadataLength)
+
+ for i := 0; i < metadataLength; i++ {
+ key, err := rd.ReadString()
+ if err != nil {
+ return err
+ }
+ value, err := rd.ReadString()
+ if err != nil {
+ return err
+ }
+ networkingMetadata[key] = value
+ }
+
+ nodes[j].NetworkingMetadata = networkingMetadata
+ }
+ }
+
+ cmd.val[i] = ClusterSlot{
+ Start: int(start),
+ End: int(end),
+ Nodes: nodes,
+ }
+ }
+
+ return nil
+}
+
+//------------------------------------------------------------------------------
+
+// GeoLocation is used with GeoAdd to add geospatial location.
+type GeoLocation struct {
+ Name string
+ Longitude, Latitude, Dist float64
+ GeoHash int64
+}
+
+// GeoRadiusQuery is used with GeoRadius to query geospatial index.
+type GeoRadiusQuery struct {
+ Radius float64
+ // Can be m, km, ft, or mi. Default is km.
+ Unit string
+ WithCoord bool
+ WithDist bool
+ WithGeoHash bool
+ Count int
+ // Can be ASC or DESC. Default is no sort order.
+ Sort string
+ Store string
+ StoreDist string
+
+ // WithCoord+WithDist+WithGeoHash
+ withLen int
+}
+
+type GeoLocationCmd struct {
+ baseCmd
+
+ q *GeoRadiusQuery
+ locations []GeoLocation
+}
+
+var _ Cmder = (*GeoLocationCmd)(nil)
+
+func NewGeoLocationCmd(ctx context.Context, q *GeoRadiusQuery, args ...interface{}) *GeoLocationCmd {
+ return &GeoLocationCmd{
+ baseCmd: baseCmd{
+ ctx: ctx,
+ args: geoLocationArgs(q, args...),
+ },
+ q: q,
+ }
+}
+
+func geoLocationArgs(q *GeoRadiusQuery, args ...interface{}) []interface{} {
+ args = append(args, q.Radius)
+ if q.Unit != "" {
+ args = append(args, q.Unit)
+ } else {
+ args = append(args, "km")
+ }
+ if q.WithCoord {
+ args = append(args, "withcoord")
+ q.withLen++
+ }
+ if q.WithDist {
+ args = append(args, "withdist")
+ q.withLen++
+ }
+ if q.WithGeoHash {
+ args = append(args, "withhash")
+ q.withLen++
+ }
+ if q.Count > 0 {
+ args = append(args, "count", q.Count)
+ }
+ if q.Sort != "" {
+ args = append(args, q.Sort)
+ }
+ if q.Store != "" {
+ args = append(args, "store")
+ args = append(args, q.Store)
+ }
+ if q.StoreDist != "" {
+ args = append(args, "storedist")
+ args = append(args, q.StoreDist)
+ }
+ return args
+}
+
+func (cmd *GeoLocationCmd) SetVal(locations []GeoLocation) {
+ cmd.locations = locations
+}
+
+func (cmd *GeoLocationCmd) Val() []GeoLocation {
+ return cmd.locations
+}
+
+func (cmd *GeoLocationCmd) Result() ([]GeoLocation, error) {
+ return cmd.locations, cmd.err
+}
+
+func (cmd *GeoLocationCmd) String() string {
+ return cmdString(cmd, cmd.locations)
+}
+
+func (cmd *GeoLocationCmd) readReply(rd *proto.Reader) error {
+ n, err := rd.ReadArrayLen()
+ if err != nil {
+ return err
+ }
+ cmd.locations = make([]GeoLocation, n)
+
+ for i := 0; i < len(cmd.locations); i++ {
+ // only name
+ if cmd.q.withLen == 0 {
+ if cmd.locations[i].Name, err = rd.ReadString(); err != nil {
+ return err
+ }
+ continue
+ }
+
+ // +name
+ if err = rd.ReadFixedArrayLen(cmd.q.withLen + 1); err != nil {
+ return err
+ }
+
+ if cmd.locations[i].Name, err = rd.ReadString(); err != nil {
+ return err
+ }
+ if cmd.q.WithDist {
+ if cmd.locations[i].Dist, err = rd.ReadFloat(); err != nil {
+ return err
+ }
+ }
+ if cmd.q.WithGeoHash {
+ if cmd.locations[i].GeoHash, err = rd.ReadInt(); err != nil {
+ return err
+ }
+ }
+ if cmd.q.WithCoord {
+ if err = rd.ReadFixedArrayLen(2); err != nil {
+ return err
+ }
+ if cmd.locations[i].Longitude, err = rd.ReadFloat(); err != nil {
+ return err
+ }
+ if cmd.locations[i].Latitude, err = rd.ReadFloat(); err != nil {
+ return err
+ }
+ }
+ }
+
+ return nil
+}
+
+//------------------------------------------------------------------------------
+
+// GeoSearchQuery is used for GEOSearch/GEOSearchStore command query.
+type GeoSearchQuery struct {
+ Member string
+
+ // Latitude and Longitude when using FromLonLat option.
+ Longitude float64
+ Latitude float64
+
+ // Distance and unit when using ByRadius option.
+ // Can use m, km, ft, or mi. Default is km.
+ Radius float64
+ RadiusUnit string
+
+ // Height, width and unit when using ByBox option.
+ // Can be m, km, ft, or mi. Default is km.
+ BoxWidth float64
+ BoxHeight float64
+ BoxUnit string
+
+ // Can be ASC or DESC. Default is no sort order.
+ Sort string
+ Count int
+ CountAny bool
+}
+
+type GeoSearchLocationQuery struct {
+ GeoSearchQuery
+
+ WithCoord bool
+ WithDist bool
+ WithHash bool
+}
+
+type GeoSearchStoreQuery struct {
+ GeoSearchQuery
+
+ // When using the StoreDist option, the command stores the items in a
+ // sorted set populated with their distance from the center of the circle or box,
+ // as a floating-point number, in the same unit specified for that shape.
+ StoreDist bool
+}
+
+func geoSearchLocationArgs(q *GeoSearchLocationQuery, args []interface{}) []interface{} {
+ args = geoSearchArgs(&q.GeoSearchQuery, args)
+
+ if q.WithCoord {
+ args = append(args, "withcoord")
+ }
+ if q.WithDist {
+ args = append(args, "withdist")
+ }
+ if q.WithHash {
+ args = append(args, "withhash")
+ }
+
+ return args
+}
+
+func geoSearchArgs(q *GeoSearchQuery, args []interface{}) []interface{} {
+ if q.Member != "" {
+ args = append(args, "frommember", q.Member)
+ } else {
+ args = append(args, "fromlonlat", q.Longitude, q.Latitude)
+ }
+
+ if q.Radius > 0 {
+ if q.RadiusUnit == "" {
+ q.RadiusUnit = "km"
+ }
+ args = append(args, "byradius", q.Radius, q.RadiusUnit)
+ } else {
+ if q.BoxUnit == "" {
+ q.BoxUnit = "km"
+ }
+ args = append(args, "bybox", q.BoxWidth, q.BoxHeight, q.BoxUnit)
+ }
+
+ if q.Sort != "" {
+ args = append(args, q.Sort)
+ }
+
+ if q.Count > 0 {
+ args = append(args, "count", q.Count)
+ if q.CountAny {
+ args = append(args, "any")
+ }
+ }
+
+ return args
+}
+
+type GeoSearchLocationCmd struct {
+ baseCmd
+
+ opt *GeoSearchLocationQuery
+ val []GeoLocation
+}
+
+var _ Cmder = (*GeoSearchLocationCmd)(nil)
+
+func NewGeoSearchLocationCmd(
+ ctx context.Context, opt *GeoSearchLocationQuery, args ...interface{},
+) *GeoSearchLocationCmd {
+ return &GeoSearchLocationCmd{
+ baseCmd: baseCmd{
+ ctx: ctx,
+ args: args,
+ },
+ opt: opt,
+ }
+}
+
+func (cmd *GeoSearchLocationCmd) SetVal(val []GeoLocation) {
+ cmd.val = val
+}
+
+func (cmd *GeoSearchLocationCmd) Val() []GeoLocation {
+ return cmd.val
+}
+
+func (cmd *GeoSearchLocationCmd) Result() ([]GeoLocation, error) {
+ return cmd.val, cmd.err
+}
+
+func (cmd *GeoSearchLocationCmd) String() string {
+ return cmdString(cmd, cmd.val)
+}
+
+func (cmd *GeoSearchLocationCmd) readReply(rd *proto.Reader) error {
+ n, err := rd.ReadArrayLen()
+ if err != nil {
+ return err
+ }
+
+ cmd.val = make([]GeoLocation, n)
+ for i := 0; i < n; i++ {
+ _, err = rd.ReadArrayLen()
+ if err != nil {
+ return err
+ }
+
+ var loc GeoLocation
+
+ loc.Name, err = rd.ReadString()
+ if err != nil {
+ return err
+ }
+ if cmd.opt.WithDist {
+ loc.Dist, err = rd.ReadFloat()
+ if err != nil {
+ return err
+ }
+ }
+ if cmd.opt.WithHash {
+ loc.GeoHash, err = rd.ReadInt()
+ if err != nil {
+ return err
+ }
+ }
+ if cmd.opt.WithCoord {
+ if err = rd.ReadFixedArrayLen(2); err != nil {
+ return err
+ }
+ loc.Longitude, err = rd.ReadFloat()
+ if err != nil {
+ return err
+ }
+ loc.Latitude, err = rd.ReadFloat()
+ if err != nil {
+ return err
+ }
+ }
+
+ cmd.val[i] = loc
+ }
+
+ return nil
+}
+
+//------------------------------------------------------------------------------
+
+type GeoPos struct {
+ Longitude, Latitude float64
+}
+
+type GeoPosCmd struct {
+ baseCmd
+
+ val []*GeoPos
+}
+
+var _ Cmder = (*GeoPosCmd)(nil)
+
+func NewGeoPosCmd(ctx context.Context, args ...interface{}) *GeoPosCmd {
+ return &GeoPosCmd{
+ baseCmd: baseCmd{
+ ctx: ctx,
+ args: args,
+ },
+ }
+}
+
+func (cmd *GeoPosCmd) SetVal(val []*GeoPos) {
+ cmd.val = val
+}
+
+func (cmd *GeoPosCmd) Val() []*GeoPos {
+ return cmd.val
+}
+
+func (cmd *GeoPosCmd) Result() ([]*GeoPos, error) {
+ return cmd.val, cmd.err
+}
+
+func (cmd *GeoPosCmd) String() string {
+ return cmdString(cmd, cmd.val)
+}
+
+func (cmd *GeoPosCmd) readReply(rd *proto.Reader) error {
+ n, err := rd.ReadArrayLen()
+ if err != nil {
+ return err
+ }
+ cmd.val = make([]*GeoPos, n)
+
+ for i := 0; i < len(cmd.val); i++ {
+ err = rd.ReadFixedArrayLen(2)
+ if err != nil {
+ if err == Nil {
+ cmd.val[i] = nil
+ continue
+ }
+ return err
+ }
+
+ longitude, err := rd.ReadFloat()
+ if err != nil {
+ return err
+ }
+ latitude, err := rd.ReadFloat()
+ if err != nil {
+ return err
+ }
+
+ cmd.val[i] = &GeoPos{
+ Longitude: longitude,
+ Latitude: latitude,
+ }
+ }
+
+ return nil
+}
+
+//------------------------------------------------------------------------------
+
+type CommandInfo struct {
+ Name string
+ Arity int8
+ Flags []string
+ ACLFlags []string
+ FirstKeyPos int8
+ LastKeyPos int8
+ StepCount int8
+ ReadOnly bool
+}
+
+type CommandsInfoCmd struct {
+ baseCmd
+
+ val map[string]*CommandInfo
+}
+
+var _ Cmder = (*CommandsInfoCmd)(nil)
+
+func NewCommandsInfoCmd(ctx context.Context, args ...interface{}) *CommandsInfoCmd {
+ return &CommandsInfoCmd{
+ baseCmd: baseCmd{
+ ctx: ctx,
+ args: args,
+ },
+ }
+}
+
+func (cmd *CommandsInfoCmd) SetVal(val map[string]*CommandInfo) {
+ cmd.val = val
+}
+
+func (cmd *CommandsInfoCmd) Val() map[string]*CommandInfo {
+ return cmd.val
+}
+
+func (cmd *CommandsInfoCmd) Result() (map[string]*CommandInfo, error) {
+ return cmd.val, cmd.err
+}
+
+func (cmd *CommandsInfoCmd) String() string {
+ return cmdString(cmd, cmd.val)
+}
+
+func (cmd *CommandsInfoCmd) readReply(rd *proto.Reader) error {
+ const numArgRedis5 = 6
+ const numArgRedis6 = 7
+ const numArgRedis7 = 10
+
+ n, err := rd.ReadArrayLen()
+ if err != nil {
+ return err
+ }
+ cmd.val = make(map[string]*CommandInfo, n)
+
+ for i := 0; i < n; i++ {
+ nn, err := rd.ReadArrayLen()
+ if err != nil {
+ return err
+ }
+
+ switch nn {
+ case numArgRedis5, numArgRedis6, numArgRedis7:
+ // ok
+ default:
+ return fmt.Errorf("redis: got %d elements in COMMAND reply, wanted 6/7/10", nn)
+ }
+
+ cmdInfo := &CommandInfo{}
+ if cmdInfo.Name, err = rd.ReadString(); err != nil {
+ return err
+ }
+
+ arity, err := rd.ReadInt()
+ if err != nil {
+ return err
+ }
+ cmdInfo.Arity = int8(arity)
+
+ flagLen, err := rd.ReadArrayLen()
+ if err != nil {
+ return err
+ }
+ cmdInfo.Flags = make([]string, flagLen)
+ for f := 0; f < len(cmdInfo.Flags); f++ {
+ switch s, err := rd.ReadString(); {
+ case err == Nil:
+ cmdInfo.Flags[f] = ""
+ case err != nil:
+ return err
+ default:
+ if !cmdInfo.ReadOnly && s == "readonly" {
+ cmdInfo.ReadOnly = true
+ }
+ cmdInfo.Flags[f] = s
+ }
+ }
+
+ firstKeyPos, err := rd.ReadInt()
+ if err != nil {
+ return err
+ }
+ cmdInfo.FirstKeyPos = int8(firstKeyPos)
+
+ lastKeyPos, err := rd.ReadInt()
+ if err != nil {
+ return err
+ }
+ cmdInfo.LastKeyPos = int8(lastKeyPos)
+
+ stepCount, err := rd.ReadInt()
+ if err != nil {
+ return err
+ }
+ cmdInfo.StepCount = int8(stepCount)
+
+ if nn >= numArgRedis6 {
+ aclFlagLen, err := rd.ReadArrayLen()
+ if err != nil {
+ return err
+ }
+ cmdInfo.ACLFlags = make([]string, aclFlagLen)
+ for f := 0; f < len(cmdInfo.ACLFlags); f++ {
+ switch s, err := rd.ReadString(); {
+ case err == Nil:
+ cmdInfo.ACLFlags[f] = ""
+ case err != nil:
+ return err
+ default:
+ cmdInfo.ACLFlags[f] = s
+ }
+ }
+ }
+
+ if nn >= numArgRedis7 {
+ if err := rd.DiscardNext(); err != nil {
+ return err
+ }
+ if err := rd.DiscardNext(); err != nil {
+ return err
+ }
+ if err := rd.DiscardNext(); err != nil {
+ return err
+ }
+ }
+
+ cmd.val[cmdInfo.Name] = cmdInfo
+ }
+
+ return nil
+}
+
+//------------------------------------------------------------------------------
+
+type cmdsInfoCache struct {
+ fn func(ctx context.Context) (map[string]*CommandInfo, error)
+
+ once internal.Once
+ cmds map[string]*CommandInfo
+}
+
+func newCmdsInfoCache(fn func(ctx context.Context) (map[string]*CommandInfo, error)) *cmdsInfoCache {
+ return &cmdsInfoCache{
+ fn: fn,
+ }
+}
+
+func (c *cmdsInfoCache) Get(ctx context.Context) (map[string]*CommandInfo, error) {
+ err := c.once.Do(func() error {
+ cmds, err := c.fn(ctx)
+ if err != nil {
+ return err
+ }
+
+ // Extensions have cmd names in upper case. Convert them to lower case.
+ for k, v := range cmds {
+ lower := internal.ToLower(k)
+ if lower != k {
+ cmds[lower] = v
+ }
+ }
+
+ c.cmds = cmds
+ return nil
+ })
+ return c.cmds, err
+}
+
+//------------------------------------------------------------------------------
+
+type SlowLog struct {
+ ID int64
+ Time time.Time
+ Duration time.Duration
+ Args []string
+ // These are also optional fields emitted only by Redis 4.0 or greater:
+ // https://redis.io/commands/slowlog#output-format
+ ClientAddr string
+ ClientName string
+}
+
+type SlowLogCmd struct {
+ baseCmd
+
+ val []SlowLog
+}
+
+var _ Cmder = (*SlowLogCmd)(nil)
+
+func NewSlowLogCmd(ctx context.Context, args ...interface{}) *SlowLogCmd {
+ return &SlowLogCmd{
+ baseCmd: baseCmd{
+ ctx: ctx,
+ args: args,
+ },
+ }
+}
+
+func (cmd *SlowLogCmd) SetVal(val []SlowLog) {
+ cmd.val = val
+}
+
+func (cmd *SlowLogCmd) Val() []SlowLog {
+ return cmd.val
+}
+
+func (cmd *SlowLogCmd) Result() ([]SlowLog, error) {
+ return cmd.val, cmd.err
+}
+
+func (cmd *SlowLogCmd) String() string {
+ return cmdString(cmd, cmd.val)
+}
+
+func (cmd *SlowLogCmd) readReply(rd *proto.Reader) error {
+ n, err := rd.ReadArrayLen()
+ if err != nil {
+ return err
+ }
+ cmd.val = make([]SlowLog, n)
+
+ for i := 0; i < len(cmd.val); i++ {
+ nn, err := rd.ReadArrayLen()
+ if err != nil {
+ return err
+ }
+ if nn < 4 {
+ return fmt.Errorf("redis: got %d elements in slowlog get, expected at least 4", nn)
+ }
+
+ if cmd.val[i].ID, err = rd.ReadInt(); err != nil {
+ return err
+ }
+
+ createdAt, err := rd.ReadInt()
+ if err != nil {
+ return err
+ }
+ cmd.val[i].Time = time.Unix(createdAt, 0)
+
+ costs, err := rd.ReadInt()
+ if err != nil {
+ return err
+ }
+ cmd.val[i].Duration = time.Duration(costs) * time.Microsecond
+
+ cmdLen, err := rd.ReadArrayLen()
+ if err != nil {
+ return err
+ }
+ if cmdLen < 1 {
+ return fmt.Errorf("redis: got %d elements commands reply in slowlog get, expected at least 1", cmdLen)
+ }
+
+ cmd.val[i].Args = make([]string, cmdLen)
+ for f := 0; f < len(cmd.val[i].Args); f++ {
+ cmd.val[i].Args[f], err = rd.ReadString()
+ if err != nil {
+ return err
+ }
+ }
+
+ if nn >= 5 {
+ if cmd.val[i].ClientAddr, err = rd.ReadString(); err != nil {
+ return err
+ }
+ }
+
+ if nn >= 6 {
+ if cmd.val[i].ClientName, err = rd.ReadString(); err != nil {
+ return err
+ }
+ }
+ }
+
+ return nil
+}
+
+//-----------------------------------------------------------------------
+
+type MapStringInterfaceCmd struct {
+ baseCmd
+
+ val map[string]interface{}
+}
+
+var _ Cmder = (*MapStringInterfaceCmd)(nil)
+
+func NewMapStringInterfaceCmd(ctx context.Context, args ...interface{}) *MapStringInterfaceCmd {
+ return &MapStringInterfaceCmd{
+ baseCmd: baseCmd{
+ ctx: ctx,
+ args: args,
+ },
+ }
+}
+
+func (cmd *MapStringInterfaceCmd) SetVal(val map[string]interface{}) {
+ cmd.val = val
+}
+
+func (cmd *MapStringInterfaceCmd) Val() map[string]interface{} {
+ return cmd.val
+}
+
+func (cmd *MapStringInterfaceCmd) Result() (map[string]interface{}, error) {
+ return cmd.val, cmd.err
+}
+
+func (cmd *MapStringInterfaceCmd) String() string {
+ return cmdString(cmd, cmd.val)
+}
+
+func (cmd *MapStringInterfaceCmd) readReply(rd *proto.Reader) error {
+ n, err := rd.ReadMapLen()
+ if err != nil {
+ return err
+ }
+
+ cmd.val = make(map[string]interface{}, n)
+ for i := 0; i < n; i++ {
+ k, err := rd.ReadString()
+ if err != nil {
+ return err
+ }
+ v, err := rd.ReadReply()
+ if err != nil {
+ if err == Nil {
+ cmd.val[k] = Nil
+ continue
+ }
+ if err, ok := err.(proto.RedisError); ok {
+ cmd.val[k] = err
+ continue
+ }
+ return err
+ }
+ cmd.val[k] = v
+ }
+ return nil
+}
+
+//-----------------------------------------------------------------------
+
+type MapStringStringSliceCmd struct {
+ baseCmd
+
+ val []map[string]string
+}
+
+var _ Cmder = (*MapStringStringSliceCmd)(nil)
+
+func NewMapStringStringSliceCmd(ctx context.Context, args ...interface{}) *MapStringStringSliceCmd {
+ return &MapStringStringSliceCmd{
+ baseCmd: baseCmd{
+ ctx: ctx,
+ args: args,
+ },
+ }
+}
+
+func (cmd *MapStringStringSliceCmd) SetVal(val []map[string]string) {
+ cmd.val = val
+}
+
+func (cmd *MapStringStringSliceCmd) Val() []map[string]string {
+ return cmd.val
+}
+
+func (cmd *MapStringStringSliceCmd) Result() ([]map[string]string, error) {
+ return cmd.val, cmd.err
+}
+
+func (cmd *MapStringStringSliceCmd) String() string {
+ return cmdString(cmd, cmd.val)
+}
+
+func (cmd *MapStringStringSliceCmd) readReply(rd *proto.Reader) error {
+ n, err := rd.ReadArrayLen()
+ if err != nil {
+ return err
+ }
+
+ cmd.val = make([]map[string]string, n)
+ for i := 0; i < n; i++ {
+ nn, err := rd.ReadMapLen()
+ if err != nil {
+ return err
+ }
+ cmd.val[i] = make(map[string]string, nn)
+ for f := 0; f < nn; f++ {
+ k, err := rd.ReadString()
+ if err != nil {
+ return err
+ }
+
+ v, err := rd.ReadString()
+ if err != nil {
+ return err
+ }
+ cmd.val[i][k] = v
+ }
+ }
+ return nil
+}
+
+//-----------------------------------------------------------------------
+
+type MapStringInterfaceSliceCmd struct {
+ baseCmd
+
+ val []map[string]interface{}
+}
+
+var _ Cmder = (*MapStringInterfaceSliceCmd)(nil)
+
+func NewMapStringInterfaceSliceCmd(ctx context.Context, args ...interface{}) *MapStringInterfaceSliceCmd {
+ return &MapStringInterfaceSliceCmd{
+ baseCmd: baseCmd{
+ ctx: ctx,
+ args: args,
+ },
+ }
+}
+
+func (cmd *MapStringInterfaceSliceCmd) SetVal(val []map[string]interface{}) {
+ cmd.val = val
+}
+
+func (cmd *MapStringInterfaceSliceCmd) Val() []map[string]interface{} {
+ return cmd.val
+}
+
+func (cmd *MapStringInterfaceSliceCmd) Result() ([]map[string]interface{}, error) {
+ return cmd.val, cmd.err
+}
+
+func (cmd *MapStringInterfaceSliceCmd) String() string {
+ return cmdString(cmd, cmd.val)
+}
+
+func (cmd *MapStringInterfaceSliceCmd) readReply(rd *proto.Reader) error {
+ n, err := rd.ReadArrayLen()
+ if err != nil {
+ return err
+ }
+
+ cmd.val = make([]map[string]interface{}, n)
+ for i := 0; i < n; i++ {
+ nn, err := rd.ReadMapLen()
+ if err != nil {
+ return err
+ }
+ cmd.val[i] = make(map[string]interface{}, nn)
+ for f := 0; f < nn; f++ {
+ k, err := rd.ReadString()
+ if err != nil {
+ return err
+ }
+ v, err := rd.ReadReply()
+ if err != nil {
+ if err != Nil {
+ return err
+ }
+ }
+ cmd.val[i][k] = v
+ }
+ }
+ return nil
+}
+
+//------------------------------------------------------------------------------
+
+type KeyValuesCmd struct {
+ baseCmd
+
+ key string
+ val []string
+}
+
+var _ Cmder = (*KeyValuesCmd)(nil)
+
+func NewKeyValuesCmd(ctx context.Context, args ...interface{}) *KeyValuesCmd {
+ return &KeyValuesCmd{
+ baseCmd: baseCmd{
+ ctx: ctx,
+ args: args,
+ },
+ }
+}
+
+func (cmd *KeyValuesCmd) SetVal(key string, val []string) {
+ cmd.key = key
+ cmd.val = val
+}
+
+func (cmd *KeyValuesCmd) Val() (string, []string) {
+ return cmd.key, cmd.val
+}
+
+func (cmd *KeyValuesCmd) Result() (string, []string, error) {
+ return cmd.key, cmd.val, cmd.err
+}
+
+func (cmd *KeyValuesCmd) String() string {
+ return cmdString(cmd, cmd.val)
+}
+
+func (cmd *KeyValuesCmd) readReply(rd *proto.Reader) (err error) {
+ if err = rd.ReadFixedArrayLen(2); err != nil {
+ return err
+ }
+
+ cmd.key, err = rd.ReadString()
+ if err != nil {
+ return err
+ }
+
+ n, err := rd.ReadArrayLen()
+ if err != nil {
+ return err
+ }
+ cmd.val = make([]string, n)
+ for i := 0; i < n; i++ {
+ cmd.val[i], err = rd.ReadString()
+ if err != nil {
+ return err
+ }
+ }
+
+ return nil
+}
+
+//------------------------------------------------------------------------------
+
+type ZSliceWithKeyCmd struct {
+ baseCmd
+
+ key string
+ val []Z
+}
+
+var _ Cmder = (*ZSliceWithKeyCmd)(nil)
+
+func NewZSliceWithKeyCmd(ctx context.Context, args ...interface{}) *ZSliceWithKeyCmd {
+ return &ZSliceWithKeyCmd{
+ baseCmd: baseCmd{
+ ctx: ctx,
+ args: args,
+ },
+ }
+}
+
+func (cmd *ZSliceWithKeyCmd) SetVal(key string, val []Z) {
+ cmd.key = key
+ cmd.val = val
+}
+
+func (cmd *ZSliceWithKeyCmd) Val() (string, []Z) {
+ return cmd.key, cmd.val
+}
+
+func (cmd *ZSliceWithKeyCmd) Result() (string, []Z, error) {
+ return cmd.key, cmd.val, cmd.err
+}
+
+func (cmd *ZSliceWithKeyCmd) String() string {
+ return cmdString(cmd, cmd.val)
+}
+
+func (cmd *ZSliceWithKeyCmd) readReply(rd *proto.Reader) (err error) {
+ if err = rd.ReadFixedArrayLen(2); err != nil {
+ return err
+ }
+
+ cmd.key, err = rd.ReadString()
+ if err != nil {
+ return err
+ }
+
+ n, err := rd.ReadArrayLen()
+ if err != nil {
+ return err
+ }
+
+ typ, err := rd.PeekReplyType()
+ if err != nil {
+ return err
+ }
+ array := typ == proto.RespArray
+
+ if array {
+ cmd.val = make([]Z, n)
+ } else {
+ cmd.val = make([]Z, n/2)
+ }
+
+ for i := 0; i < len(cmd.val); i++ {
+ if array {
+ if err = rd.ReadFixedArrayLen(2); err != nil {
+ return err
+ }
+ }
+
+ if cmd.val[i].Member, err = rd.ReadString(); err != nil {
+ return err
+ }
+
+ if cmd.val[i].Score, err = rd.ReadFloat(); err != nil {
+ return err
+ }
+ }
+
+ return nil
+}
+
+type Function struct {
+ Name string
+ Description string
+ Flags []string
+}
+
+type Library struct {
+ Name string
+ Engine string
+ Functions []Function
+ Code string
+}
+
+type FunctionListCmd struct {
+ baseCmd
+
+ val []Library
+}
+
+var _ Cmder = (*FunctionListCmd)(nil)
+
+func NewFunctionListCmd(ctx context.Context, args ...interface{}) *FunctionListCmd {
+ return &FunctionListCmd{
+ baseCmd: baseCmd{
+ ctx: ctx,
+ args: args,
+ },
+ }
+}
+
+func (cmd *FunctionListCmd) SetVal(val []Library) {
+ cmd.val = val
+}
+
+func (cmd *FunctionListCmd) String() string {
+ return cmdString(cmd, cmd.val)
+}
+
+func (cmd *FunctionListCmd) Val() []Library {
+ return cmd.val
+}
+
+func (cmd *FunctionListCmd) Result() ([]Library, error) {
+ return cmd.val, cmd.err
+}
+
+func (cmd *FunctionListCmd) First() (*Library, error) {
+ if cmd.err != nil {
+ return nil, cmd.err
+ }
+ if len(cmd.val) > 0 {
+ return &cmd.val[0], nil
+ }
+ return nil, Nil
+}
+
+func (cmd *FunctionListCmd) readReply(rd *proto.Reader) (err error) {
+ n, err := rd.ReadArrayLen()
+ if err != nil {
+ return err
+ }
+
+ libraries := make([]Library, n)
+ for i := 0; i < n; i++ {
+ nn, err := rd.ReadMapLen()
+ if err != nil {
+ return err
+ }
+
+ library := Library{}
+ for f := 0; f < nn; f++ {
+ key, err := rd.ReadString()
+ if err != nil {
+ return err
+ }
+
+ switch key {
+ case "library_name":
+ library.Name, err = rd.ReadString()
+ case "engine":
+ library.Engine, err = rd.ReadString()
+ case "functions":
+ library.Functions, err = cmd.readFunctions(rd)
+ case "library_code":
+ library.Code, err = rd.ReadString()
+ default:
+ return fmt.Errorf("redis: function list unexpected key %s", key)
+ }
+
+ if err != nil {
+ return err
+ }
+ }
+
+ libraries[i] = library
+ }
+ cmd.val = libraries
+ return nil
+}
+
+func (cmd *FunctionListCmd) readFunctions(rd *proto.Reader) ([]Function, error) {
+ n, err := rd.ReadArrayLen()
+ if err != nil {
+ return nil, err
+ }
+
+ functions := make([]Function, n)
+ for i := 0; i < n; i++ {
+ nn, err := rd.ReadMapLen()
+ if err != nil {
+ return nil, err
+ }
+
+ function := Function{}
+ for f := 0; f < nn; f++ {
+ key, err := rd.ReadString()
+ if err != nil {
+ return nil, err
+ }
+
+ switch key {
+ case "name":
+ if function.Name, err = rd.ReadString(); err != nil {
+ return nil, err
+ }
+ case "description":
+ if function.Description, err = rd.ReadString(); err != nil && err != Nil {
+ return nil, err
+ }
+ case "flags":
+ // resp set
+ nx, err := rd.ReadArrayLen()
+ if err != nil {
+ return nil, err
+ }
+
+ function.Flags = make([]string, nx)
+ for j := 0; j < nx; j++ {
+ if function.Flags[j], err = rd.ReadString(); err != nil {
+ return nil, err
+ }
+ }
+ default:
+ return nil, fmt.Errorf("redis: function list unexpected key %s", key)
+ }
+ }
+
+ functions[i] = function
+ }
+ return functions, nil
+}
+
+// FunctionStats contains information about the scripts currently executing on the server, and the available engines
+// - Engines:
+// Statistics about the engine like number of functions and number of libraries
+// - RunningScript:
+// The script currently running on the shard we're connecting to.
+// For Redis Enterprise and Redis Cloud, this represents the
+// function with the longest running time, across all the running functions, on all shards
+// - RunningScripts
+// All scripts currently running in a Redis Enterprise clustered database.
+// Only available on Redis Enterprise
+type FunctionStats struct {
+ Engines []Engine
+ isRunning bool
+ rs RunningScript
+ allrs []RunningScript
+}
+
+func (fs *FunctionStats) Running() bool {
+ return fs.isRunning
+}
+
+func (fs *FunctionStats) RunningScript() (RunningScript, bool) {
+ return fs.rs, fs.isRunning
+}
+
+// AllRunningScripts returns all scripts currently running in a Redis Enterprise clustered database.
+// Only available on Redis Enterprise
+func (fs *FunctionStats) AllRunningScripts() []RunningScript {
+ return fs.allrs
+}
+
+type RunningScript struct {
+ Name string
+ Command []string
+ Duration time.Duration
+}
+
+type Engine struct {
+ Language string
+ LibrariesCount int64
+ FunctionsCount int64
+}
+
+type FunctionStatsCmd struct {
+ baseCmd
+ val FunctionStats
+}
+
+var _ Cmder = (*FunctionStatsCmd)(nil)
+
+func NewFunctionStatsCmd(ctx context.Context, args ...interface{}) *FunctionStatsCmd {
+ return &FunctionStatsCmd{
+ baseCmd: baseCmd{
+ ctx: ctx,
+ args: args,
+ },
+ }
+}
+
+func (cmd *FunctionStatsCmd) SetVal(val FunctionStats) {
+ cmd.val = val
+}
+
+func (cmd *FunctionStatsCmd) String() string {
+ return cmdString(cmd, cmd.val)
+}
+
+func (cmd *FunctionStatsCmd) Val() FunctionStats {
+ return cmd.val
+}
+
+func (cmd *FunctionStatsCmd) Result() (FunctionStats, error) {
+ return cmd.val, cmd.err
+}
+
+func (cmd *FunctionStatsCmd) readReply(rd *proto.Reader) (err error) {
+ n, err := rd.ReadMapLen()
+ if err != nil {
+ return err
+ }
+
+ var key string
+ var result FunctionStats
+ for f := 0; f < n; f++ {
+ key, err = rd.ReadString()
+ if err != nil {
+ return err
+ }
+
+ switch key {
+ case "running_script":
+ result.rs, result.isRunning, err = cmd.readRunningScript(rd)
+ case "engines":
+ result.Engines, err = cmd.readEngines(rd)
+ case "all_running_scripts": // Redis Enterprise only
+ result.allrs, result.isRunning, err = cmd.readRunningScripts(rd)
+ default:
+ return fmt.Errorf("redis: function stats unexpected key %s", key)
+ }
+
+ if err != nil {
+ return err
+ }
+ }
+
+ cmd.val = result
+ return nil
+}
+
+func (cmd *FunctionStatsCmd) readRunningScript(rd *proto.Reader) (RunningScript, bool, error) {
+ err := rd.ReadFixedMapLen(3)
+ if err != nil {
+ if err == Nil {
+ return RunningScript{}, false, nil
+ }
+ return RunningScript{}, false, err
+ }
+
+ var runningScript RunningScript
+ for i := 0; i < 3; i++ {
+ key, err := rd.ReadString()
+ if err != nil {
+ return RunningScript{}, false, err
+ }
+
+ switch key {
+ case "name":
+ runningScript.Name, err = rd.ReadString()
+ case "duration_ms":
+ runningScript.Duration, err = cmd.readDuration(rd)
+ case "command":
+ runningScript.Command, err = cmd.readCommand(rd)
+ default:
+ return RunningScript{}, false, fmt.Errorf("redis: function stats unexpected running_script key %s", key)
+ }
+
+ if err != nil {
+ return RunningScript{}, false, err
+ }
+ }
+
+ return runningScript, true, nil
+}
+
+func (cmd *FunctionStatsCmd) readEngines(rd *proto.Reader) ([]Engine, error) {
+ n, err := rd.ReadMapLen()
+ if err != nil {
+ return nil, err
+ }
+
+ engines := make([]Engine, 0, n)
+ for i := 0; i < n; i++ {
+ engine := Engine{}
+ engine.Language, err = rd.ReadString()
+ if err != nil {
+ return nil, err
+ }
+
+ err = rd.ReadFixedMapLen(2)
+ if err != nil {
+ return nil, fmt.Errorf("redis: function stats unexpected %s engine map length", engine.Language)
+ }
+
+ for i := 0; i < 2; i++ {
+ key, err := rd.ReadString()
+ switch key {
+ case "libraries_count":
+ engine.LibrariesCount, err = rd.ReadInt()
+ case "functions_count":
+ engine.FunctionsCount, err = rd.ReadInt()
+ }
+ if err != nil {
+ return nil, err
+ }
+ }
+
+ engines = append(engines, engine)
+ }
+ return engines, nil
+}
+
+func (cmd *FunctionStatsCmd) readDuration(rd *proto.Reader) (time.Duration, error) {
+ t, err := rd.ReadInt()
+ if err != nil {
+ return time.Duration(0), err
+ }
+ return time.Duration(t) * time.Millisecond, nil
+}
+
+func (cmd *FunctionStatsCmd) readCommand(rd *proto.Reader) ([]string, error) {
+ n, err := rd.ReadArrayLen()
+ if err != nil {
+ return nil, err
+ }
+
+ command := make([]string, 0, n)
+ for i := 0; i < n; i++ {
+ x, err := rd.ReadString()
+ if err != nil {
+ return nil, err
+ }
+ command = append(command, x)
+ }
+
+ return command, nil
+}
+
+func (cmd *FunctionStatsCmd) readRunningScripts(rd *proto.Reader) ([]RunningScript, bool, error) {
+ n, err := rd.ReadArrayLen()
+ if err != nil {
+ return nil, false, err
+ }
+
+ runningScripts := make([]RunningScript, 0, n)
+ for i := 0; i < n; i++ {
+ rs, _, err := cmd.readRunningScript(rd)
+ if err != nil {
+ return nil, false, err
+ }
+ runningScripts = append(runningScripts, rs)
+ }
+
+ return runningScripts, len(runningScripts) > 0, nil
+}
+
+//------------------------------------------------------------------------------
+
+// LCSQuery is a parameter used for the LCS command
+type LCSQuery struct {
+ Key1 string
+ Key2 string
+ Len bool
+ Idx bool
+ MinMatchLen int
+ WithMatchLen bool
+}
+
+// LCSMatch is the result set of the LCS command.
+type LCSMatch struct {
+ MatchString string
+ Matches []LCSMatchedPosition
+ Len int64
+}
+
+type LCSMatchedPosition struct {
+ Key1 LCSPosition
+ Key2 LCSPosition
+
+ // only for withMatchLen is true
+ MatchLen int64
+}
+
+type LCSPosition struct {
+ Start int64
+ End int64
+}
+
+type LCSCmd struct {
+ baseCmd
+
+ // 1: match string
+ // 2: match len
+ // 3: match idx LCSMatch
+ readType uint8
+ val *LCSMatch
+}
+
+func NewLCSCmd(ctx context.Context, q *LCSQuery) *LCSCmd {
+ args := make([]interface{}, 3, 7)
+ args[0] = "lcs"
+ args[1] = q.Key1
+ args[2] = q.Key2
+
+ cmd := &LCSCmd{readType: 1}
+ if q.Len {
+ cmd.readType = 2
+ args = append(args, "len")
+ } else if q.Idx {
+ cmd.readType = 3
+ args = append(args, "idx")
+ if q.MinMatchLen != 0 {
+ args = append(args, "minmatchlen", q.MinMatchLen)
+ }
+ if q.WithMatchLen {
+ args = append(args, "withmatchlen")
+ }
+ }
+ cmd.baseCmd = baseCmd{
+ ctx: ctx,
+ args: args,
+ }
+
+ return cmd
+}
+
+func (cmd *LCSCmd) SetVal(val *LCSMatch) {
+ cmd.val = val
+}
+
+func (cmd *LCSCmd) String() string {
+ return cmdString(cmd, cmd.val)
+}
+
+func (cmd *LCSCmd) Val() *LCSMatch {
+ return cmd.val
+}
+
+func (cmd *LCSCmd) Result() (*LCSMatch, error) {
+ return cmd.val, cmd.err
+}
+
+func (cmd *LCSCmd) readReply(rd *proto.Reader) (err error) {
+ lcs := &LCSMatch{}
+ switch cmd.readType {
+ case 1:
+ // match string
+ if lcs.MatchString, err = rd.ReadString(); err != nil {
+ return err
+ }
+ case 2:
+ // match len
+ if lcs.Len, err = rd.ReadInt(); err != nil {
+ return err
+ }
+ case 3:
+ // read LCSMatch
+ if err = rd.ReadFixedMapLen(2); err != nil {
+ return err
+ }
+
+ // read matches or len field
+ for i := 0; i < 2; i++ {
+ key, err := rd.ReadString()
+ if err != nil {
+ return err
+ }
+
+ switch key {
+ case "matches":
+ // read array of matched positions
+ if lcs.Matches, err = cmd.readMatchedPositions(rd); err != nil {
+ return err
+ }
+ case "len":
+ // read match length
+ if lcs.Len, err = rd.ReadInt(); err != nil {
+ return err
+ }
+ }
+ }
+ }
+
+ cmd.val = lcs
+ return nil
+}
+
+func (cmd *LCSCmd) readMatchedPositions(rd *proto.Reader) ([]LCSMatchedPosition, error) {
+ n, err := rd.ReadArrayLen()
+ if err != nil {
+ return nil, err
+ }
+
+ positions := make([]LCSMatchedPosition, n)
+ for i := 0; i < n; i++ {
+ pn, err := rd.ReadArrayLen()
+ if err != nil {
+ return nil, err
+ }
+
+ if positions[i].Key1, err = cmd.readPosition(rd); err != nil {
+ return nil, err
+ }
+ if positions[i].Key2, err = cmd.readPosition(rd); err != nil {
+ return nil, err
+ }
+
+ // read match length if WithMatchLen is true
+ if pn > 2 {
+ if positions[i].MatchLen, err = rd.ReadInt(); err != nil {
+ return nil, err
+ }
+ }
+ }
+
+ return positions, nil
+}
+
+func (cmd *LCSCmd) readPosition(rd *proto.Reader) (pos LCSPosition, err error) {
+ if err = rd.ReadFixedArrayLen(2); err != nil {
+ return pos, err
+ }
+ if pos.Start, err = rd.ReadInt(); err != nil {
+ return pos, err
+ }
+ if pos.End, err = rd.ReadInt(); err != nil {
+ return pos, err
+ }
+
+ return pos, nil
+}
+
+// ------------------------------------------------------------------------
+
+type KeyFlags struct {
+ Key string
+ Flags []string
+}
+
+type KeyFlagsCmd struct {
+ baseCmd
+
+ val []KeyFlags
+}
+
+var _ Cmder = (*KeyFlagsCmd)(nil)
+
+func NewKeyFlagsCmd(ctx context.Context, args ...interface{}) *KeyFlagsCmd {
+ return &KeyFlagsCmd{
+ baseCmd: baseCmd{
+ ctx: ctx,
+ args: args,
+ },
+ }
+}
+
+func (cmd *KeyFlagsCmd) SetVal(val []KeyFlags) {
+ cmd.val = val
+}
+
+func (cmd *KeyFlagsCmd) Val() []KeyFlags {
+ return cmd.val
+}
+
+func (cmd *KeyFlagsCmd) Result() ([]KeyFlags, error) {
+ return cmd.val, cmd.err
+}
+
+func (cmd *KeyFlagsCmd) String() string {
+ return cmdString(cmd, cmd.val)
+}
+
+func (cmd *KeyFlagsCmd) readReply(rd *proto.Reader) error {
+ n, err := rd.ReadArrayLen()
+ if err != nil {
+ return err
+ }
+
+ if n == 0 {
+ cmd.val = make([]KeyFlags, 0)
+ return nil
+ }
+
+ cmd.val = make([]KeyFlags, n)
+
+ for i := 0; i < len(cmd.val); i++ {
+
+ if err = rd.ReadFixedArrayLen(2); err != nil {
+ return err
+ }
+
+ if cmd.val[i].Key, err = rd.ReadString(); err != nil {
+ return err
+ }
+ flagsLen, err := rd.ReadArrayLen()
+ if err != nil {
+ return err
+ }
+ cmd.val[i].Flags = make([]string, flagsLen)
+
+ for j := 0; j < flagsLen; j++ {
+ if cmd.val[i].Flags[j], err = rd.ReadString(); err != nil {
+ return err
+ }
+ }
+ }
+
+ return nil
+}
+
+// ---------------------------------------------------------------------------------------------------
+
+type ClusterLink struct {
+ Direction string
+ Node string
+ CreateTime int64
+ Events string
+ SendBufferAllocated int64
+ SendBufferUsed int64
+}
+
+type ClusterLinksCmd struct {
+ baseCmd
+
+ val []ClusterLink
+}
+
+var _ Cmder = (*ClusterLinksCmd)(nil)
+
+func NewClusterLinksCmd(ctx context.Context, args ...interface{}) *ClusterLinksCmd {
+ return &ClusterLinksCmd{
+ baseCmd: baseCmd{
+ ctx: ctx,
+ args: args,
+ },
+ }
+}
+
+func (cmd *ClusterLinksCmd) SetVal(val []ClusterLink) {
+ cmd.val = val
+}
+
+func (cmd *ClusterLinksCmd) Val() []ClusterLink {
+ return cmd.val
+}
+
+func (cmd *ClusterLinksCmd) Result() ([]ClusterLink, error) {
+ return cmd.val, cmd.err
+}
+
+func (cmd *ClusterLinksCmd) String() string {
+ return cmdString(cmd, cmd.val)
+}
+
+func (cmd *ClusterLinksCmd) readReply(rd *proto.Reader) error {
+ n, err := rd.ReadArrayLen()
+ if err != nil {
+ return err
+ }
+ cmd.val = make([]ClusterLink, n)
+
+ for i := 0; i < len(cmd.val); i++ {
+ m, err := rd.ReadMapLen()
+ if err != nil {
+ return err
+ }
+
+ for j := 0; j < m; j++ {
+ key, err := rd.ReadString()
+ if err != nil {
+ return err
+ }
+
+ switch key {
+ case "direction":
+ cmd.val[i].Direction, err = rd.ReadString()
+ case "node":
+ cmd.val[i].Node, err = rd.ReadString()
+ case "create-time":
+ cmd.val[i].CreateTime, err = rd.ReadInt()
+ case "events":
+ cmd.val[i].Events, err = rd.ReadString()
+ case "send-buffer-allocated":
+ cmd.val[i].SendBufferAllocated, err = rd.ReadInt()
+ case "send-buffer-used":
+ cmd.val[i].SendBufferUsed, err = rd.ReadInt()
+ default:
+ return fmt.Errorf("redis: unexpected key %q in CLUSTER LINKS reply", key)
+ }
+
+ if err != nil {
+ return err
+ }
+ }
+ }
+
+ return nil
+}
+
+// ------------------------------------------------------------------------------------------------------------------
+
+type SlotRange struct {
+ Start int64
+ End int64
+}
+
+type Node struct {
+ ID string
+ Endpoint string
+ IP string
+ Hostname string
+ Port int64
+ TLSPort int64
+ Role string
+ ReplicationOffset int64
+ Health string
+}
+
+type ClusterShard struct {
+ Slots []SlotRange
+ Nodes []Node
+}
+
+type ClusterShardsCmd struct {
+ baseCmd
+
+ val []ClusterShard
+}
+
+var _ Cmder = (*ClusterShardsCmd)(nil)
+
+func NewClusterShardsCmd(ctx context.Context, args ...interface{}) *ClusterShardsCmd {
+ return &ClusterShardsCmd{
+ baseCmd: baseCmd{
+ ctx: ctx,
+ args: args,
+ },
+ }
+}
+
+func (cmd *ClusterShardsCmd) SetVal(val []ClusterShard) {
+ cmd.val = val
+}
+
+func (cmd *ClusterShardsCmd) Val() []ClusterShard {
+ return cmd.val
+}
+
+func (cmd *ClusterShardsCmd) Result() ([]ClusterShard, error) {
+ return cmd.val, cmd.err
+}
+
+func (cmd *ClusterShardsCmd) String() string {
+ return cmdString(cmd, cmd.val)
+}
+
+func (cmd *ClusterShardsCmd) readReply(rd *proto.Reader) error {
+ n, err := rd.ReadArrayLen()
+ if err != nil {
+ return err
+ }
+ cmd.val = make([]ClusterShard, n)
+
+ for i := 0; i < n; i++ {
+ m, err := rd.ReadMapLen()
+ if err != nil {
+ return err
+ }
+
+ for j := 0; j < m; j++ {
+ key, err := rd.ReadString()
+ if err != nil {
+ return err
+ }
+
+ switch key {
+ case "slots":
+ l, err := rd.ReadArrayLen()
+ if err != nil {
+ return err
+ }
+ for k := 0; k < l; k += 2 {
+ start, err := rd.ReadInt()
+ if err != nil {
+ return err
+ }
+
+ end, err := rd.ReadInt()
+ if err != nil {
+ return err
+ }
+
+ cmd.val[i].Slots = append(cmd.val[i].Slots, SlotRange{Start: start, End: end})
+ }
+ case "nodes":
+ nodesLen, err := rd.ReadArrayLen()
+ if err != nil {
+ return err
+ }
+ cmd.val[i].Nodes = make([]Node, nodesLen)
+ for k := 0; k < nodesLen; k++ {
+ nodeMapLen, err := rd.ReadMapLen()
+ if err != nil {
+ return err
+ }
+
+ for l := 0; l < nodeMapLen; l++ {
+ nodeKey, err := rd.ReadString()
+ if err != nil {
+ return err
+ }
+
+ switch nodeKey {
+ case "id":
+ cmd.val[i].Nodes[k].ID, err = rd.ReadString()
+ case "endpoint":
+ cmd.val[i].Nodes[k].Endpoint, err = rd.ReadString()
+ case "ip":
+ cmd.val[i].Nodes[k].IP, err = rd.ReadString()
+ case "hostname":
+ cmd.val[i].Nodes[k].Hostname, err = rd.ReadString()
+ case "port":
+ cmd.val[i].Nodes[k].Port, err = rd.ReadInt()
+ case "tls-port":
+ cmd.val[i].Nodes[k].TLSPort, err = rd.ReadInt()
+ case "role":
+ cmd.val[i].Nodes[k].Role, err = rd.ReadString()
+ case "replication-offset":
+ cmd.val[i].Nodes[k].ReplicationOffset, err = rd.ReadInt()
+ case "health":
+ cmd.val[i].Nodes[k].Health, err = rd.ReadString()
+ default:
+ return fmt.Errorf("redis: unexpected key %q in CLUSTER SHARDS node reply", nodeKey)
+ }
+
+ if err != nil {
+ return err
+ }
+ }
+ }
+ default:
+ return fmt.Errorf("redis: unexpected key %q in CLUSTER SHARDS reply", key)
+ }
+ }
+ }
+
+ return nil
+}
+
+// -----------------------------------------
+
+type RankScore struct {
+ Rank int64
+ Score float64
+}
+
+type RankWithScoreCmd struct {
+ baseCmd
+
+ val RankScore
+}
+
+var _ Cmder = (*RankWithScoreCmd)(nil)
+
+func NewRankWithScoreCmd(ctx context.Context, args ...interface{}) *RankWithScoreCmd {
+ return &RankWithScoreCmd{
+ baseCmd: baseCmd{
+ ctx: ctx,
+ args: args,
+ },
+ }
+}
+
+func (cmd *RankWithScoreCmd) SetVal(val RankScore) {
+ cmd.val = val
+}
+
+func (cmd *RankWithScoreCmd) Val() RankScore {
+ return cmd.val
+}
+
+func (cmd *RankWithScoreCmd) Result() (RankScore, error) {
+ return cmd.val, cmd.err
+}
+
+func (cmd *RankWithScoreCmd) String() string {
+ return cmdString(cmd, cmd.val)
+}
+
+func (cmd *RankWithScoreCmd) readReply(rd *proto.Reader) error {
+ if err := rd.ReadFixedArrayLen(2); err != nil {
+ return err
+ }
+
+ rank, err := rd.ReadInt()
+ if err != nil {
+ return err
+ }
+
+ score, err := rd.ReadFloat()
+ if err != nil {
+ return err
+ }
+
+ cmd.val = RankScore{Rank: rank, Score: score}
+
+ return nil
+}
+
+// --------------------------------------------------------------------------------------------------
+
+// ClientFlags is redis-server client flags, copy from redis/src/server.h (redis 7.0)
+type ClientFlags uint64
+
+const (
+ ClientSlave ClientFlags = 1 << 0 /* This client is a replica */
+ ClientMaster ClientFlags = 1 << 1 /* This client is a master */
+ ClientMonitor ClientFlags = 1 << 2 /* This client is a slave monitor, see MONITOR */
+ ClientMulti ClientFlags = 1 << 3 /* This client is in a MULTI context */
+ ClientBlocked ClientFlags = 1 << 4 /* The client is waiting in a blocking operation */
+ ClientDirtyCAS ClientFlags = 1 << 5 /* Watched keys modified. EXEC will fail. */
+ ClientCloseAfterReply ClientFlags = 1 << 6 /* Close after writing entire reply. */
+ ClientUnBlocked ClientFlags = 1 << 7 /* This client was unblocked and is stored in server.unblocked_clients */
+ ClientScript ClientFlags = 1 << 8 /* This is a non-connected client used by Lua */
+ ClientAsking ClientFlags = 1 << 9 /* Client issued the ASKING command */
+ ClientCloseASAP ClientFlags = 1 << 10 /* Close this client ASAP */
+ ClientUnixSocket ClientFlags = 1 << 11 /* Client connected via Unix domain socket */
+ ClientDirtyExec ClientFlags = 1 << 12 /* EXEC will fail for errors while queueing */
+ ClientMasterForceReply ClientFlags = 1 << 13 /* Queue replies even if is master */
+ ClientForceAOF ClientFlags = 1 << 14 /* Force AOF propagation of current cmd. */
+ ClientForceRepl ClientFlags = 1 << 15 /* Force replication of current cmd. */
+ ClientPrePSync ClientFlags = 1 << 16 /* Instance don't understand PSYNC. */
+ ClientReadOnly ClientFlags = 1 << 17 /* Cluster client is in read-only state. */
+ ClientPubSub ClientFlags = 1 << 18 /* Client is in Pub/Sub mode. */
+ ClientPreventAOFProp ClientFlags = 1 << 19 /* Don't propagate to AOF. */
+ ClientPreventReplProp ClientFlags = 1 << 20 /* Don't propagate to slaves. */
+ ClientPreventProp ClientFlags = ClientPreventAOFProp | ClientPreventReplProp
+ ClientPendingWrite ClientFlags = 1 << 21 /* Client has output to send but a-write handler is yet not installed. */
+ ClientReplyOff ClientFlags = 1 << 22 /* Don't send replies to client. */
+ ClientReplySkipNext ClientFlags = 1 << 23 /* Set ClientREPLY_SKIP for next cmd */
+ ClientReplySkip ClientFlags = 1 << 24 /* Don't send just this reply. */
+ ClientLuaDebug ClientFlags = 1 << 25 /* Run EVAL in debug mode. */
+ ClientLuaDebugSync ClientFlags = 1 << 26 /* EVAL debugging without fork() */
+ ClientModule ClientFlags = 1 << 27 /* Non connected client used by some module. */
+ ClientProtected ClientFlags = 1 << 28 /* Client should not be freed for now. */
+ ClientExecutingCommand ClientFlags = 1 << 29 /* Indicates that the client is currently in the process of handling
+ a command. usually this will be marked only during call()
+ however, blocked clients might have this flag kept until they
+ will try to reprocess the command. */
+ ClientPendingCommand ClientFlags = 1 << 30 /* Indicates the client has a fully * parsed command ready for execution. */
+ ClientTracking ClientFlags = 1 << 31 /* Client enabled keys tracking in order to perform client side caching. */
+ ClientTrackingBrokenRedir ClientFlags = 1 << 32 /* Target client is invalid. */
+ ClientTrackingBCAST ClientFlags = 1 << 33 /* Tracking in BCAST mode. */
+ ClientTrackingOptIn ClientFlags = 1 << 34 /* Tracking in opt-in mode. */
+ ClientTrackingOptOut ClientFlags = 1 << 35 /* Tracking in opt-out mode. */
+ ClientTrackingCaching ClientFlags = 1 << 36 /* CACHING yes/no was given, depending on optin/optout mode. */
+ ClientTrackingNoLoop ClientFlags = 1 << 37 /* Don't send invalidation messages about writes performed by myself.*/
+ ClientInTimeoutTable ClientFlags = 1 << 38 /* This client is in the timeout table. */
+ ClientProtocolError ClientFlags = 1 << 39 /* Protocol error chatting with it. */
+ ClientCloseAfterCommand ClientFlags = 1 << 40 /* Close after executing commands * and writing entire reply. */
+ ClientDenyBlocking ClientFlags = 1 << 41 /* Indicate that the client should not be blocked. currently, turned on inside MULTI, Lua, RM_Call, and AOF client */
+ ClientReplRDBOnly ClientFlags = 1 << 42 /* This client is a replica that only wants RDB without replication buffer. */
+ ClientNoEvict ClientFlags = 1 << 43 /* This client is protected against client memory eviction. */
+ ClientAllowOOM ClientFlags = 1 << 44 /* Client used by RM_Call is allowed to fully execute scripts even when in OOM */
+ ClientNoTouch ClientFlags = 1 << 45 /* This client will not touch LFU/LRU stats. */
+ ClientPushing ClientFlags = 1 << 46 /* This client is pushing notifications. */
+)
+
+// ClientInfo is redis-server ClientInfo, not go-redis *Client
+type ClientInfo struct {
+ ID int64 // redis version 2.8.12, a unique 64-bit client ID
+ Addr string // address/port of the client
+ LAddr string // address/port of local address client connected to (bind address)
+ FD int64 // file descriptor corresponding to the socket
+ Name string // the name set by the client with CLIENT SETNAME
+ Age time.Duration // total duration of the connection in seconds
+ Idle time.Duration // idle time of the connection in seconds
+ Flags ClientFlags // client flags (see below)
+ DB int // current database ID
+ Sub int // number of channel subscriptions
+ PSub int // number of pattern matching subscriptions
+ SSub int // redis version 7.0.3, number of shard channel subscriptions
+ Multi int // number of commands in a MULTI/EXEC context
+ QueryBuf int // qbuf, query buffer length (0 means no query pending)
+ QueryBufFree int // qbuf-free, free space of the query buffer (0 means the buffer is full)
+ ArgvMem int // incomplete arguments for the next command (already extracted from query buffer)
+ MultiMem int // redis version 7.0, memory is used up by buffered multi commands
+ BufferSize int // rbs, usable size of buffer
+ BufferPeak int // rbp, peak used size of buffer in last 5 sec interval
+ OutputBufferLength int // obl, output buffer length
+ OutputListLength int // oll, output list length (replies are queued in this list when the buffer is full)
+ OutputMemory int // omem, output buffer memory usage
+ TotalMemory int // tot-mem, total memory consumed by this client in its various buffers
+ Events string // file descriptor events (see below)
+ LastCmd string // cmd, last command played
+ User string // the authenticated username of the client
+ Redir int64 // client id of current client tracking redirection
+ Resp int // redis version 7.0, client RESP protocol version
+ LibName string // redis version 7.2, client library name
+ LibVer string // redis version 7.2, client library version
+}
+
+type ClientInfoCmd struct {
+ baseCmd
+
+ val *ClientInfo
+}
+
+var _ Cmder = (*ClientInfoCmd)(nil)
+
+func NewClientInfoCmd(ctx context.Context, args ...interface{}) *ClientInfoCmd {
+ return &ClientInfoCmd{
+ baseCmd: baseCmd{
+ ctx: ctx,
+ args: args,
+ },
+ }
+}
+
+func (cmd *ClientInfoCmd) SetVal(val *ClientInfo) {
+ cmd.val = val
+}
+
+func (cmd *ClientInfoCmd) String() string {
+ return cmdString(cmd, cmd.val)
+}
+
+func (cmd *ClientInfoCmd) Val() *ClientInfo {
+ return cmd.val
+}
+
+func (cmd *ClientInfoCmd) Result() (*ClientInfo, error) {
+ return cmd.val, cmd.err
+}
+
+func (cmd *ClientInfoCmd) readReply(rd *proto.Reader) (err error) {
+ txt, err := rd.ReadString()
+ if err != nil {
+ return err
+ }
+
+ // sds o = catClientInfoString(sdsempty(), c);
+ // o = sdscatlen(o,"\n",1);
+ // addReplyVerbatim(c,o,sdslen(o),"txt");
+ // sdsfree(o);
+ cmd.val, err = parseClientInfo(strings.TrimSpace(txt))
+ return err
+}
+
+// fmt.Sscanf() cannot handle null values
+func parseClientInfo(txt string) (info *ClientInfo, err error) {
+ info = &ClientInfo{}
+ for _, s := range strings.Split(txt, " ") {
+ kv := strings.Split(s, "=")
+ if len(kv) != 2 {
+ return nil, fmt.Errorf("redis: unexpected client info data (%s)", s)
+ }
+ key, val := kv[0], kv[1]
+
+ switch key {
+ case "id":
+ info.ID, err = strconv.ParseInt(val, 10, 64)
+ case "addr":
+ info.Addr = val
+ case "laddr":
+ info.LAddr = val
+ case "fd":
+ info.FD, err = strconv.ParseInt(val, 10, 64)
+ case "name":
+ info.Name = val
+ case "age":
+ var age int
+ if age, err = strconv.Atoi(val); err == nil {
+ info.Age = time.Duration(age) * time.Second
+ }
+ case "idle":
+ var idle int
+ if idle, err = strconv.Atoi(val); err == nil {
+ info.Idle = time.Duration(idle) * time.Second
+ }
+ case "flags":
+ if val == "N" {
+ break
+ }
+
+ for i := 0; i < len(val); i++ {
+ switch val[i] {
+ case 'S':
+ info.Flags |= ClientSlave
+ case 'O':
+ info.Flags |= ClientSlave | ClientMonitor
+ case 'M':
+ info.Flags |= ClientMaster
+ case 'P':
+ info.Flags |= ClientPubSub
+ case 'x':
+ info.Flags |= ClientMulti
+ case 'b':
+ info.Flags |= ClientBlocked
+ case 't':
+ info.Flags |= ClientTracking
+ case 'R':
+ info.Flags |= ClientTrackingBrokenRedir
+ case 'B':
+ info.Flags |= ClientTrackingBCAST
+ case 'd':
+ info.Flags |= ClientDirtyCAS
+ case 'c':
+ info.Flags |= ClientCloseAfterCommand
+ case 'u':
+ info.Flags |= ClientUnBlocked
+ case 'A':
+ info.Flags |= ClientCloseASAP
+ case 'U':
+ info.Flags |= ClientUnixSocket
+ case 'r':
+ info.Flags |= ClientReadOnly
+ case 'e':
+ info.Flags |= ClientNoEvict
+ case 'T':
+ info.Flags |= ClientNoTouch
+ default:
+ return nil, fmt.Errorf("redis: unexpected client info flags(%s)", string(val[i]))
+ }
+ }
+ case "db":
+ info.DB, err = strconv.Atoi(val)
+ case "sub":
+ info.Sub, err = strconv.Atoi(val)
+ case "psub":
+ info.PSub, err = strconv.Atoi(val)
+ case "ssub":
+ info.SSub, err = strconv.Atoi(val)
+ case "multi":
+ info.Multi, err = strconv.Atoi(val)
+ case "qbuf":
+ info.QueryBuf, err = strconv.Atoi(val)
+ case "qbuf-free":
+ info.QueryBufFree, err = strconv.Atoi(val)
+ case "argv-mem":
+ info.ArgvMem, err = strconv.Atoi(val)
+ case "multi-mem":
+ info.MultiMem, err = strconv.Atoi(val)
+ case "rbs":
+ info.BufferSize, err = strconv.Atoi(val)
+ case "rbp":
+ info.BufferPeak, err = strconv.Atoi(val)
+ case "obl":
+ info.OutputBufferLength, err = strconv.Atoi(val)
+ case "oll":
+ info.OutputListLength, err = strconv.Atoi(val)
+ case "omem":
+ info.OutputMemory, err = strconv.Atoi(val)
+ case "tot-mem":
+ info.TotalMemory, err = strconv.Atoi(val)
+ case "events":
+ info.Events = val
+ case "cmd":
+ info.LastCmd = val
+ case "user":
+ info.User = val
+ case "redir":
+ info.Redir, err = strconv.ParseInt(val, 10, 64)
+ case "resp":
+ info.Resp, err = strconv.Atoi(val)
+ case "lib-name":
+ info.LibName = val
+ case "lib-ver":
+ info.LibVer = val
+ default:
+ return nil, fmt.Errorf("redis: unexpected client info key(%s)", key)
+ }
+
+ if err != nil {
+ return nil, err
+ }
+ }
+
+ return info, nil
+}
+
+// -------------------------------------------
+
+type ACLLogEntry struct {
+ Count int64
+ Reason string
+ Context string
+ Object string
+ Username string
+ AgeSeconds float64
+ ClientInfo *ClientInfo
+ EntryID int64
+ TimestampCreated int64
+ TimestampLastUpdated int64
+}
+
+type ACLLogCmd struct {
+ baseCmd
+
+ val []*ACLLogEntry
+}
+
+var _ Cmder = (*ACLLogCmd)(nil)
+
+func NewACLLogCmd(ctx context.Context, args ...interface{}) *ACLLogCmd {
+ return &ACLLogCmd{
+ baseCmd: baseCmd{
+ ctx: ctx,
+ args: args,
+ },
+ }
+}
+
+func (cmd *ACLLogCmd) SetVal(val []*ACLLogEntry) {
+ cmd.val = val
+}
+
+func (cmd *ACLLogCmd) Val() []*ACLLogEntry {
+ return cmd.val
+}
+
+func (cmd *ACLLogCmd) Result() ([]*ACLLogEntry, error) {
+ return cmd.val, cmd.err
+}
+
+func (cmd *ACLLogCmd) String() string {
+ return cmdString(cmd, cmd.val)
+}
+
+func (cmd *ACLLogCmd) readReply(rd *proto.Reader) error {
+ n, err := rd.ReadArrayLen()
+ if err != nil {
+ return err
+ }
+
+ cmd.val = make([]*ACLLogEntry, n)
+ for i := 0; i < n; i++ {
+ cmd.val[i] = &ACLLogEntry{}
+ entry := cmd.val[i]
+ respLen, err := rd.ReadMapLen()
+ if err != nil {
+ return err
+ }
+ for j := 0; j < respLen; j++ {
+ key, err := rd.ReadString()
+ if err != nil {
+ return err
+ }
+
+ switch key {
+ case "count":
+ entry.Count, err = rd.ReadInt()
+ case "reason":
+ entry.Reason, err = rd.ReadString()
+ case "context":
+ entry.Context, err = rd.ReadString()
+ case "object":
+ entry.Object, err = rd.ReadString()
+ case "username":
+ entry.Username, err = rd.ReadString()
+ case "age-seconds":
+ entry.AgeSeconds, err = rd.ReadFloat()
+ case "client-info":
+ txt, err := rd.ReadString()
+ if err != nil {
+ return err
+ }
+ entry.ClientInfo, err = parseClientInfo(strings.TrimSpace(txt))
+ if err != nil {
+ return err
+ }
+ case "entry-id":
+ entry.EntryID, err = rd.ReadInt()
+ case "timestamp-created":
+ entry.TimestampCreated, err = rd.ReadInt()
+ case "timestamp-last-updated":
+ entry.TimestampLastUpdated, err = rd.ReadInt()
+ default:
+ return fmt.Errorf("redis: unexpected key %q in ACL LOG reply", key)
+ }
+
+ if err != nil {
+ return err
+ }
+ }
+ }
+
+ return nil
+}
+
+// LibraryInfo holds the library info.
+type LibraryInfo struct {
+ LibName *string
+ LibVer *string
+}
+
+// WithLibraryName returns a valid LibraryInfo with library name only.
+func WithLibraryName(libName string) LibraryInfo {
+ return LibraryInfo{LibName: &libName}
+}
+
+// WithLibraryVersion returns a valid LibraryInfo with library version only.
+func WithLibraryVersion(libVer string) LibraryInfo {
+ return LibraryInfo{LibVer: &libVer}
+}
+
+// -------------------------------------------
+
+type InfoCmd struct {
+ baseCmd
+ val map[string]map[string]string
+}
+
+var _ Cmder = (*InfoCmd)(nil)
+
+func NewInfoCmd(ctx context.Context, args ...interface{}) *InfoCmd {
+ return &InfoCmd{
+ baseCmd: baseCmd{
+ ctx: ctx,
+ args: args,
+ },
+ }
+}
+
+func (cmd *InfoCmd) SetVal(val map[string]map[string]string) {
+ cmd.val = val
+}
+
+func (cmd *InfoCmd) Val() map[string]map[string]string {
+ return cmd.val
+}
+
+func (cmd *InfoCmd) Result() (map[string]map[string]string, error) {
+ return cmd.val, cmd.err
+}
+
+func (cmd *InfoCmd) String() string {
+ return cmdString(cmd, cmd.val)
+}
+
+func (cmd *InfoCmd) readReply(rd *proto.Reader) error {
+ val, err := rd.ReadString()
+ if err != nil {
+ return err
+ }
+
+ section := ""
+ scanner := bufio.NewScanner(strings.NewReader(val))
+ moduleRe := regexp.MustCompile(`module:name=(.+?),(.+)$`)
+
+ for scanner.Scan() {
+ line := scanner.Text()
+ if strings.HasPrefix(line, "#") {
+ if cmd.val == nil {
+ cmd.val = make(map[string]map[string]string)
+ }
+ section = strings.TrimPrefix(line, "# ")
+ cmd.val[section] = make(map[string]string)
+ } else if line != "" {
+ if section == "Modules" {
+ kv := moduleRe.FindStringSubmatch(line)
+ if len(kv) == 3 {
+ cmd.val[section][kv[1]] = kv[2]
+ }
+ } else {
+ kv := strings.SplitN(line, ":", 2)
+ if len(kv) == 2 {
+ cmd.val[section][kv[0]] = kv[1]
+ }
+ }
+ }
+ }
+
+ return nil
+}
+
+func (cmd *InfoCmd) Item(section, key string) string {
+ if cmd.val == nil {
+ return ""
+ } else if cmd.val[section] == nil {
+ return ""
+ } else {
+ return cmd.val[section][key]
+ }
+}
+
+type MonitorStatus int
+
+const (
+ monitorStatusIdle MonitorStatus = iota
+ monitorStatusStart
+ monitorStatusStop
+)
+
+type MonitorCmd struct {
+ baseCmd
+ ch chan string
+ status MonitorStatus
+ mu sync.Mutex
+}
+
+func newMonitorCmd(ctx context.Context, ch chan string) *MonitorCmd {
+ return &MonitorCmd{
+ baseCmd: baseCmd{
+ ctx: ctx,
+ args: []interface{}{"monitor"},
+ },
+ ch: ch,
+ status: monitorStatusIdle,
+ mu: sync.Mutex{},
+ }
+}
+
+func (cmd *MonitorCmd) String() string {
+ return cmdString(cmd, nil)
+}
+
+func (cmd *MonitorCmd) readReply(rd *proto.Reader) error {
+ ctx, cancel := context.WithCancel(cmd.ctx)
+ go func(ctx context.Context) {
+ for {
+ select {
+ case <-ctx.Done():
+ return
+ default:
+ err := cmd.readMonitor(rd, cancel)
+ if err != nil {
+ cmd.err = err
+ return
+ }
+ }
+ }
+ }(ctx)
+ return nil
+}
+
+func (cmd *MonitorCmd) readMonitor(rd *proto.Reader, cancel context.CancelFunc) error {
+ for {
+ cmd.mu.Lock()
+ st := cmd.status
+ cmd.mu.Unlock()
+ if pk, _ := rd.Peek(1); len(pk) != 0 && st == monitorStatusStart {
+ line, err := rd.ReadString()
+ if err != nil {
+ return err
+ }
+ cmd.ch <- line
+ }
+ if st == monitorStatusStop {
+ cancel()
+ break
+ }
+ }
+ return nil
+}
+
+func (cmd *MonitorCmd) Start() {
+ cmd.mu.Lock()
+ defer cmd.mu.Unlock()
+ cmd.status = monitorStatusStart
+}
+
+func (cmd *MonitorCmd) Stop() {
+ cmd.mu.Lock()
+ defer cmd.mu.Unlock()
+ cmd.status = monitorStatusStop
+}
diff --git a/dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/command_test.go b/dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.5.1/command_test.go
index 168f9f6..b9d558c 100644
--- a/dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/command_test.go
+++ b/dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.5.1/command_test.go
@@ -4,10 +4,10 @@ import (
"errors"
"time"
- . "github.com/onsi/ginkgo"
- . "github.com/onsi/gomega"
+ "github.com/redis/go-redis/v9"
- redis "github.com/go-redis/redis/v8"
+ . "github.com/bsm/ginkgo/v2"
+ . "github.com/bsm/gomega"
)
var _ = Describe("Cmd", func() {
diff --git a/dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.5.1/commands.go b/dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.5.1/commands.go
new file mode 100644
index 0000000..db59594
--- /dev/null
+++ b/dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.5.1/commands.go
@@ -0,0 +1,718 @@
+package redis
+
+import (
+ "context"
+ "encoding"
+ "errors"
+ "fmt"
+ "io"
+ "net"
+ "reflect"
+ "runtime"
+ "strings"
+ "time"
+
+ "github.com/redis/go-redis/v9/internal"
+)
+
+// KeepTTL is a Redis KEEPTTL option to keep existing TTL, it requires your redis-server version >= 6.0,
+// otherwise you will receive an error: (error) ERR syntax error.
+// For example:
+//
+// rdb.Set(ctx, key, value, redis.KeepTTL)
+const KeepTTL = -1
+
+func usePrecise(dur time.Duration) bool {
+ return dur < time.Second || dur%time.Second != 0
+}
+
+func formatMs(ctx context.Context, dur time.Duration) int64 {
+ if dur > 0 && dur < time.Millisecond {
+ internal.Logger.Printf(
+ ctx,
+ "specified duration is %s, but minimal supported value is %s - truncating to 1ms",
+ dur, time.Millisecond,
+ )
+ return 1
+ }
+ return int64(dur / time.Millisecond)
+}
+
+func formatSec(ctx context.Context, dur time.Duration) int64 {
+ if dur > 0 && dur < time.Second {
+ internal.Logger.Printf(
+ ctx,
+ "specified duration is %s, but minimal supported value is %s - truncating to 1s",
+ dur, time.Second,
+ )
+ return 1
+ }
+ return int64(dur / time.Second)
+}
+
+func appendArgs(dst, src []interface{}) []interface{} {
+ if len(src) == 1 {
+ return appendArg(dst, src[0])
+ }
+
+ dst = append(dst, src...)
+ return dst
+}
+
+func appendArg(dst []interface{}, arg interface{}) []interface{} {
+ switch arg := arg.(type) {
+ case []string:
+ for _, s := range arg {
+ dst = append(dst, s)
+ }
+ return dst
+ case []interface{}:
+ dst = append(dst, arg...)
+ return dst
+ case map[string]interface{}:
+ for k, v := range arg {
+ dst = append(dst, k, v)
+ }
+ return dst
+ case map[string]string:
+ for k, v := range arg {
+ dst = append(dst, k, v)
+ }
+ return dst
+ case time.Time, time.Duration, encoding.BinaryMarshaler, net.IP:
+ return append(dst, arg)
+ default:
+ // scan struct field
+ v := reflect.ValueOf(arg)
+ if v.Type().Kind() == reflect.Ptr {
+ if v.IsNil() {
+ // error: arg is not a valid object
+ return dst
+ }
+ v = v.Elem()
+ }
+
+ if v.Type().Kind() == reflect.Struct {
+ return appendStructField(dst, v)
+ }
+
+ return append(dst, arg)
+ }
+}
+
+// appendStructField appends the field and value held by the structure v to dst, and returns the appended dst.
+func appendStructField(dst []interface{}, v reflect.Value) []interface{} {
+ typ := v.Type()
+ for i := 0; i < typ.NumField(); i++ {
+ tag := typ.Field(i).Tag.Get("redis")
+ if tag == "" || tag == "-" {
+ continue
+ }
+ name, opt, _ := strings.Cut(tag, ",")
+ if name == "" {
+ continue
+ }
+
+ field := v.Field(i)
+
+ // miss field
+ if omitEmpty(opt) && isEmptyValue(field) {
+ continue
+ }
+
+ if field.CanInterface() {
+ dst = append(dst, name, field.Interface())
+ }
+ }
+
+ return dst
+}
+
+func omitEmpty(opt string) bool {
+ for opt != "" {
+ var name string
+ name, opt, _ = strings.Cut(opt, ",")
+ if name == "omitempty" {
+ return true
+ }
+ }
+ return false
+}
+
+func isEmptyValue(v reflect.Value) bool {
+ switch v.Kind() {
+ case reflect.Array, reflect.Map, reflect.Slice, reflect.String:
+ return v.Len() == 0
+ case reflect.Bool:
+ return !v.Bool()
+ case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+ return v.Int() == 0
+ case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
+ return v.Uint() == 0
+ case reflect.Float32, reflect.Float64:
+ return v.Float() == 0
+ case reflect.Interface, reflect.Pointer:
+ return v.IsNil()
+ }
+ return false
+}
+
+type Cmdable interface {
+ Pipeline() Pipeliner
+ Pipelined(ctx context.Context, fn func(Pipeliner) error) ([]Cmder, error)
+
+ TxPipelined(ctx context.Context, fn func(Pipeliner) error) ([]Cmder, error)
+ TxPipeline() Pipeliner
+
+ Command(ctx context.Context) *CommandsInfoCmd
+ CommandList(ctx context.Context, filter *FilterBy) *StringSliceCmd
+ CommandGetKeys(ctx context.Context, commands ...interface{}) *StringSliceCmd
+ CommandGetKeysAndFlags(ctx context.Context, commands ...interface{}) *KeyFlagsCmd
+ ClientGetName(ctx context.Context) *StringCmd
+ Echo(ctx context.Context, message interface{}) *StringCmd
+ Ping(ctx context.Context) *StatusCmd
+ Quit(ctx context.Context) *StatusCmd
+ Unlink(ctx context.Context, keys ...string) *IntCmd
+
+ BgRewriteAOF(ctx context.Context) *StatusCmd
+ BgSave(ctx context.Context) *StatusCmd
+ ClientKill(ctx context.Context, ipPort string) *StatusCmd
+ ClientKillByFilter(ctx context.Context, keys ...string) *IntCmd
+ ClientList(ctx context.Context) *StringCmd
+ ClientInfo(ctx context.Context) *ClientInfoCmd
+ ClientPause(ctx context.Context, dur time.Duration) *BoolCmd
+ ClientUnpause(ctx context.Context) *BoolCmd
+ ClientID(ctx context.Context) *IntCmd
+ ClientUnblock(ctx context.Context, id int64) *IntCmd
+ ClientUnblockWithError(ctx context.Context, id int64) *IntCmd
+ ConfigGet(ctx context.Context, parameter string) *MapStringStringCmd
+ ConfigResetStat(ctx context.Context) *StatusCmd
+ ConfigSet(ctx context.Context, parameter, value string) *StatusCmd
+ ConfigRewrite(ctx context.Context) *StatusCmd
+ DBSize(ctx context.Context) *IntCmd
+ FlushAll(ctx context.Context) *StatusCmd
+ FlushAllAsync(ctx context.Context) *StatusCmd
+ FlushDB(ctx context.Context) *StatusCmd
+ FlushDBAsync(ctx context.Context) *StatusCmd
+ Info(ctx context.Context, section ...string) *StringCmd
+ LastSave(ctx context.Context) *IntCmd
+ Save(ctx context.Context) *StatusCmd
+ Shutdown(ctx context.Context) *StatusCmd
+ ShutdownSave(ctx context.Context) *StatusCmd
+ ShutdownNoSave(ctx context.Context) *StatusCmd
+ SlaveOf(ctx context.Context, host, port string) *StatusCmd
+ SlowLogGet(ctx context.Context, num int64) *SlowLogCmd
+ Time(ctx context.Context) *TimeCmd
+ DebugObject(ctx context.Context, key string) *StringCmd
+ MemoryUsage(ctx context.Context, key string, samples ...int) *IntCmd
+
+ ModuleLoadex(ctx context.Context, conf *ModuleLoadexConfig) *StringCmd
+
+ ACLCmdable
+ BitMapCmdable
+ ClusterCmdable
+ GearsCmdable
+ GenericCmdable
+ GeoCmdable
+ HashCmdable
+ HyperLogLogCmdable
+ ListCmdable
+ ProbabilisticCmdable
+ PubSubCmdable
+ ScriptingFunctionsCmdable
+ SetCmdable
+ SortedSetCmdable
+ StringCmdable
+ StreamCmdable
+ TimeseriesCmdable
+ JSONCmdable
+}
+
+type StatefulCmdable interface {
+ Cmdable
+ Auth(ctx context.Context, password string) *StatusCmd
+ AuthACL(ctx context.Context, username, password string) *StatusCmd
+ Select(ctx context.Context, index int) *StatusCmd
+ SwapDB(ctx context.Context, index1, index2 int) *StatusCmd
+ ClientSetName(ctx context.Context, name string) *BoolCmd
+ ClientSetInfo(ctx context.Context, info LibraryInfo) *StatusCmd
+ Hello(ctx context.Context, ver int, username, password, clientName string) *MapStringInterfaceCmd
+}
+
+var (
+ _ Cmdable = (*Client)(nil)
+ _ Cmdable = (*Tx)(nil)
+ _ Cmdable = (*Ring)(nil)
+ _ Cmdable = (*ClusterClient)(nil)
+)
+
+type cmdable func(ctx context.Context, cmd Cmder) error
+
+type statefulCmdable func(ctx context.Context, cmd Cmder) error
+
+//------------------------------------------------------------------------------
+
+func (c statefulCmdable) Auth(ctx context.Context, password string) *StatusCmd {
+ cmd := NewStatusCmd(ctx, "auth", password)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+// AuthACL Perform an AUTH command, using the given user and pass.
+// Should be used to authenticate the current connection with one of the connections defined in the ACL list
+// when connecting to a Redis 6.0 instance, or greater, that is using the Redis ACL system.
+func (c statefulCmdable) AuthACL(ctx context.Context, username, password string) *StatusCmd {
+ cmd := NewStatusCmd(ctx, "auth", username, password)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) Wait(ctx context.Context, numSlaves int, timeout time.Duration) *IntCmd {
+ cmd := NewIntCmd(ctx, "wait", numSlaves, int(timeout/time.Millisecond))
+ cmd.setReadTimeout(timeout)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) WaitAOF(ctx context.Context, numLocal, numSlaves int, timeout time.Duration) *IntCmd {
+ cmd := NewIntCmd(ctx, "waitAOF", numLocal, numSlaves, int(timeout/time.Millisecond))
+ cmd.setReadTimeout(timeout)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c statefulCmdable) Select(ctx context.Context, index int) *StatusCmd {
+ cmd := NewStatusCmd(ctx, "select", index)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c statefulCmdable) SwapDB(ctx context.Context, index1, index2 int) *StatusCmd {
+ cmd := NewStatusCmd(ctx, "swapdb", index1, index2)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+// ClientSetName assigns a name to the connection.
+func (c statefulCmdable) ClientSetName(ctx context.Context, name string) *BoolCmd {
+ cmd := NewBoolCmd(ctx, "client", "setname", name)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+// ClientSetInfo sends a CLIENT SETINFO command with the provided info.
+func (c statefulCmdable) ClientSetInfo(ctx context.Context, info LibraryInfo) *StatusCmd {
+ err := info.Validate()
+ if err != nil {
+ panic(err.Error())
+ }
+
+ var cmd *StatusCmd
+ if info.LibName != nil {
+ libName := fmt.Sprintf("go-redis(%s,%s)", *info.LibName, internal.ReplaceSpaces(runtime.Version()))
+ cmd = NewStatusCmd(ctx, "client", "setinfo", "LIB-NAME", libName)
+ } else {
+ cmd = NewStatusCmd(ctx, "client", "setinfo", "LIB-VER", *info.LibVer)
+ }
+
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+// Validate checks if only one field in the struct is non-nil.
+func (info LibraryInfo) Validate() error {
+ if info.LibName != nil && info.LibVer != nil {
+ return errors.New("both LibName and LibVer cannot be set at the same time")
+ }
+ if info.LibName == nil && info.LibVer == nil {
+ return errors.New("at least one of LibName and LibVer should be set")
+ }
+ return nil
+}
+
+// Hello Set the resp protocol used.
+func (c statefulCmdable) Hello(ctx context.Context,
+ ver int, username, password, clientName string,
+) *MapStringInterfaceCmd {
+ args := make([]interface{}, 0, 7)
+ args = append(args, "hello", ver)
+ if password != "" {
+ if username != "" {
+ args = append(args, "auth", username, password)
+ } else {
+ args = append(args, "auth", "default", password)
+ }
+ }
+ if clientName != "" {
+ args = append(args, "setname", clientName)
+ }
+ cmd := NewMapStringInterfaceCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+//------------------------------------------------------------------------------
+
+func (c cmdable) Command(ctx context.Context) *CommandsInfoCmd {
+ cmd := NewCommandsInfoCmd(ctx, "command")
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+// FilterBy is used for the `CommandList` command parameter.
+type FilterBy struct {
+ Module string
+ ACLCat string
+ Pattern string
+}
+
+func (c cmdable) CommandList(ctx context.Context, filter *FilterBy) *StringSliceCmd {
+ args := make([]interface{}, 0, 5)
+ args = append(args, "command", "list")
+ if filter != nil {
+ if filter.Module != "" {
+ args = append(args, "filterby", "module", filter.Module)
+ } else if filter.ACLCat != "" {
+ args = append(args, "filterby", "aclcat", filter.ACLCat)
+ } else if filter.Pattern != "" {
+ args = append(args, "filterby", "pattern", filter.Pattern)
+ }
+ }
+ cmd := NewStringSliceCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) CommandGetKeys(ctx context.Context, commands ...interface{}) *StringSliceCmd {
+ args := make([]interface{}, 2+len(commands))
+ args[0] = "command"
+ args[1] = "getkeys"
+ copy(args[2:], commands)
+ cmd := NewStringSliceCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) CommandGetKeysAndFlags(ctx context.Context, commands ...interface{}) *KeyFlagsCmd {
+ args := make([]interface{}, 2+len(commands))
+ args[0] = "command"
+ args[1] = "getkeysandflags"
+ copy(args[2:], commands)
+ cmd := NewKeyFlagsCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+// ClientGetName returns the name of the connection.
+func (c cmdable) ClientGetName(ctx context.Context) *StringCmd {
+ cmd := NewStringCmd(ctx, "client", "getname")
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) Echo(ctx context.Context, message interface{}) *StringCmd {
+ cmd := NewStringCmd(ctx, "echo", message)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) Ping(ctx context.Context) *StatusCmd {
+ cmd := NewStatusCmd(ctx, "ping")
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) Quit(_ context.Context) *StatusCmd {
+ panic("not implemented")
+}
+
+//------------------------------------------------------------------------------
+
+func (c cmdable) BgRewriteAOF(ctx context.Context) *StatusCmd {
+ cmd := NewStatusCmd(ctx, "bgrewriteaof")
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) BgSave(ctx context.Context) *StatusCmd {
+ cmd := NewStatusCmd(ctx, "bgsave")
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) ClientKill(ctx context.Context, ipPort string) *StatusCmd {
+ cmd := NewStatusCmd(ctx, "client", "kill", ipPort)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+// ClientKillByFilter is new style syntax, while the ClientKill is old
+//
+// CLIENT KILL <option> [value] ... <option> [value]
+func (c cmdable) ClientKillByFilter(ctx context.Context, keys ...string) *IntCmd {
+ args := make([]interface{}, 2+len(keys))
+ args[0] = "client"
+ args[1] = "kill"
+ for i, key := range keys {
+ args[2+i] = key
+ }
+ cmd := NewIntCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) ClientList(ctx context.Context) *StringCmd {
+ cmd := NewStringCmd(ctx, "client", "list")
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) ClientPause(ctx context.Context, dur time.Duration) *BoolCmd {
+ cmd := NewBoolCmd(ctx, "client", "pause", formatMs(ctx, dur))
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) ClientUnpause(ctx context.Context) *BoolCmd {
+ cmd := NewBoolCmd(ctx, "client", "unpause")
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) ClientID(ctx context.Context) *IntCmd {
+ cmd := NewIntCmd(ctx, "client", "id")
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) ClientUnblock(ctx context.Context, id int64) *IntCmd {
+ cmd := NewIntCmd(ctx, "client", "unblock", id)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) ClientUnblockWithError(ctx context.Context, id int64) *IntCmd {
+ cmd := NewIntCmd(ctx, "client", "unblock", id, "error")
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) ClientInfo(ctx context.Context) *ClientInfoCmd {
+ cmd := NewClientInfoCmd(ctx, "client", "info")
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+// ------------------------------------------------------------------------------------------------
+
+func (c cmdable) ConfigGet(ctx context.Context, parameter string) *MapStringStringCmd {
+ cmd := NewMapStringStringCmd(ctx, "config", "get", parameter)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) ConfigResetStat(ctx context.Context) *StatusCmd {
+ cmd := NewStatusCmd(ctx, "config", "resetstat")
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) ConfigSet(ctx context.Context, parameter, value string) *StatusCmd {
+ cmd := NewStatusCmd(ctx, "config", "set", parameter, value)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) ConfigRewrite(ctx context.Context) *StatusCmd {
+ cmd := NewStatusCmd(ctx, "config", "rewrite")
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) DBSize(ctx context.Context) *IntCmd {
+ cmd := NewIntCmd(ctx, "dbsize")
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) FlushAll(ctx context.Context) *StatusCmd {
+ cmd := NewStatusCmd(ctx, "flushall")
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) FlushAllAsync(ctx context.Context) *StatusCmd {
+ cmd := NewStatusCmd(ctx, "flushall", "async")
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) FlushDB(ctx context.Context) *StatusCmd {
+ cmd := NewStatusCmd(ctx, "flushdb")
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) FlushDBAsync(ctx context.Context) *StatusCmd {
+ cmd := NewStatusCmd(ctx, "flushdb", "async")
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) Info(ctx context.Context, sections ...string) *StringCmd {
+ args := make([]interface{}, 1+len(sections))
+ args[0] = "info"
+ for i, section := range sections {
+ args[i+1] = section
+ }
+ cmd := NewStringCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) InfoMap(ctx context.Context, sections ...string) *InfoCmd {
+ args := make([]interface{}, 1+len(sections))
+ args[0] = "info"
+ for i, section := range sections {
+ args[i+1] = section
+ }
+ cmd := NewInfoCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) LastSave(ctx context.Context) *IntCmd {
+ cmd := NewIntCmd(ctx, "lastsave")
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) Save(ctx context.Context) *StatusCmd {
+ cmd := NewStatusCmd(ctx, "save")
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) shutdown(ctx context.Context, modifier string) *StatusCmd {
+ var args []interface{}
+ if modifier == "" {
+ args = []interface{}{"shutdown"}
+ } else {
+ args = []interface{}{"shutdown", modifier}
+ }
+ cmd := NewStatusCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ if err := cmd.Err(); err != nil {
+ if err == io.EOF {
+ // Server quit as expected.
+ cmd.err = nil
+ }
+ } else {
+ // Server did not quit. String reply contains the reason.
+ cmd.err = errors.New(cmd.val)
+ cmd.val = ""
+ }
+ return cmd
+}
+
+func (c cmdable) Shutdown(ctx context.Context) *StatusCmd {
+ return c.shutdown(ctx, "")
+}
+
+func (c cmdable) ShutdownSave(ctx context.Context) *StatusCmd {
+ return c.shutdown(ctx, "save")
+}
+
+func (c cmdable) ShutdownNoSave(ctx context.Context) *StatusCmd {
+ return c.shutdown(ctx, "nosave")
+}
+
+func (c cmdable) SlaveOf(ctx context.Context, host, port string) *StatusCmd {
+ cmd := NewStatusCmd(ctx, "slaveof", host, port)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) SlowLogGet(ctx context.Context, num int64) *SlowLogCmd {
+ cmd := NewSlowLogCmd(context.Background(), "slowlog", "get", num)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) Sync(_ context.Context) {
+ panic("not implemented")
+}
+
+func (c cmdable) Time(ctx context.Context) *TimeCmd {
+ cmd := NewTimeCmd(ctx, "time")
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) DebugObject(ctx context.Context, key string) *StringCmd {
+ cmd := NewStringCmd(ctx, "debug", "object", key)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) MemoryUsage(ctx context.Context, key string, samples ...int) *IntCmd {
+ args := []interface{}{"memory", "usage", key}
+ if len(samples) > 0 {
+ if len(samples) != 1 {
+ panic("MemoryUsage expects single sample count")
+ }
+ args = append(args, "SAMPLES", samples[0])
+ }
+ cmd := NewIntCmd(ctx, args...)
+ cmd.SetFirstKeyPos(2)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+//------------------------------------------------------------------------------
+
+// ModuleLoadexConfig struct is used to specify the arguments for the MODULE LOADEX command of redis.
+// `MODULE LOADEX path [CONFIG name value [CONFIG name value ...]] [ARGS args [args ...]]`
+type ModuleLoadexConfig struct {
+ Path string
+ Conf map[string]interface{}
+ Args []interface{}
+}
+
+func (c *ModuleLoadexConfig) toArgs() []interface{} {
+ args := make([]interface{}, 3, 3+len(c.Conf)*3+len(c.Args)*2)
+ args[0] = "MODULE"
+ args[1] = "LOADEX"
+ args[2] = c.Path
+ for k, v := range c.Conf {
+ args = append(args, "CONFIG", k, v)
+ }
+ for _, arg := range c.Args {
+ args = append(args, "ARGS", arg)
+ }
+ return args
+}
+
+// ModuleLoadex Redis `MODULE LOADEX path [CONFIG name value [CONFIG name value ...]] [ARGS args [args ...]]` command.
+func (c cmdable) ModuleLoadex(ctx context.Context, conf *ModuleLoadexConfig) *StringCmd {
+ cmd := NewStringCmd(ctx, conf.toArgs()...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+/*
+Monitor - represents a Redis MONITOR command, allowing the user to capture
+and process all commands sent to a Redis server. This mimics the behavior of
+MONITOR in the redis-cli.
+
+Notes:
+- Using MONITOR blocks the connection to the server for itself. It needs a dedicated connection
+- The user should create a channel of type string
+- This runs concurrently in the background. Trigger via the Start and Stop functions
+See further: Redis MONITOR command: https://redis.io/commands/monitor
+*/
+func (c cmdable) Monitor(ctx context.Context, ch chan string) *MonitorCmd {
+ cmd := newMonitorCmd(ctx, ch)
+ _ = c(ctx, cmd)
+ return cmd
+}
diff --git a/dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/commands_test.go b/dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.5.1/commands_test.go
index 030bdf3..d30a9d8 100644
--- a/dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/commands_test.go
+++ b/dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.5.1/commands_test.go
@@ -5,15 +5,25 @@ import (
"encoding/json"
"fmt"
"reflect"
+ "strconv"
"time"
- . "github.com/onsi/ginkgo"
- . "github.com/onsi/gomega"
+ . "github.com/bsm/ginkgo/v2"
+ . "github.com/bsm/gomega"
- "github.com/go-redis/redis/v8"
- "github.com/go-redis/redis/v8/internal/proto"
+ "github.com/redis/go-redis/v9"
+ "github.com/redis/go-redis/v9/internal/proto"
)
+type TimeValue struct {
+ time.Time
+}
+
+func (t *TimeValue) ScanRedis(s string) (err error) {
+ t.Time, err = time.Parse(time.RFC3339Nano, s)
+ return
+}
+
var _ = Describe("Commands", func() {
ctx := context.TODO()
var client *redis.Client
@@ -47,6 +57,17 @@ var _ = Describe("Commands", func() {
Expect(stats.IdleConns).To(Equal(uint32(1)))
})
+ It("should hello", func() {
+ cmds, err := client.Pipelined(ctx, func(pipe redis.Pipeliner) error {
+ pipe.Hello(ctx, 3, "", "", "")
+ return nil
+ })
+ Expect(err).NotTo(HaveOccurred())
+ m, err := cmds[0].(*redis.MapStringInterfaceCmd).Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(m["proto"]).To(Equal(int64(3)))
+ })
+
It("should Echo", func() {
pipe := client.Pipeline()
echo := pipe.Echo(ctx, "hello")
@@ -74,7 +95,19 @@ var _ = Describe("Commands", func() {
Expect(time.Now()).To(BeTemporally("~", start.Add(wait), 3*time.Second))
})
- It("should Select", func() {
+ It("should WaitAOF", func() {
+ const waitAOF = 3 * time.Second
+ Skip("flaky test")
+
+ // assuming that the redis instance doesn't have AOF enabled
+ start := time.Now()
+ val, err := client.WaitAOF(ctx, 1, 1, waitAOF).Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(val).NotTo(ContainSubstring("ERR WAITAOF cannot be used when numlocal is set but appendonly is disabled"))
+ Expect(time.Now()).To(BeTemporally("~", start.Add(waitAOF), 3*time.Second))
+ })
+
+ It("should Select", Label("NonRedisEnterprise"), func() {
pipe := client.Pipeline()
sel := pipe.Select(ctx, 1)
_, err := pipe.Exec(ctx)
@@ -84,7 +117,7 @@ var _ = Describe("Commands", func() {
Expect(sel.Val()).To(Equal("OK"))
})
- It("should SwapDB", func() {
+ It("should SwapDB", Label("NonRedisEnterprise"), func() {
pipe := client.Pipeline()
sel := pipe.SwapDB(ctx, 1, 2)
_, err := pipe.Exec(ctx)
@@ -111,6 +144,43 @@ var _ = Describe("Commands", func() {
}, "30s").Should(Equal("Background saving started"))
})
+ It("Should CommandGetKeys", func() {
+ keys, err := client.CommandGetKeys(ctx, "MSET", "a", "b", "c", "d", "e", "f").Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(keys).To(Equal([]string{"a", "c", "e"}))
+
+ keys, err = client.CommandGetKeys(ctx, "EVAL", "not consulted", "3", "key1", "key2", "key3", "arg1", "arg2", "arg3", "argN").Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(keys).To(Equal([]string{"key1", "key2", "key3"}))
+
+ keys, err = client.CommandGetKeys(ctx, "SORT", "mylist", "ALPHA", "STORE", "outlist").Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(keys).To(Equal([]string{"mylist", "outlist"}))
+
+ _, err = client.CommandGetKeys(ctx, "FAKECOMMAND", "arg1", "arg2").Result()
+ Expect(err).To(HaveOccurred())
+ Expect(err).To(MatchError("ERR Invalid command specified"))
+ })
+
+ It("should CommandGetKeysAndFlags", func() {
+ keysAndFlags, err := client.CommandGetKeysAndFlags(ctx, "LMOVE", "mylist1", "mylist2", "left", "left").Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(keysAndFlags).To(Equal([]redis.KeyFlags{
+ {
+ Key: "mylist1",
+ Flags: []string{"RW", "access", "delete"},
+ },
+ {
+ Key: "mylist2",
+ Flags: []string{"RW", "insert"},
+ },
+ }))
+
+ _, err = client.CommandGetKeysAndFlags(ctx, "FAKECOMMAND", "arg1", "arg2").Result()
+ Expect(err).To(HaveOccurred())
+ Expect(err).To(MatchError("ERR Invalid command specified"))
+ })
+
It("should ClientKill", func() {
r := client.ClientKill(ctx, "1.1.1.1:1111")
Expect(r.Err()).To(MatchError("ERR No such client"))
@@ -143,7 +213,13 @@ var _ = Describe("Commands", func() {
Expect(r).To(Equal(int64(0)))
})
- It("should ClientPause", func() {
+ It("should ClientInfo", func() {
+ info, err := client.ClientInfo(ctx).Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(info).NotTo(BeNil())
+ })
+
+ It("should ClientPause", Label("NonRedisEnterprise"), func() {
err := client.ClientPause(ctx, time.Second).Err()
Expect(err).NotTo(HaveOccurred())
@@ -167,30 +243,92 @@ var _ = Describe("Commands", func() {
Expect(get.Val()).To(Equal("theclientname"))
})
+ It("should ClientSetInfo", func() {
+ pipe := client.Pipeline()
+
+ // Test setting the libName
+ libName := "go-redis"
+ libInfo := redis.WithLibraryName(libName)
+ setInfo := pipe.ClientSetInfo(ctx, libInfo)
+ _, err := pipe.Exec(ctx)
+
+ Expect(err).NotTo(HaveOccurred())
+ Expect(setInfo.Err()).NotTo(HaveOccurred())
+ Expect(setInfo.Val()).To(Equal("OK"))
+
+ // Test setting the libVer
+ libVer := "vX.x"
+ libInfo = redis.WithLibraryVersion(libVer)
+ setInfo = pipe.ClientSetInfo(ctx, libInfo)
+ _, err = pipe.Exec(ctx)
+
+ Expect(err).NotTo(HaveOccurred())
+ Expect(setInfo.Err()).NotTo(HaveOccurred())
+ Expect(setInfo.Val()).To(Equal("OK"))
+
+ // Test setting both fields, expect a panic
+ libInfo = redis.LibraryInfo{LibName: &libName, LibVer: &libVer}
+
+ Expect(func() {
+ defer func() {
+ if r := recover(); r != nil {
+ err := r.(error)
+ Expect(err).To(MatchError("both LibName and LibVer cannot be set at the same time"))
+ }
+ }()
+ pipe.ClientSetInfo(ctx, libInfo)
+ }).To(Panic())
+
+ // Test setting neither field, expect a panic
+ libInfo = redis.LibraryInfo{}
+
+ Expect(func() {
+ defer func() {
+ if r := recover(); r != nil {
+ err := r.(error)
+ Expect(err).To(MatchError("at least one of LibName and LibVer should be set"))
+ }
+ }()
+ pipe.ClientSetInfo(ctx, libInfo)
+ }).To(Panic())
+ // Test setting the default options for libName, libName suffix and libVer
+ clientInfo := client.ClientInfo(ctx).Val()
+ Expect(clientInfo.LibName).To(ContainSubstring("go-redis(go-redis,"))
+ // Test setting the libName suffix in options
+ opt := redisOptions()
+ opt.IdentitySuffix = "suffix"
+ client2 := redis.NewClient(opt)
+ defer client2.Close()
+ clientInfo = client2.ClientInfo(ctx).Val()
+ Expect(clientInfo.LibName).To(ContainSubstring("go-redis(suffix,"))
+
+ })
+
It("should ConfigGet", func() {
val, err := client.ConfigGet(ctx, "*").Result()
Expect(err).NotTo(HaveOccurred())
Expect(val).NotTo(BeEmpty())
})
- It("should ConfigResetStat", func() {
+ It("should ConfigResetStat", Label("NonRedisEnterprise"), func() {
r := client.ConfigResetStat(ctx)
Expect(r.Err()).NotTo(HaveOccurred())
Expect(r.Val()).To(Equal("OK"))
})
- It("should ConfigSet", func() {
+ It("should ConfigSet", Label("NonRedisEnterprise"), func() {
configGet := client.ConfigGet(ctx, "maxmemory")
Expect(configGet.Err()).NotTo(HaveOccurred())
- Expect(configGet.Val()).To(HaveLen(2))
- Expect(configGet.Val()[0]).To(Equal("maxmemory"))
+ Expect(configGet.Val()).To(HaveLen(1))
+ _, ok := configGet.Val()["maxmemory"]
+ Expect(ok).To(BeTrue())
- configSet := client.ConfigSet(ctx, "maxmemory", configGet.Val()[1].(string))
+ configSet := client.ConfigSet(ctx, "maxmemory", configGet.Val()["maxmemory"])
Expect(configSet.Err()).NotTo(HaveOccurred())
Expect(configSet.Val()).To(Equal("OK"))
})
- It("should ConfigRewrite", func() {
+ It("should ConfigRewrite", Label("NonRedisEnterprise"), func() {
configRewrite := client.ConfigRewrite(ctx)
Expect(configRewrite.Err()).NotTo(HaveOccurred())
Expect(configRewrite.Val()).To(Equal("OK"))
@@ -208,6 +346,20 @@ var _ = Describe("Commands", func() {
Expect(info.Val()).NotTo(Equal(""))
})
+ It("should InfoMap", Label("redis.info"), func() {
+ info := client.InfoMap(ctx)
+ Expect(info.Err()).NotTo(HaveOccurred())
+ Expect(info.Val()).NotTo(BeNil())
+
+ info = client.InfoMap(ctx, "dummy")
+ Expect(info.Err()).NotTo(HaveOccurred())
+ Expect(info.Val()).To(BeNil())
+
+ info = client.InfoMap(ctx, "server")
+ Expect(info.Err()).NotTo(HaveOccurred())
+ Expect(info.Val()).To(HaveLen(1))
+ })
+
It("should Info cpu", func() {
info := client.Info(ctx, "cpu")
Expect(info.Err()).NotTo(HaveOccurred())
@@ -215,20 +367,28 @@ var _ = Describe("Commands", func() {
Expect(info.Val()).To(ContainSubstring(`used_cpu_sys`))
})
- It("should LastSave", func() {
+ It("should Info cpu and memory", func() {
+ info := client.Info(ctx, "cpu", "memory")
+ Expect(info.Err()).NotTo(HaveOccurred())
+ Expect(info.Val()).NotTo(Equal(""))
+ Expect(info.Val()).To(ContainSubstring(`used_cpu_sys`))
+ Expect(info.Val()).To(ContainSubstring(`memory`))
+ })
+
+ It("should LastSave", Label("NonRedisEnterprise"), func() {
lastSave := client.LastSave(ctx)
Expect(lastSave.Err()).NotTo(HaveOccurred())
Expect(lastSave.Val()).NotTo(Equal(0))
})
- It("should Save", func() {
+ It("should Save", Label("NonRedisEnterprise"), func() {
// workaround for "ERR Background save already in progress"
Eventually(func() string {
return client.Save(ctx).Val()
}, "10s").Should(Equal("OK"))
})
- It("should SlaveOf", func() {
+ It("should SlaveOf", Label("NonRedisEnterprise"), func() {
slaveOf := client.SlaveOf(ctx, "localhost", "8888")
Expect(slaveOf.Err()).NotTo(HaveOccurred())
Expect(slaveOf.Val()).To(Equal("OK"))
@@ -244,10 +404,10 @@ var _ = Describe("Commands", func() {
Expect(tm).To(BeTemporally("~", time.Now(), 3*time.Second))
})
- It("should Command", func() {
+ It("should Command", Label("NonRedisEnterprise"), func() {
cmds, err := client.Command(ctx).Result()
Expect(err).NotTo(HaveOccurred())
- Expect(len(cmds)).To(BeNumerically("~", 200, 25))
+ Expect(len(cmds)).To(BeNumerically("~", 240, 25))
cmd := cmds["mget"]
Expect(cmd.Name).To(Equal("mget"))
@@ -260,16 +420,65 @@ var _ = Describe("Commands", func() {
cmd = cmds["ping"]
Expect(cmd.Name).To(Equal("ping"))
Expect(cmd.Arity).To(Equal(int8(-1)))
- Expect(cmd.Flags).To(ContainElement("stale"))
Expect(cmd.Flags).To(ContainElement("fast"))
Expect(cmd.FirstKeyPos).To(Equal(int8(0)))
Expect(cmd.LastKeyPos).To(Equal(int8(0)))
Expect(cmd.StepCount).To(Equal(int8(0)))
})
+
+ It("should return all command names", func() {
+ cmdList := client.CommandList(ctx, nil)
+ Expect(cmdList.Err()).NotTo(HaveOccurred())
+ cmdNames := cmdList.Val()
+
+ Expect(cmdNames).NotTo(BeEmpty())
+
+ // Assert that some expected commands are present in the list
+ Expect(cmdNames).To(ContainElement("get"))
+ Expect(cmdNames).To(ContainElement("set"))
+ Expect(cmdNames).To(ContainElement("hset"))
+ })
+
+ It("should filter commands by module", func() {
+ filter := &redis.FilterBy{
+ Module: "JSON",
+ }
+ cmdList := client.CommandList(ctx, filter)
+ Expect(cmdList.Err()).NotTo(HaveOccurred())
+ Expect(cmdList.Val()).To(HaveLen(0))
+ })
+
+ It("should filter commands by ACL category", func() {
+ filter := &redis.FilterBy{
+ ACLCat: "admin",
+ }
+
+ cmdList := client.CommandList(ctx, filter)
+ Expect(cmdList.Err()).NotTo(HaveOccurred())
+ cmdNames := cmdList.Val()
+
+ // Assert that the returned list only contains commands from the admin ACL category
+ Expect(len(cmdNames)).To(BeNumerically(">", 10))
+ })
+
+ It("should filter commands by pattern", func() {
+ filter := &redis.FilterBy{
+ Pattern: "*GET*",
+ }
+ cmdList := client.CommandList(ctx, filter)
+ Expect(cmdList.Err()).NotTo(HaveOccurred())
+ cmdNames := cmdList.Val()
+
+ // Assert that the returned list only contains commands that match the given pattern
+ Expect(cmdNames).To(ContainElement("get"))
+ Expect(cmdNames).To(ContainElement("getbit"))
+ Expect(cmdNames).To(ContainElement("getrange"))
+ Expect(cmdNames).NotTo(ContainElement("set"))
+ })
})
Describe("debugging", func() {
- It("should DebugObject", func() {
+ PIt("should DebugObject", func() {
err := client.DebugObject(ctx, "foo").Err()
Expect(err).To(MatchError("ERR no such key"))
@@ -380,17 +589,28 @@ var _ = Describe("Commands", func() {
})
It("should ExpireAt", func() {
- set := client.Set(ctx, "key", "Hello", 0)
- Expect(set.Err()).NotTo(HaveOccurred())
- Expect(set.Val()).To(Equal("OK"))
+ setCmd := client.Set(ctx, "key", "Hello", 0)
+ Expect(setCmd.Err()).NotTo(HaveOccurred())
+ Expect(setCmd.Val()).To(Equal("OK"))
n, err := client.Exists(ctx, "key").Result()
Expect(err).NotTo(HaveOccurred())
Expect(n).To(Equal(int64(1)))
- expireAt := client.ExpireAt(ctx, "key", time.Now().Add(-time.Hour))
- Expect(expireAt.Err()).NotTo(HaveOccurred())
- Expect(expireAt.Val()).To(Equal(true))
+ // Check correct expiration time is set in the future
+ expireAt := time.Now().Add(time.Minute)
+ expireAtCmd := client.ExpireAt(ctx, "key", expireAt)
+ Expect(expireAtCmd.Err()).NotTo(HaveOccurred())
+ Expect(expireAtCmd.Val()).To(Equal(true))
+
+ timeCmd := client.ExpireTime(ctx, "key")
+ Expect(timeCmd.Err()).NotTo(HaveOccurred())
+ Expect(timeCmd.Val().Seconds()).To(BeNumerically("==", expireAt.Unix()))
+
+ // Check correct expiration in the past
+ expireAtCmd = client.ExpireAt(ctx, "key", time.Now().Add(-time.Hour))
+ Expect(expireAtCmd.Err()).NotTo(HaveOccurred())
+ Expect(expireAtCmd.Val()).To(Equal(true))
n, err = client.Exists(ctx, "key").Result()
Expect(err).NotTo(HaveOccurred())
@@ -415,7 +635,7 @@ var _ = Describe("Commands", func() {
Expect(keys.Val()).To(ConsistOf([]string{"four", "one", "three", "two"}))
})
- It("should Migrate", func() {
+ It("should Migrate", Label("NonRedisEnterprise"), func() {
migrate := client.Migrate(ctx, "localhost", redisSecondaryPort, "key", 0, 0)
Expect(migrate.Err()).NotTo(HaveOccurred())
Expect(migrate.Val()).To(Equal("NOKEY"))
@@ -429,7 +649,7 @@ var _ = Describe("Commands", func() {
Expect(migrate.Val()).To(Equal(""))
})
- It("should Move", func() {
+ It("should Move", Label("NonRedisEnterprise"), func() {
move := client.Move(ctx, "key", 2)
Expect(move.Err()).NotTo(HaveOccurred())
Expect(move.Val()).To(Equal(false))
@@ -456,7 +676,7 @@ var _ = Describe("Commands", func() {
Expect(get.Val()).To(Equal("hello"))
})
- It("should Object", func() {
+ It("should Object", Label("NonRedisEnterprise"), func() {
start := time.Now()
set := client.Set(ctx, "key", "hello", 0)
Expect(set.Err()).NotTo(HaveOccurred())
@@ -466,6 +686,11 @@ var _ = Describe("Commands", func() {
Expect(refCount.Err()).NotTo(HaveOccurred())
Expect(refCount.Val()).To(Equal(int64(1)))
+ client.ConfigSet(ctx, "maxmemory-policy", "volatile-lfu")
+ freq := client.ObjectFreq(ctx, "key")
+ Expect(freq.Err()).NotTo(HaveOccurred())
+ client.ConfigSet(ctx, "maxmemory-policy", "noeviction") // default
+
err := client.ObjectEncoding(ctx, "key").Err()
Expect(err).NotTo(HaveOccurred())
@@ -477,7 +702,7 @@ var _ = Describe("Commands", func() {
// if too much time (>1s) is used during command execution, it may also cause the test to fail.
// so the ObjectIdleTime result should be <=now-start+1s
// link: https://github.com/redis/redis/blob/5b48d900498c85bbf4772c1d466c214439888115/src/object.c#L1265-L1272
- Expect(idleTime.Val()).To(BeNumerically("<=", time.Now().Sub(start)+time.Second))
+ Expect(idleTime.Val()).To(BeNumerically("<=", time.Since(start)+time.Second))
})
It("should Persist", func() {
@@ -540,6 +765,27 @@ var _ = Describe("Commands", func() {
Expect(pttl.Val()).To(BeNumerically("~", expiration, 100*time.Millisecond))
})
+ It("should PExpireTime", func() {
+ // The command returns -1 if the key exists but has no associated expiration time.
+ // The command returns -2 if the key does not exist.
+ pExpireTime := client.PExpireTime(ctx, "key")
+ Expect(pExpireTime.Err()).NotTo(HaveOccurred())
+ Expect(pExpireTime.Val() < 0).To(Equal(true))
+
+ set := client.Set(ctx, "key", "hello", 0)
+ Expect(set.Err()).NotTo(HaveOccurred())
+ Expect(set.Val()).To(Equal("OK"))
+
+ timestamp := time.Now().Add(time.Minute)
+ expireAt := client.PExpireAt(ctx, "key", timestamp)
+ Expect(expireAt.Err()).NotTo(HaveOccurred())
+ Expect(expireAt.Val()).To(Equal(true))
+
+ pExpireTime = client.PExpireTime(ctx, "key")
+ Expect(pExpireTime.Err()).NotTo(HaveOccurred())
+ Expect(pExpireTime.Val().Milliseconds()).To(BeNumerically("==", timestamp.UnixMilli()))
+ })
+
It("should PTTL", func() {
set := client.Set(ctx, "key", "Hello", 0)
Expect(set.Err()).NotTo(HaveOccurred())
@@ -569,7 +815,7 @@ var _ = Describe("Commands", func() {
Expect(randomKey.Val()).To(Equal("key"))
})
- It("should Rename", func() {
+ It("should Rename", Label("NonRedisEnterprise"), func() {
set := client.Set(ctx, "key", "hello", 0)
Expect(set.Err()).NotTo(HaveOccurred())
Expect(set.Val()).To(Equal("OK"))
@@ -583,7 +829,7 @@ var _ = Describe("Commands", func() {
Expect(get.Val()).To(Equal("hello"))
})
- It("should RenameNX", func() {
+ It("should RenameNX", Label("NonRedisEnterprise"), func() {
set := client.Set(ctx, "key", "hello", 0)
Expect(set.Err()).NotTo(HaveOccurred())
Expect(set.Val()).To(Equal("OK"))
@@ -640,6 +886,28 @@ var _ = Describe("Commands", func() {
Expect(val).To(Equal("hello"))
})
+ It("should Sort RO", func() {
+ size, err := client.LPush(ctx, "list", "1").Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(size).To(Equal(int64(1)))
+
+ size, err = client.LPush(ctx, "list", "3").Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(size).To(Equal(int64(2)))
+
+ size, err = client.LPush(ctx, "list", "2").Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(size).To(Equal(int64(3)))
+
+ els, err := client.SortRO(ctx, "list", &redis.Sort{
+ Offset: 0,
+ Count: 2,
+ Order: "ASC",
+ }).Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(els).To(Equal([]string{"1", "2"}))
+ })
+
It("should Sort", func() {
size, err := client.LPush(ctx, "list", "1").Result()
Expect(err).NotTo(HaveOccurred())
@@ -662,7 +930,7 @@ var _ = Describe("Commands", func() {
Expect(els).To(Equal([]string{"1", "2"}))
})
- It("should Sort and Get", func() {
+ It("should Sort and Get", Label("NonRedisEnterprise"), func() {
size, err := client.LPush(ctx, "list", "1").Result()
Expect(err).NotTo(HaveOccurred())
Expect(size).To(Equal(int64(1)))
@@ -695,7 +963,7 @@ var _ = Describe("Commands", func() {
}
})
- It("should Sort and Store", func() {
+ It("should Sort and Store", Label("NonRedisEnterprise"), func() {
size, err := client.LPush(ctx, "list", "1").Result()
Expect(err).NotTo(HaveOccurred())
Expect(size).To(Equal(int64(1)))
@@ -735,7 +1003,30 @@ var _ = Describe("Commands", func() {
Expect(touch.Val()).To(Equal(int64(2)))
})
+ It("should ExpireTime", func() {
+ // The command returns -1 if the key exists but has no associated expiration time.
+ // The command returns -2 if the key does not exist.
+ expireTimeCmd := client.ExpireTime(ctx, "key")
+ Expect(expireTimeCmd.Err()).NotTo(HaveOccurred())
+ Expect(expireTimeCmd.Val() < 0).To(Equal(true))
+
+ set := client.Set(ctx, "key", "hello", 0)
+ Expect(set.Err()).NotTo(HaveOccurred())
+ Expect(set.Val()).To(Equal("OK"))
+
+ expireAt := time.Now().Add(time.Minute)
+ expireAtCmd := client.ExpireAt(ctx, "key", expireAt)
+ Expect(expireAtCmd.Err()).NotTo(HaveOccurred())
+ Expect(expireAtCmd.Val()).To(Equal(true))
+
+ expireTimeCmd = client.ExpireTime(ctx, "key")
+ Expect(expireTimeCmd.Err()).NotTo(HaveOccurred())
+ Expect(expireTimeCmd.Val().Seconds()).To(BeNumerically("==", expireAt.Unix()))
+ })
+
It("should TTL", func() {
+ // The command returns -1 if the key exists but has no associated expire
+ // The command returns -2 if the key does not exist.
ttl := client.TTL(ctx, "key")
Expect(ttl.Err()).NotTo(HaveOccurred())
Expect(ttl.Val() < 0).To(Equal(true))
@@ -815,7 +1106,7 @@ var _ = Describe("Commands", func() {
It("should ZScan", func() {
for i := 0; i < 1000; i++ {
- err := client.ZAdd(ctx, "myset", &redis.Z{
+ err := client.ZAdd(ctx, "myset", redis.Z{
Score: float64(i),
Member: fmt.Sprintf("member%d", i),
}).Err()
@@ -835,13 +1126,13 @@ var _ = Describe("Commands", func() {
Expect(err).NotTo(HaveOccurred())
Expect(n).To(Equal(int64(0)))
- append := client.Append(ctx, "key", "Hello")
- Expect(append.Err()).NotTo(HaveOccurred())
- Expect(append.Val()).To(Equal(int64(5)))
+ appendRes := client.Append(ctx, "key", "Hello")
+ Expect(appendRes.Err()).NotTo(HaveOccurred())
+ Expect(appendRes.Val()).To(Equal(int64(5)))
- append = client.Append(ctx, "key", " World")
- Expect(append.Err()).NotTo(HaveOccurred())
- Expect(append.Val()).To(Equal(int64(11)))
+ appendRes = client.Append(ctx, "key", " World")
+ Expect(appendRes.Err()).NotTo(HaveOccurred())
+ Expect(appendRes.Val()).To(Equal(int64(11)))
get := client.Get(ctx, "key")
Expect(get.Err()).NotTo(HaveOccurred())
@@ -872,7 +1163,7 @@ var _ = Describe("Commands", func() {
Expect(bitCount.Val()).To(Equal(int64(6)))
})
- It("should BitOpAnd", func() {
+ It("should BitOpAnd", Label("NonRedisEnterprise"), func() {
set := client.Set(ctx, "key1", "1", 0)
Expect(set.Err()).NotTo(HaveOccurred())
Expect(set.Val()).To(Equal("OK"))
@@ -890,7 +1181,7 @@ var _ = Describe("Commands", func() {
Expect(get.Val()).To(Equal("0"))
})
- It("should BitOpOr", func() {
+ It("should BitOpOr", Label("NonRedisEnterprise"), func() {
set := client.Set(ctx, "key1", "1", 0)
Expect(set.Err()).NotTo(HaveOccurred())
Expect(set.Val()).To(Equal("OK"))
@@ -908,7 +1199,7 @@ var _ = Describe("Commands", func() {
Expect(get.Val()).To(Equal("1"))
})
- It("should BitOpXor", func() {
+ It("should BitOpXor", Label("NonRedisEnterprise"), func() {
set := client.Set(ctx, "key1", "\xff", 0)
Expect(set.Err()).NotTo(HaveOccurred())
Expect(set.Val()).To(Equal("OK"))
@@ -926,7 +1217,7 @@ var _ = Describe("Commands", func() {
Expect(get.Val()).To(Equal("\xf0"))
})
- It("should BitOpNot", func() {
+ It("should BitOpNot", Label("NonRedisEnterprise"), func() {
set := client.Set(ctx, "key1", "\x00", 0)
Expect(set.Err()).NotTo(HaveOccurred())
Expect(set.Val()).To(Equal("OK"))
@@ -981,10 +1272,41 @@ var _ = Describe("Commands", func() {
Expect(pos).To(Equal(int64(-1)))
})
+ It("should BitPosSpan", func() {
+ err := client.Set(ctx, "mykey", "\x00\xff\x00", 0).Err()
+ Expect(err).NotTo(HaveOccurred())
+
+ pos, err := client.BitPosSpan(ctx, "mykey", 0, 1, 3, "byte").Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(pos).To(Equal(int64(16)))
+
+ pos, err = client.BitPosSpan(ctx, "mykey", 0, 1, 3, "bit").Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(pos).To(Equal(int64(1)))
+ })
+
It("should BitField", func() {
nn, err := client.BitField(ctx, "mykey", "INCRBY", "i5", 100, 1, "GET", "u4", 0).Result()
Expect(err).NotTo(HaveOccurred())
Expect(nn).To(Equal([]int64{1, 0}))
+
+ nn, err = client.BitField(ctx, "mykey", "set", "i1", 1, 1, "GET", "u4", 0).Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(nn).To(Equal([]int64{0, 4}))
+ })
+
+ It("should BitFieldRO", func() {
+ nn, err := client.BitField(ctx, "mykey", "SET", "u8", 8, 255).Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(nn).To(Equal([]int64{0}))
+
+ nn, err = client.BitFieldRO(ctx, "mykey", "u8", 0).Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(nn).To(Equal([]int64{0}))
+
+ nn, err = client.BitFieldRO(ctx, "mykey", "u8", 0, "u4", 8, "u4", 12, "u4", 13).Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(nn).To(Equal([]int64{0, 15, 15, 14}))
})
It("should Decr", func() {
@@ -1170,25 +1492,61 @@ var _ = Describe("Commands", func() {
mGet := client.MGet(ctx, "key1", "key2", "_")
Expect(mGet.Err()).NotTo(HaveOccurred())
Expect(mGet.Val()).To(Equal([]interface{}{"hello1", "hello2", nil}))
+
+ // MSet struct
+ type set struct {
+ Set1 string `redis:"set1"`
+ Set2 int16 `redis:"set2"`
+ Set3 time.Duration `redis:"set3"`
+ Set4 interface{} `redis:"set4"`
+ Set5 map[string]interface{} `redis:"-"`
+ }
+ mSet = client.MSet(ctx, &set{
+ Set1: "val1",
+ Set2: 1024,
+ Set3: 2 * time.Millisecond,
+ Set4: nil,
+ Set5: map[string]interface{}{"k1": 1},
+ })
+ Expect(mSet.Err()).NotTo(HaveOccurred())
+ Expect(mSet.Val()).To(Equal("OK"))
+
+ mGet = client.MGet(ctx, "set1", "set2", "set3", "set4")
+ Expect(mGet.Err()).NotTo(HaveOccurred())
+ Expect(mGet.Val()).To(Equal([]interface{}{
+ "val1",
+ "1024",
+ strconv.Itoa(int(2 * time.Millisecond.Nanoseconds())),
+ "",
+ }))
})
It("should scan Mget", func() {
- err := client.MSet(ctx, "key1", "hello1", "key2", 123).Err()
+ now := time.Now()
+
+ err := client.MSet(ctx, "key1", "hello1", "key2", 123, "time", now.Format(time.RFC3339Nano)).Err()
Expect(err).NotTo(HaveOccurred())
- res := client.MGet(ctx, "key1", "key2", "_")
+ res := client.MGet(ctx, "key1", "key2", "_", "time")
Expect(res.Err()).NotTo(HaveOccurred())
type data struct {
- Key1 string `redis:"key1"`
- Key2 int `redis:"key2"`
+ Key1 string `redis:"key1"`
+ Key2 int `redis:"key2"`
+ Time TimeValue `redis:"time"`
}
var d data
Expect(res.Scan(&d)).NotTo(HaveOccurred())
- Expect(d).To(Equal(data{Key1: "hello1", Key2: 123}))
+ Expect(d.Time.UnixNano()).To(Equal(now.UnixNano()))
+ d.Time.Time = time.Time{}
+ Expect(d).To(Equal(data{
+ Key1: "hello1",
+ Key2: 123,
+ Time: TimeValue{Time: time.Time{}},
+ }))
})
- It("should MSetNX", func() {
+ It("should MSetNX", Label("NonRedisEnterprise"), func() {
mSetNX := client.MSetNX(ctx, "key1", "hello1", "key2", "hello2")
Expect(mSetNX.Err()).NotTo(HaveOccurred())
Expect(mSetNX.Val()).To(Equal(true))
@@ -1196,6 +1554,25 @@ var _ = Describe("Commands", func() {
mSetNX = client.MSetNX(ctx, "key2", "hello1", "key3", "hello2")
Expect(mSetNX.Err()).NotTo(HaveOccurred())
Expect(mSetNX.Val()).To(Equal(false))
+
+ // set struct
+ // MSet struct
+ type set struct {
+ Set1 string `redis:"set1"`
+ Set2 int16 `redis:"set2"`
+ Set3 time.Duration `redis:"set3"`
+ Set4 interface{} `redis:"set4"`
+ Set5 map[string]interface{} `redis:"-"`
+ }
+ mSetNX = client.MSetNX(ctx, &set{
+ Set1: "val1",
+ Set2: 1024,
+ Set3: 2 * time.Millisecond,
+ Set4: nil,
+ Set5: map[string]interface{}{"k1": 1},
+ })
+ Expect(mSetNX.Err()).NotTo(HaveOccurred())
+ Expect(mSetNX.Val()).To(Equal(true))
})
It("should SetWithArgs with TTL", func() {
@@ -1297,7 +1674,7 @@ var _ = Describe("Commands", func() {
Get: true,
}
val, err := client.SetArgs(ctx, "key", "hello", args).Result()
- Expect(err).To(Equal(proto.RedisError("ERR syntax error")))
+ Expect(err).To(Equal(redis.Nil))
Expect(val).To(Equal(""))
})
@@ -1335,7 +1712,7 @@ var _ = Describe("Commands", func() {
Get: true,
}
val, err := client.SetArgs(ctx, "key", "hello", args).Result()
- Expect(err).To(Equal(proto.RedisError("ERR syntax error")))
+ Expect(err).To(Equal(redis.Nil))
Expect(val).To(Equal(""))
})
@@ -1496,7 +1873,7 @@ var _ = Describe("Commands", func() {
})
It("should SetEX", func() {
- err := client.SetEX(ctx, "key", "hello", 1*time.Second).Err()
+ err := client.SetEx(ctx, "key", "hello", 1*time.Second).Err()
Expect(err).NotTo(HaveOccurred())
val, err := client.Get(ctx, "key").Result()
@@ -1634,7 +2011,7 @@ var _ = Describe("Commands", func() {
Expect(strLen.Val()).To(Equal(int64(0)))
})
- It("should Copy", func() {
+ It("should Copy", Label("NonRedisEnterprise"), func() {
set := client.Set(ctx, "key", "hello", 0)
Expect(set.Err()).NotTo(HaveOccurred())
Expect(set.Val()).To(Equal("OK"))
@@ -1659,6 +2036,105 @@ var _ = Describe("Commands", func() {
replace := client.Copy(ctx, "newKey", "key", redisOptions().DB, true)
Expect(replace.Val()).To(Equal(int64(1)))
})
+
+ It("should acl dryrun", func() {
+ dryRun := client.ACLDryRun(ctx, "default", "get", "randomKey")
+ Expect(dryRun.Err()).NotTo(HaveOccurred())
+ Expect(dryRun.Val()).To(Equal("OK"))
+ })
+
+ It("should fail module loadex", Label("NonRedisEnterprise"), func() {
+ dryRun := client.ModuleLoadex(ctx, &redis.ModuleLoadexConfig{
+ Path: "/path/to/non-existent-library.so",
+ Conf: map[string]interface{}{
+ "param1": "value1",
+ },
+ Args: []interface{}{
+ "arg1",
+ },
+ })
+ Expect(dryRun.Err()).To(HaveOccurred())
+ Expect(dryRun.Err().Error()).To(Equal("ERR Error loading the extension. Please check the server logs."))
+ })
+
+ It("converts the module loadex configuration to a slice of arguments correctly", func() {
+ conf := &redis.ModuleLoadexConfig{
+ Path: "/path/to/your/module.so",
+ Conf: map[string]interface{}{
+ "param1": "value1",
+ },
+ Args: []interface{}{
+ "arg1",
+ "arg2",
+ 3,
+ },
+ }
+
+ args := conf.ToArgs()
+
+ // Test if the arguments are in the correct order
+ expectedArgs := []interface{}{
+ "MODULE",
+ "LOADEX",
+ "/path/to/your/module.so",
+ "CONFIG",
+ "param1",
+ "value1",
+ "ARGS",
+ "arg1",
+ "ARGS",
+ "arg2",
+ "ARGS",
+ 3,
+ }
+
+ Expect(args).To(Equal(expectedArgs))
+ })
+
+ It("should ACL LOG", Label("NonRedisEnterprise"), func() {
+ err := client.Do(ctx, "acl", "setuser", "test", ">test", "on", "allkeys", "+get").Err()
+ Expect(err).NotTo(HaveOccurred())
+
+ clientAcl := redis.NewClient(redisOptions())
+ clientAcl.Options().Username = "test"
+ clientAcl.Options().Password = "test"
+ clientAcl.Options().DB = 0
+ _ = clientAcl.Set(ctx, "mystring", "foo", 0).Err()
+ _ = clientAcl.HSet(ctx, "myhash", "foo", "bar").Err()
+ _ = clientAcl.SAdd(ctx, "myset", "foo", "bar").Err()
+
+ logEntries, err := client.ACLLog(ctx, 10).Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(len(logEntries)).To(Equal(4))
+
+ for _, entry := range logEntries {
+ Expect(entry.Reason).To(Equal("command"))
+ Expect(entry.Context).To(Equal("toplevel"))
+ Expect(entry.Object).NotTo(BeEmpty())
+ Expect(entry.Username).To(Equal("test"))
+ Expect(entry.AgeSeconds).To(BeNumerically(">=", 0))
+ Expect(entry.ClientInfo).NotTo(BeNil())
+ Expect(entry.EntryID).To(BeNumerically(">=", 0))
+ Expect(entry.TimestampCreated).To(BeNumerically(">=", 0))
+ Expect(entry.TimestampLastUpdated).To(BeNumerically(">=", 0))
+ }
+
+ limitedLogEntries, err := client.ACLLog(ctx, 2).Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(len(limitedLogEntries)).To(Equal(2))
+ })
+
+ It("should ACL LOG RESET", Label("NonRedisEnterprise"), func() {
+ // Call ACL LOG RESET
+ resetCmd := client.ACLLogReset(ctx)
+ Expect(resetCmd.Err()).NotTo(HaveOccurred())
+ Expect(resetCmd.Val()).To(Equal("OK"))
+
+ // Verify that the log is empty after the reset
+ logEntries, err := client.ACLLog(ctx, 10).Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(len(logEntries)).To(Equal(0))
+ })
})
Describe("hashes", func() {
@@ -1713,19 +2189,47 @@ var _ = Describe("Commands", func() {
})
It("should scan", func() {
- err := client.HMSet(ctx, "hash", "key1", "hello1", "key2", 123).Err()
+ now := time.Now()
+
+ err := client.HMSet(ctx, "hash", "key1", "hello1", "key2", 123, "time", now.Format(time.RFC3339Nano)).Err()
Expect(err).NotTo(HaveOccurred())
res := client.HGetAll(ctx, "hash")
Expect(res.Err()).NotTo(HaveOccurred())
type data struct {
- Key1 string `redis:"key1"`
- Key2 int `redis:"key2"`
+ Key1 string `redis:"key1"`
+ Key2 int `redis:"key2"`
+ Time TimeValue `redis:"time"`
}
var d data
Expect(res.Scan(&d)).NotTo(HaveOccurred())
- Expect(d).To(Equal(data{Key1: "hello1", Key2: 123}))
+ Expect(d.Time.UnixNano()).To(Equal(now.UnixNano()))
+ d.Time.Time = time.Time{}
+ Expect(d).To(Equal(data{
+ Key1: "hello1",
+ Key2: 123,
+ Time: TimeValue{Time: time.Time{}},
+ }))
+
+ type data2 struct {
+ Key1 string `redis:"key1"`
+ Key2 int `redis:"key2"`
+ Time time.Time `redis:"time"`
+ }
+ err = client.HSet(ctx, "hash", &data2{
+ Key1: "hello2",
+ Key2: 200,
+ Time: now,
+ }).Err()
+ Expect(err).NotTo(HaveOccurred())
+
+ var d2 data2
+ err = client.HMGet(ctx, "hash", "key1", "key2", "time").Scan(&d2)
+ Expect(err).NotTo(HaveOccurred())
+ Expect(d2.Key1).To(Equal("hello2"))
+ Expect(d2.Key2).To(Equal(200))
+ Expect(d2.Time.Unix()).To(Equal(now.Unix()))
})
It("should HIncrBy", func() {
@@ -1827,6 +2331,52 @@ var _ = Describe("Commands", func() {
hGet := client.HGet(ctx, "hash", "key")
Expect(hGet.Err()).NotTo(HaveOccurred())
Expect(hGet.Val()).To(Equal("hello"))
+
+ // set struct
+ // MSet struct
+ type set struct {
+ Set1 string `redis:"set1"`
+ Set2 int16 `redis:"set2"`
+ Set3 time.Duration `redis:"set3"`
+ Set4 interface{} `redis:"set4"`
+ Set5 map[string]interface{} `redis:"-"`
+ Set6 string `redis:"set6,omitempty"`
+ }
+
+ hSet = client.HSet(ctx, "hash", &set{
+ Set1: "val1",
+ Set2: 1024,
+ Set3: 2 * time.Millisecond,
+ Set4: nil,
+ Set5: map[string]interface{}{"k1": 1},
+ })
+ Expect(hSet.Err()).NotTo(HaveOccurred())
+ Expect(hSet.Val()).To(Equal(int64(4)))
+
+ hMGet := client.HMGet(ctx, "hash", "set1", "set2", "set3", "set4", "set5", "set6")
+ Expect(hMGet.Err()).NotTo(HaveOccurred())
+ Expect(hMGet.Val()).To(Equal([]interface{}{
+ "val1",
+ "1024",
+ strconv.Itoa(int(2 * time.Millisecond.Nanoseconds())),
+ "",
+ nil,
+ nil,
+ }))
+
+ hSet = client.HSet(ctx, "hash2", &set{
+ Set1: "val2",
+ Set6: "val",
+ })
+ Expect(hSet.Err()).NotTo(HaveOccurred())
+ Expect(hSet.Val()).To(Equal(int64(5)))
+
+ hMGet = client.HMGet(ctx, "hash2", "set1", "set6")
+ Expect(hMGet.Err()).NotTo(HaveOccurred())
+ Expect(hMGet.Val()).To(Equal([]interface{}{
+ "val2",
+ "val",
+ }))
})
It("should HSetNX", func() {
@@ -1865,23 +2415,25 @@ var _ = Describe("Commands", func() {
err = client.HSet(ctx, "hash", "key2", "hello2").Err()
Expect(err).NotTo(HaveOccurred())
- v := client.HRandField(ctx, "hash", 1, false)
+ v := client.HRandField(ctx, "hash", 1)
Expect(v.Err()).NotTo(HaveOccurred())
Expect(v.Val()).To(Or(Equal([]string{"key1"}), Equal([]string{"key2"})))
- v = client.HRandField(ctx, "hash", 0, false)
+ v = client.HRandField(ctx, "hash", 0)
Expect(v.Err()).NotTo(HaveOccurred())
Expect(v.Val()).To(HaveLen(0))
- var slice []string
- err = client.HRandField(ctx, "hash", 1, true).ScanSlice(&slice)
+ kv, err := client.HRandFieldWithValues(ctx, "hash", 1).Result()
Expect(err).NotTo(HaveOccurred())
- Expect(slice).To(Or(Equal([]string{"key1", "hello1"}), Equal([]string{"key2", "hello2"})))
+ Expect(kv).To(Or(
+ Equal([]redis.KeyValue{{Key: "key1", Value: "hello1"}}),
+ Equal([]redis.KeyValue{{Key: "key2", Value: "hello2"}}),
+ ))
})
})
Describe("hyperloglog", func() {
- It("should PFMerge", func() {
+ It("should PFMerge", Label("NonRedisEnterprise"), func() {
pfAdd := client.PFAdd(ctx, "hll1", "1", "2", "3", "4", "5")
Expect(pfAdd.Err()).NotTo(HaveOccurred())
@@ -1906,7 +2458,7 @@ var _ = Describe("Commands", func() {
})
Describe("lists", func() {
- It("should BLPop", func() {
+ It("should BLPop", Label("NonRedisEnterprise"), func() {
rPush := client.RPush(ctx, "list1", "a", "b", "c")
Expect(rPush.Err()).NotTo(HaveOccurred())
@@ -1960,7 +2512,7 @@ var _ = Describe("Commands", func() {
Expect(stats.Timeouts).To(Equal(uint32(0)))
})
- It("should BRPop", func() {
+ It("should BRPop", Label("NonRedisEnterprise"), func() {
rPush := client.RPush(ctx, "list1", "a", "b", "c")
Expect(rPush.Err()).NotTo(HaveOccurred())
@@ -2002,7 +2554,7 @@ var _ = Describe("Commands", func() {
}
})
- It("should BRPopLPush", func() {
+ It("should BRPopLPush", Label("NonRedisEnterprise"), func() {
_, err := client.BRPopLPush(ctx, "list1", "list2", time.Second).Result()
Expect(err).To(Equal(redis.Nil))
@@ -2014,6 +2566,86 @@ var _ = Describe("Commands", func() {
Expect(v).To(Equal("c"))
})
+ It("should LCS", Label("NonRedisEnterprise"), func() {
+ err := client.MSet(ctx, "key1", "ohmytext", "key2", "mynewtext").Err()
+ Expect(err).NotTo(HaveOccurred())
+
+ lcs, err := client.LCS(ctx, &redis.LCSQuery{
+ Key1: "key1",
+ Key2: "key2",
+ }).Result()
+
+ Expect(err).NotTo(HaveOccurred())
+ Expect(lcs.MatchString).To(Equal("mytext"))
+
+ lcs, err = client.LCS(ctx, &redis.LCSQuery{
+ Key1: "nonexistent_key1",
+ Key2: "key2",
+ }).Result()
+
+ Expect(err).NotTo(HaveOccurred())
+ Expect(lcs.MatchString).To(Equal(""))
+
+ lcs, err = client.LCS(ctx, &redis.LCSQuery{
+ Key1: "key1",
+ Key2: "key2",
+ Len: true,
+ }).Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(lcs.MatchString).To(Equal(""))
+ Expect(lcs.Len).To(Equal(int64(6)))
+
+ lcs, err = client.LCS(ctx, &redis.LCSQuery{
+ Key1: "key1",
+ Key2: "key2",
+ Idx: true,
+ }).Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(lcs.MatchString).To(Equal(""))
+ Expect(lcs.Len).To(Equal(int64(6)))
+ Expect(lcs.Matches).To(Equal([]redis.LCSMatchedPosition{
+ {
+ Key1: redis.LCSPosition{Start: 4, End: 7},
+ Key2: redis.LCSPosition{Start: 5, End: 8},
+ MatchLen: 0,
+ },
+ {
+ Key1: redis.LCSPosition{Start: 2, End: 3},
+ Key2: redis.LCSPosition{Start: 0, End: 1},
+ MatchLen: 0,
+ },
+ }))
+
+ lcs, err = client.LCS(ctx, &redis.LCSQuery{
+ Key1: "key1",
+ Key2: "key2",
+ Idx: true,
+ MinMatchLen: 3,
+ WithMatchLen: true,
+ }).Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(lcs.MatchString).To(Equal(""))
+ Expect(lcs.Len).To(Equal(int64(6)))
+ Expect(lcs.Matches).To(Equal([]redis.LCSMatchedPosition{
+ {
+ Key1: redis.LCSPosition{Start: 4, End: 7},
+ Key2: redis.LCSPosition{Start: 5, End: 8},
+ MatchLen: 4,
+ },
+ }))
+
+ _, err = client.Set(ctx, "keywithstringvalue", "golang", 0).Result()
+ Expect(err).NotTo(HaveOccurred())
+ _, err = client.LPush(ctx, "keywithnonstringvalue", "somevalue").Result()
+ Expect(err).NotTo(HaveOccurred())
+ _, err = client.LCS(ctx, &redis.LCSQuery{
+ Key1: "keywithstringvalue",
+ Key2: "keywithnonstringvalue",
+ }).Result()
+ Expect(err).To(HaveOccurred())
+ Expect(err.Error()).To(Equal("ERR The specified keys must contain string values"))
+ })
+
It("should LIndex", func() {
lPush := client.LPush(ctx, "list", "World")
Expect(lPush.Err()).NotTo(HaveOccurred())
@@ -2048,6 +2680,120 @@ var _ = Describe("Commands", func() {
Expect(lRange.Val()).To(Equal([]string{"Hello", "There", "World"}))
})
+ It("should LMPop", Label("NonRedisEnterprise"), func() {
+ err := client.LPush(ctx, "list1", "one", "two", "three", "four", "five").Err()
+ Expect(err).NotTo(HaveOccurred())
+
+ err = client.LPush(ctx, "list2", "a", "b", "c", "d", "e").Err()
+ Expect(err).NotTo(HaveOccurred())
+
+ key, val, err := client.LMPop(ctx, "left", 3, "list1", "list2").Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(key).To(Equal("list1"))
+ Expect(val).To(Equal([]string{"five", "four", "three"}))
+
+ key, val, err = client.LMPop(ctx, "right", 3, "list1", "list2").Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(key).To(Equal("list1"))
+ Expect(val).To(Equal([]string{"one", "two"}))
+
+ key, val, err = client.LMPop(ctx, "left", 1, "list1", "list2").Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(key).To(Equal("list2"))
+ Expect(val).To(Equal([]string{"e"}))
+
+ key, val, err = client.LMPop(ctx, "right", 10, "list1", "list2").Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(key).To(Equal("list2"))
+ Expect(val).To(Equal([]string{"a", "b", "c", "d"}))
+
+ err = client.LMPop(ctx, "left", 10, "list1", "list2").Err()
+ Expect(err).To(Equal(redis.Nil))
+
+ err = client.Set(ctx, "list3", 1024, 0).Err()
+ Expect(err).NotTo(HaveOccurred())
+
+ err = client.LMPop(ctx, "left", 10, "list1", "list2", "list3").Err()
+ Expect(err.Error()).To(Equal("WRONGTYPE Operation against a key holding the wrong kind of value"))
+
+ err = client.LMPop(ctx, "right", 0, "list1", "list2").Err()
+ Expect(err).To(HaveOccurred())
+ })
+
+ It("should BLMPop", Label("NonRedisEnterprise"), func() {
+ err := client.LPush(ctx, "list1", "one", "two", "three", "four", "five").Err()
+ Expect(err).NotTo(HaveOccurred())
+
+ err = client.LPush(ctx, "list2", "a", "b", "c", "d", "e").Err()
+ Expect(err).NotTo(HaveOccurred())
+
+ key, val, err := client.BLMPop(ctx, 0, "left", 3, "list1", "list2").Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(key).To(Equal("list1"))
+ Expect(val).To(Equal([]string{"five", "four", "three"}))
+
+ key, val, err = client.BLMPop(ctx, 0, "right", 3, "list1", "list2").Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(key).To(Equal("list1"))
+ Expect(val).To(Equal([]string{"one", "two"}))
+
+ key, val, err = client.BLMPop(ctx, 0, "left", 1, "list1", "list2").Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(key).To(Equal("list2"))
+ Expect(val).To(Equal([]string{"e"}))
+
+ key, val, err = client.BLMPop(ctx, 0, "right", 10, "list1", "list2").Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(key).To(Equal("list2"))
+ Expect(val).To(Equal([]string{"a", "b", "c", "d"}))
+ })
+
+ It("should BLMPopBlocks", func() {
+ started := make(chan bool)
+ done := make(chan bool)
+ go func() {
+ defer GinkgoRecover()
+
+ started <- true
+ key, val, err := client.BLMPop(ctx, 0, "left", 1, "list_list").Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(key).To(Equal("list_list"))
+ Expect(val).To(Equal([]string{"a"}))
+ done <- true
+ }()
+ <-started
+
+ select {
+ case <-done:
+ Fail("BLMPop is not blocked")
+ case <-time.After(time.Second):
+ // ok
+ }
+
+ _, err := client.LPush(ctx, "list_list", "a").Result()
+ Expect(err).NotTo(HaveOccurred())
+
+ select {
+ case <-done:
+ // ok
+ case <-time.After(time.Second):
+ Fail("BLMPop is still blocked")
+ }
+ })
+
+ It("should BLMPop timeout", func() {
+ _, val, err := client.BLMPop(ctx, time.Second, "left", 1, "list1").Result()
+ Expect(err).To(Equal(redis.Nil))
+ Expect(val).To(BeNil())
+
+ Expect(client.Ping(ctx).Err()).NotTo(HaveOccurred())
+
+ stats := client.PoolStats()
+ Expect(stats.Hits).To(Equal(uint32(2)))
+ Expect(stats.Misses).To(Equal(uint32(1)))
+ Expect(stats.Timeouts).To(Equal(uint32(0)))
+ })
+
It("should LLen", func() {
lPush := client.LPush(ctx, "list", "World")
Expect(lPush.Err()).NotTo(HaveOccurred())
@@ -2308,7 +3054,7 @@ var _ = Describe("Commands", func() {
Expect(lRange.Val()).To(Equal([]string{"one", "two"}))
})
- It("should RPopLPush", func() {
+ It("should RPopLPush", Label("NonRedisEnterprise"), func() {
rPush := client.RPush(ctx, "list", "one")
Expect(rPush.Err()).NotTo(HaveOccurred())
rPush = client.RPush(ctx, "list", "two")
@@ -2377,7 +3123,7 @@ var _ = Describe("Commands", func() {
Expect(lRange.Val()).To(Equal([]string{}))
})
- It("should LMove", func() {
+ It("should LMove", Label("NonRedisEnterprise"), func() {
rPush := client.RPush(ctx, "lmove1", "ichi")
Expect(rPush.Err()).NotTo(HaveOccurred())
Expect(rPush.Val()).To(Equal(int64(1)))
@@ -2399,7 +3145,7 @@ var _ = Describe("Commands", func() {
Expect(lRange.Val()).To(Equal([]string{"san"}))
})
- It("should BLMove", func() {
+ It("should BLMove", Label("NonRedisEnterprise"), func() {
rPush := client.RPush(ctx, "blmove1", "ichi")
Expect(rPush.Err()).NotTo(HaveOccurred())
Expect(rPush.Val()).To(Equal(int64(1)))
@@ -2466,7 +3212,7 @@ var _ = Describe("Commands", func() {
Expect(sCard.Val()).To(Equal(int64(2)))
})
- It("should SDiff", func() {
+ It("should SDiff", Label("NonRedisEnterprise"), func() {
sAdd := client.SAdd(ctx, "set1", "a")
Expect(sAdd.Err()).NotTo(HaveOccurred())
sAdd = client.SAdd(ctx, "set1", "b")
@@ -2486,7 +3232,7 @@ var _ = Describe("Commands", func() {
Expect(sDiff.Val()).To(ConsistOf([]string{"a", "b"}))
})
- It("should SDiffStore", func() {
+ It("should SDiffStore", Label("NonRedisEnterprise"), func() {
sAdd := client.SAdd(ctx, "set1", "a")
Expect(sAdd.Err()).NotTo(HaveOccurred())
sAdd = client.SAdd(ctx, "set1", "b")
@@ -2510,7 +3256,7 @@ var _ = Describe("Commands", func() {
Expect(sMembers.Val()).To(ConsistOf([]string{"a", "b"}))
})
- It("should SInter", func() {
+ It("should SInter", Label("NonRedisEnterprise"), func() {
sAdd := client.SAdd(ctx, "set1", "a")
Expect(sAdd.Err()).NotTo(HaveOccurred())
sAdd = client.SAdd(ctx, "set1", "b")
@@ -2530,7 +3276,37 @@ var _ = Describe("Commands", func() {
Expect(sInter.Val()).To(Equal([]string{"c"}))
})
- It("should SInterStore", func() {
+ It("should SInterCard", Label("NonRedisEnterprise"), func() {
+ sAdd := client.SAdd(ctx, "set1", "a")
+ Expect(sAdd.Err()).NotTo(HaveOccurred())
+ sAdd = client.SAdd(ctx, "set1", "b")
+ Expect(sAdd.Err()).NotTo(HaveOccurred())
+ sAdd = client.SAdd(ctx, "set1", "c")
+ Expect(sAdd.Err()).NotTo(HaveOccurred())
+
+ sAdd = client.SAdd(ctx, "set2", "b")
+ Expect(sAdd.Err()).NotTo(HaveOccurred())
+ sAdd = client.SAdd(ctx, "set2", "c")
+ Expect(sAdd.Err()).NotTo(HaveOccurred())
+ sAdd = client.SAdd(ctx, "set2", "d")
+ Expect(sAdd.Err()).NotTo(HaveOccurred())
+ sAdd = client.SAdd(ctx, "set2", "e")
+ Expect(sAdd.Err()).NotTo(HaveOccurred())
+ // limit 0 means no limit,see https://redis.io/commands/sintercard/ for more details
+ sInterCard := client.SInterCard(ctx, 0, "set1", "set2")
+ Expect(sInterCard.Err()).NotTo(HaveOccurred())
+ Expect(sInterCard.Val()).To(Equal(int64(2)))
+
+ sInterCard = client.SInterCard(ctx, 1, "set1", "set2")
+ Expect(sInterCard.Err()).NotTo(HaveOccurred())
+ Expect(sInterCard.Val()).To(Equal(int64(1)))
+
+ sInterCard = client.SInterCard(ctx, 3, "set1", "set2")
+ Expect(sInterCard.Err()).NotTo(HaveOccurred())
+ Expect(sInterCard.Val()).To(Equal(int64(2)))
+ })
+
+ It("should SInterStore", Label("NonRedisEnterprise"), func() {
sAdd := client.SAdd(ctx, "set1", "a")
Expect(sAdd.Err()).NotTo(HaveOccurred())
sAdd = client.SAdd(ctx, "set1", "b")
@@ -2598,7 +3374,7 @@ var _ = Describe("Commands", func() {
Expect(sMembersMap.Val()).To(Equal(map[string]struct{}{"Hello": {}, "World": {}}))
})
- It("should SMove", func() {
+ It("should SMove", Label("NonRedisEnterprise"), func() {
sAdd := client.SAdd(ctx, "set1", "one")
Expect(sAdd.Err()).NotTo(HaveOccurred())
sAdd = client.SAdd(ctx, "set1", "two")
@@ -2706,7 +3482,7 @@ var _ = Describe("Commands", func() {
Expect(sMembers.Val()).To(ConsistOf([]string{"three", "two"}))
})
- It("should SUnion", func() {
+ It("should SUnion", Label("NonRedisEnterprise"), func() {
sAdd := client.SAdd(ctx, "set1", "a")
Expect(sAdd.Err()).NotTo(HaveOccurred())
sAdd = client.SAdd(ctx, "set1", "b")
@@ -2726,7 +3502,7 @@ var _ = Describe("Commands", func() {
Expect(sUnion.Val()).To(HaveLen(5))
})
- It("should SUnionStore", func() {
+ It("should SUnionStore", Label("NonRedisEnterprise"), func() {
sAdd := client.SAdd(ctx, "set1", "a")
Expect(sAdd.Err()).NotTo(HaveOccurred())
sAdd = client.SAdd(ctx, "set1", "b")
@@ -2752,18 +3528,18 @@ var _ = Describe("Commands", func() {
})
Describe("sorted sets", func() {
- It("should BZPopMax", func() {
- err := client.ZAdd(ctx, "zset1", &redis.Z{
+ It("should BZPopMax", Label("NonRedisEnterprise"), func() {
+ err := client.ZAdd(ctx, "zset1", redis.Z{
Score: 1,
Member: "one",
}).Err()
Expect(err).NotTo(HaveOccurred())
- err = client.ZAdd(ctx, "zset1", &redis.Z{
+ err = client.ZAdd(ctx, "zset1", redis.Z{
Score: 2,
Member: "two",
}).Err()
Expect(err).NotTo(HaveOccurred())
- err = client.ZAdd(ctx, "zset1", &redis.Z{
+ err = client.ZAdd(ctx, "zset1", redis.Z{
Score: 3,
Member: "three",
}).Err()
@@ -2807,7 +3583,7 @@ var _ = Describe("Commands", func() {
// ok
}
- zAdd := client.ZAdd(ctx, "zset", &redis.Z{
+ zAdd := client.ZAdd(ctx, "zset", redis.Z{
Member: "a",
Score: 1,
})
@@ -2834,18 +3610,18 @@ var _ = Describe("Commands", func() {
Expect(stats.Timeouts).To(Equal(uint32(0)))
})
- It("should BZPopMin", func() {
- err := client.ZAdd(ctx, "zset1", &redis.Z{
+ It("should BZPopMin", Label("NonRedisEnterprise"), func() {
+ err := client.ZAdd(ctx, "zset1", redis.Z{
Score: 1,
Member: "one",
}).Err()
Expect(err).NotTo(HaveOccurred())
- err = client.ZAdd(ctx, "zset1", &redis.Z{
+ err = client.ZAdd(ctx, "zset1", redis.Z{
Score: 2,
Member: "two",
}).Err()
Expect(err).NotTo(HaveOccurred())
- err = client.ZAdd(ctx, "zset1", &redis.Z{
+ err = client.ZAdd(ctx, "zset1", redis.Z{
Score: 3,
Member: "three",
}).Err()
@@ -2889,7 +3665,7 @@ var _ = Describe("Commands", func() {
// ok
}
- zAdd := client.ZAdd(ctx, "zset", &redis.Z{
+ zAdd := client.ZAdd(ctx, "zset", redis.Z{
Member: "a",
Score: 1,
})
@@ -2917,28 +3693,28 @@ var _ = Describe("Commands", func() {
})
It("should ZAdd", func() {
- added, err := client.ZAdd(ctx, "zset", &redis.Z{
+ added, err := client.ZAdd(ctx, "zset", redis.Z{
Score: 1,
Member: "one",
}).Result()
Expect(err).NotTo(HaveOccurred())
Expect(added).To(Equal(int64(1)))
- added, err = client.ZAdd(ctx, "zset", &redis.Z{
+ added, err = client.ZAdd(ctx, "zset", redis.Z{
Score: 1,
Member: "uno",
}).Result()
Expect(err).NotTo(HaveOccurred())
Expect(added).To(Equal(int64(1)))
- added, err = client.ZAdd(ctx, "zset", &redis.Z{
+ added, err = client.ZAdd(ctx, "zset", redis.Z{
Score: 2,
Member: "two",
}).Result()
Expect(err).NotTo(HaveOccurred())
Expect(added).To(Equal(int64(1)))
- added, err = client.ZAdd(ctx, "zset", &redis.Z{
+ added, err = client.ZAdd(ctx, "zset", redis.Z{
Score: 3,
Member: "two",
}).Result()
@@ -2960,28 +3736,28 @@ var _ = Describe("Commands", func() {
})
It("should ZAdd bytes", func() {
- added, err := client.ZAdd(ctx, "zset", &redis.Z{
+ added, err := client.ZAdd(ctx, "zset", redis.Z{
Score: 1,
Member: []byte("one"),
}).Result()
Expect(err).NotTo(HaveOccurred())
Expect(added).To(Equal(int64(1)))
- added, err = client.ZAdd(ctx, "zset", &redis.Z{
+ added, err = client.ZAdd(ctx, "zset", redis.Z{
Score: 1,
Member: []byte("uno"),
}).Result()
Expect(err).NotTo(HaveOccurred())
Expect(added).To(Equal(int64(1)))
- added, err = client.ZAdd(ctx, "zset", &redis.Z{
+ added, err = client.ZAdd(ctx, "zset", redis.Z{
Score: 2,
Member: []byte("two"),
}).Result()
Expect(err).NotTo(HaveOccurred())
Expect(added).To(Equal(int64(1)))
- added, err = client.ZAdd(ctx, "zset", &redis.Z{
+ added, err = client.ZAdd(ctx, "zset", redis.Z{
Score: 3,
Member: []byte("two"),
}).Result()
@@ -3002,7 +3778,7 @@ var _ = Describe("Commands", func() {
}}))
})
- It("should ZAddArgs", func() {
+ It("should ZAddArgsGTAndLT", func() {
// Test only the GT+LT options.
added, err := client.ZAddArgs(ctx, "zset", redis.ZAddArgs{
GT: true,
@@ -3038,8 +3814,78 @@ var _ = Describe("Commands", func() {
Expect(vals).To(Equal([]redis.Z{{Score: 1, Member: "one"}}))
})
- It("should ZAddNX", func() {
- added, err := client.ZAddNX(ctx, "zset", &redis.Z{
+ It("should ZAddArgsLT", func() {
+ added, err := client.ZAddLT(ctx, "zset", redis.Z{
+ Score: 2,
+ Member: "one",
+ }).Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(added).To(Equal(int64(1)))
+
+ vals, err := client.ZRangeWithScores(ctx, "zset", 0, -1).Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(vals).To(Equal([]redis.Z{{Score: 2, Member: "one"}}))
+
+ added, err = client.ZAddLT(ctx, "zset", redis.Z{
+ Score: 3,
+ Member: "one",
+ }).Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(added).To(Equal(int64(0)))
+
+ vals, err = client.ZRangeWithScores(ctx, "zset", 0, -1).Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(vals).To(Equal([]redis.Z{{Score: 2, Member: "one"}}))
+
+ added, err = client.ZAddLT(ctx, "zset", redis.Z{
+ Score: 1,
+ Member: "one",
+ }).Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(added).To(Equal(int64(0)))
+
+ vals, err = client.ZRangeWithScores(ctx, "zset", 0, -1).Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(vals).To(Equal([]redis.Z{{Score: 1, Member: "one"}}))
+ })
+
+ It("should ZAddArgsGT", func() {
+ added, err := client.ZAddGT(ctx, "zset", redis.Z{
+ Score: 2,
+ Member: "one",
+ }).Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(added).To(Equal(int64(1)))
+
+ vals, err := client.ZRangeWithScores(ctx, "zset", 0, -1).Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(vals).To(Equal([]redis.Z{{Score: 2, Member: "one"}}))
+
+ added, err = client.ZAddGT(ctx, "zset", redis.Z{
+ Score: 3,
+ Member: "one",
+ }).Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(added).To(Equal(int64(0)))
+
+ vals, err = client.ZRangeWithScores(ctx, "zset", 0, -1).Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(vals).To(Equal([]redis.Z{{Score: 3, Member: "one"}}))
+
+ added, err = client.ZAddGT(ctx, "zset", redis.Z{
+ Score: 1,
+ Member: "one",
+ }).Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(added).To(Equal(int64(0)))
+
+ vals, err = client.ZRangeWithScores(ctx, "zset", 0, -1).Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(vals).To(Equal([]redis.Z{{Score: 3, Member: "one"}}))
+ })
+
+ It("should ZAddArgsNX", func() {
+ added, err := client.ZAddNX(ctx, "zset", redis.Z{
Score: 1,
Member: "one",
}).Result()
@@ -3050,7 +3896,7 @@ var _ = Describe("Commands", func() {
Expect(err).NotTo(HaveOccurred())
Expect(vals).To(Equal([]redis.Z{{Score: 1, Member: "one"}}))
- added, err = client.ZAddNX(ctx, "zset", &redis.Z{
+ added, err = client.ZAddNX(ctx, "zset", redis.Z{
Score: 2,
Member: "one",
}).Result()
@@ -3062,8 +3908,8 @@ var _ = Describe("Commands", func() {
Expect(vals).To(Equal([]redis.Z{{Score: 1, Member: "one"}}))
})
- It("should ZAddXX", func() {
- added, err := client.ZAddXX(ctx, "zset", &redis.Z{
+ It("should ZAddArgsXX", func() {
+ added, err := client.ZAddXX(ctx, "zset", redis.Z{
Score: 1,
Member: "one",
}).Result()
@@ -3074,14 +3920,14 @@ var _ = Describe("Commands", func() {
Expect(err).NotTo(HaveOccurred())
Expect(vals).To(BeEmpty())
- added, err = client.ZAdd(ctx, "zset", &redis.Z{
+ added, err = client.ZAdd(ctx, "zset", redis.Z{
Score: 1,
Member: "one",
}).Result()
Expect(err).NotTo(HaveOccurred())
Expect(added).To(Equal(int64(1)))
- added, err = client.ZAddXX(ctx, "zset", &redis.Z{
+ added, err = client.ZAddXX(ctx, "zset", redis.Z{
Score: 2,
Member: "one",
}).Result()
@@ -3093,28 +3939,33 @@ var _ = Describe("Commands", func() {
Expect(vals).To(Equal([]redis.Z{{Score: 2, Member: "one"}}))
})
- // TODO: remove in v9.
- It("should ZAddCh", func() {
- changed, err := client.ZAddCh(ctx, "zset", &redis.Z{
- Score: 1,
- Member: "one",
+ It("should ZAddArgsCh", func() {
+ changed, err := client.ZAddArgs(ctx, "zset", redis.ZAddArgs{
+ Ch: true,
+ Members: []redis.Z{
+ {Score: 1, Member: "one"},
+ },
}).Result()
Expect(err).NotTo(HaveOccurred())
Expect(changed).To(Equal(int64(1)))
- changed, err = client.ZAddCh(ctx, "zset", &redis.Z{
- Score: 1,
- Member: "one",
+ changed, err = client.ZAddArgs(ctx, "zset", redis.ZAddArgs{
+ Ch: true,
+ Members: []redis.Z{
+ {Score: 1, Member: "one"},
+ },
}).Result()
Expect(err).NotTo(HaveOccurred())
Expect(changed).To(Equal(int64(0)))
})
- // TODO: remove in v9.
- It("should ZAddNXCh", func() {
- changed, err := client.ZAddNXCh(ctx, "zset", &redis.Z{
- Score: 1,
- Member: "one",
+ It("should ZAddArgsNXCh", func() {
+ changed, err := client.ZAddArgs(ctx, "zset", redis.ZAddArgs{
+ NX: true,
+ Ch: true,
+ Members: []redis.Z{
+ {Score: 1, Member: "one"},
+ },
}).Result()
Expect(err).NotTo(HaveOccurred())
Expect(changed).To(Equal(int64(1)))
@@ -3123,9 +3974,12 @@ var _ = Describe("Commands", func() {
Expect(err).NotTo(HaveOccurred())
Expect(vals).To(Equal([]redis.Z{{Score: 1, Member: "one"}}))
- changed, err = client.ZAddNXCh(ctx, "zset", &redis.Z{
- Score: 2,
- Member: "one",
+ changed, err = client.ZAddArgs(ctx, "zset", redis.ZAddArgs{
+ NX: true,
+ Ch: true,
+ Members: []redis.Z{
+ {Score: 2, Member: "one"},
+ },
}).Result()
Expect(err).NotTo(HaveOccurred())
Expect(changed).To(Equal(int64(0)))
@@ -3138,11 +3992,13 @@ var _ = Describe("Commands", func() {
}}))
})
- // TODO: remove in v9.
- It("should ZAddXXCh", func() {
- changed, err := client.ZAddXXCh(ctx, "zset", &redis.Z{
- Score: 1,
- Member: "one",
+ It("should ZAddArgsXXCh", func() {
+ changed, err := client.ZAddArgs(ctx, "zset", redis.ZAddArgs{
+ XX: true,
+ Ch: true,
+ Members: []redis.Z{
+ {Score: 1, Member: "one"},
+ },
}).Result()
Expect(err).NotTo(HaveOccurred())
Expect(changed).To(Equal(int64(0)))
@@ -3151,16 +4007,19 @@ var _ = Describe("Commands", func() {
Expect(err).NotTo(HaveOccurred())
Expect(vals).To(BeEmpty())
- added, err := client.ZAdd(ctx, "zset", &redis.Z{
+ added, err := client.ZAdd(ctx, "zset", redis.Z{
Score: 1,
Member: "one",
}).Result()
Expect(err).NotTo(HaveOccurred())
Expect(added).To(Equal(int64(1)))
- changed, err = client.ZAddXXCh(ctx, "zset", &redis.Z{
- Score: 2,
- Member: "one",
+ changed, err = client.ZAddArgs(ctx, "zset", redis.ZAddArgs{
+ XX: true,
+ Ch: true,
+ Members: []redis.Z{
+ {Score: 2, Member: "one"},
+ },
}).Result()
Expect(err).NotTo(HaveOccurred())
Expect(changed).To(Equal(int64(1)))
@@ -3170,11 +4029,11 @@ var _ = Describe("Commands", func() {
Expect(vals).To(Equal([]redis.Z{{Score: 2, Member: "one"}}))
})
- // TODO: remove in v9.
- It("should ZIncr", func() {
- score, err := client.ZIncr(ctx, "zset", &redis.Z{
- Score: 1,
- Member: "one",
+ It("should ZAddArgsIncr", func() {
+ score, err := client.ZAddArgsIncr(ctx, "zset", redis.ZAddArgs{
+ Members: []redis.Z{
+ {Score: 1, Member: "one"},
+ },
}).Result()
Expect(err).NotTo(HaveOccurred())
Expect(score).To(Equal(float64(1)))
@@ -3183,9 +4042,10 @@ var _ = Describe("Commands", func() {
Expect(err).NotTo(HaveOccurred())
Expect(vals).To(Equal([]redis.Z{{Score: 1, Member: "one"}}))
- score, err = client.ZIncr(ctx, "zset", &redis.Z{
- Score: 1,
- Member: "one",
+ score, err = client.ZAddArgsIncr(ctx, "zset", redis.ZAddArgs{
+ Members: []redis.Z{
+ {Score: 1, Member: "one"},
+ },
}).Result()
Expect(err).NotTo(HaveOccurred())
Expect(score).To(Equal(float64(2)))
@@ -3195,11 +4055,12 @@ var _ = Describe("Commands", func() {
Expect(vals).To(Equal([]redis.Z{{Score: 2, Member: "one"}}))
})
- // TODO: remove in v9.
- It("should ZIncrNX", func() {
- score, err := client.ZIncrNX(ctx, "zset", &redis.Z{
- Score: 1,
- Member: "one",
+ It("should ZAddArgsIncrNX", func() {
+ score, err := client.ZAddArgsIncr(ctx, "zset", redis.ZAddArgs{
+ NX: true,
+ Members: []redis.Z{
+ {Score: 1, Member: "one"},
+ },
}).Result()
Expect(err).NotTo(HaveOccurred())
Expect(score).To(Equal(float64(1)))
@@ -3208,9 +4069,11 @@ var _ = Describe("Commands", func() {
Expect(err).NotTo(HaveOccurred())
Expect(vals).To(Equal([]redis.Z{{Score: 1, Member: "one"}}))
- score, err = client.ZIncrNX(ctx, "zset", &redis.Z{
- Score: 1,
- Member: "one",
+ score, err = client.ZAddArgsIncr(ctx, "zset", redis.ZAddArgs{
+ NX: true,
+ Members: []redis.Z{
+ {Score: 1, Member: "one"},
+ },
}).Result()
Expect(err).To(Equal(redis.Nil))
Expect(score).To(Equal(float64(0)))
@@ -3220,11 +4083,12 @@ var _ = Describe("Commands", func() {
Expect(vals).To(Equal([]redis.Z{{Score: 1, Member: "one"}}))
})
- // TODO: remove in v9.
- It("should ZIncrXX", func() {
- score, err := client.ZIncrXX(ctx, "zset", &redis.Z{
- Score: 1,
- Member: "one",
+ It("should ZAddArgsIncrXX", func() {
+ score, err := client.ZAddArgsIncr(ctx, "zset", redis.ZAddArgs{
+ XX: true,
+ Members: []redis.Z{
+ {Score: 1, Member: "one"},
+ },
}).Result()
Expect(err).To(Equal(redis.Nil))
Expect(score).To(Equal(float64(0)))
@@ -3233,16 +4097,18 @@ var _ = Describe("Commands", func() {
Expect(err).NotTo(HaveOccurred())
Expect(vals).To(BeEmpty())
- added, err := client.ZAdd(ctx, "zset", &redis.Z{
+ added, err := client.ZAdd(ctx, "zset", redis.Z{
Score: 1,
Member: "one",
}).Result()
Expect(err).NotTo(HaveOccurred())
Expect(added).To(Equal(int64(1)))
- score, err = client.ZIncrXX(ctx, "zset", &redis.Z{
- Score: 1,
- Member: "one",
+ score, err = client.ZAddArgsIncr(ctx, "zset", redis.ZAddArgs{
+ XX: true,
+ Members: []redis.Z{
+ {Score: 1, Member: "one"},
+ },
}).Result()
Expect(err).NotTo(HaveOccurred())
Expect(score).To(Equal(float64(2)))
@@ -3253,12 +4119,12 @@ var _ = Describe("Commands", func() {
})
It("should ZCard", func() {
- err := client.ZAdd(ctx, "zset", &redis.Z{
+ err := client.ZAdd(ctx, "zset", redis.Z{
Score: 1,
Member: "one",
}).Err()
Expect(err).NotTo(HaveOccurred())
- err = client.ZAdd(ctx, "zset", &redis.Z{
+ err = client.ZAdd(ctx, "zset", redis.Z{
Score: 2,
Member: "two",
}).Err()
@@ -3270,17 +4136,17 @@ var _ = Describe("Commands", func() {
})
It("should ZCount", func() {
- err := client.ZAdd(ctx, "zset", &redis.Z{
+ err := client.ZAdd(ctx, "zset", redis.Z{
Score: 1,
Member: "one",
}).Err()
Expect(err).NotTo(HaveOccurred())
- err = client.ZAdd(ctx, "zset", &redis.Z{
+ err = client.ZAdd(ctx, "zset", redis.Z{
Score: 2,
Member: "two",
}).Err()
Expect(err).NotTo(HaveOccurred())
- err = client.ZAdd(ctx, "zset", &redis.Z{
+ err = client.ZAdd(ctx, "zset", redis.Z{
Score: 3,
Member: "three",
}).Err()
@@ -3300,12 +4166,12 @@ var _ = Describe("Commands", func() {
})
It("should ZIncrBy", func() {
- err := client.ZAdd(ctx, "zset", &redis.Z{
+ err := client.ZAdd(ctx, "zset", redis.Z{
Score: 1,
Member: "one",
}).Err()
Expect(err).NotTo(HaveOccurred())
- err = client.ZAdd(ctx, "zset", &redis.Z{
+ err = client.ZAdd(ctx, "zset", redis.Z{
Score: 2,
Member: "two",
}).Err()
@@ -3326,23 +4192,23 @@ var _ = Describe("Commands", func() {
}}))
})
- It("should ZInterStore", func() {
- err := client.ZAdd(ctx, "zset1", &redis.Z{
+ It("should ZInterStore", Label("NonRedisEnterprise"), func() {
+ err := client.ZAdd(ctx, "zset1", redis.Z{
Score: 1,
Member: "one",
}).Err()
Expect(err).NotTo(HaveOccurred())
- err = client.ZAdd(ctx, "zset1", &redis.Z{
+ err = client.ZAdd(ctx, "zset1", redis.Z{
Score: 2,
Member: "two",
}).Err()
Expect(err).NotTo(HaveOccurred())
- err = client.ZAdd(ctx, "zset2", &redis.Z{Score: 1, Member: "one"}).Err()
+ err = client.ZAdd(ctx, "zset2", redis.Z{Score: 1, Member: "one"}).Err()
Expect(err).NotTo(HaveOccurred())
- err = client.ZAdd(ctx, "zset2", &redis.Z{Score: 2, Member: "two"}).Err()
+ err = client.ZAdd(ctx, "zset2", redis.Z{Score: 2, Member: "two"}).Err()
Expect(err).NotTo(HaveOccurred())
- err = client.ZAdd(ctx, "zset3", &redis.Z{Score: 3, Member: "two"}).Err()
+ err = client.ZAdd(ctx, "zset3", redis.Z{Score: 3, Member: "two"}).Err()
Expect(err).NotTo(HaveOccurred())
n, err := client.ZInterStore(ctx, "out", &redis.ZStore{
@@ -3363,17 +4229,218 @@ var _ = Describe("Commands", func() {
}}))
})
+ It("should ZMPop", Label("NonRedisEnterprise"), func() {
+ err := client.ZAdd(ctx, "zset", redis.Z{Score: 1, Member: "one"}).Err()
+ Expect(err).NotTo(HaveOccurred())
+ err = client.ZAdd(ctx, "zset", redis.Z{Score: 2, Member: "two"}).Err()
+ Expect(err).NotTo(HaveOccurred())
+ err = client.ZAdd(ctx, "zset", redis.Z{Score: 3, Member: "three"}).Err()
+ Expect(err).NotTo(HaveOccurred())
+
+ err = client.ZAdd(ctx, "zset2", redis.Z{Score: 1, Member: "one"}).Err()
+ Expect(err).NotTo(HaveOccurred())
+ err = client.ZAdd(ctx, "zset2", redis.Z{Score: 2, Member: "two"}).Err()
+ Expect(err).NotTo(HaveOccurred())
+ err = client.ZAdd(ctx, "zset2", redis.Z{Score: 3, Member: "three"}).Err()
+ Expect(err).NotTo(HaveOccurred())
+
+ key, elems, err := client.ZMPop(ctx, "min", 1, "zset").Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(key).To(Equal("zset"))
+ Expect(elems).To(Equal([]redis.Z{{
+ Score: 1,
+ Member: "one",
+ }}))
+
+ _, _, err = client.ZMPop(ctx, "min", 1, "nosuchkey").Result()
+ Expect(err).To(Equal(redis.Nil))
+
+ err = client.ZAdd(ctx, "myzset", redis.Z{Score: 1, Member: "one"}).Err()
+ Expect(err).NotTo(HaveOccurred())
+ err = client.ZAdd(ctx, "myzset", redis.Z{Score: 2, Member: "two"}).Err()
+ Expect(err).NotTo(HaveOccurred())
+ err = client.ZAdd(ctx, "myzset", redis.Z{Score: 3, Member: "three"}).Err()
+ Expect(err).NotTo(HaveOccurred())
+
+ key, elems, err = client.ZMPop(ctx, "min", 1, "myzset").Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(key).To(Equal("myzset"))
+ Expect(elems).To(Equal([]redis.Z{{
+ Score: 1,
+ Member: "one",
+ }}))
+
+ key, elems, err = client.ZMPop(ctx, "max", 10, "myzset").Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(key).To(Equal("myzset"))
+ Expect(elems).To(Equal([]redis.Z{{
+ Score: 3,
+ Member: "three",
+ }, {
+ Score: 2,
+ Member: "two",
+ }}))
+
+ err = client.ZAdd(ctx, "myzset2", redis.Z{Score: 4, Member: "four"}).Err()
+ Expect(err).NotTo(HaveOccurred())
+ err = client.ZAdd(ctx, "myzset2", redis.Z{Score: 5, Member: "five"}).Err()
+ Expect(err).NotTo(HaveOccurred())
+ err = client.ZAdd(ctx, "myzset2", redis.Z{Score: 6, Member: "six"}).Err()
+ Expect(err).NotTo(HaveOccurred())
+
+ key, elems, err = client.ZMPop(ctx, "min", 10, "myzset", "myzset2").Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(key).To(Equal("myzset2"))
+ Expect(elems).To(Equal([]redis.Z{{
+ Score: 4,
+ Member: "four",
+ }, {
+ Score: 5,
+ Member: "five",
+ }, {
+ Score: 6,
+ Member: "six",
+ }}))
+ })
+
+ It("should BZMPop", Label("NonRedisEnterprise"), func() {
+ err := client.ZAdd(ctx, "zset", redis.Z{Score: 1, Member: "one"}).Err()
+ Expect(err).NotTo(HaveOccurred())
+ err = client.ZAdd(ctx, "zset", redis.Z{Score: 2, Member: "two"}).Err()
+ Expect(err).NotTo(HaveOccurred())
+ err = client.ZAdd(ctx, "zset", redis.Z{Score: 3, Member: "three"}).Err()
+ Expect(err).NotTo(HaveOccurred())
+
+ err = client.ZAdd(ctx, "zset2", redis.Z{Score: 1, Member: "one"}).Err()
+ Expect(err).NotTo(HaveOccurred())
+ err = client.ZAdd(ctx, "zset2", redis.Z{Score: 2, Member: "two"}).Err()
+ Expect(err).NotTo(HaveOccurred())
+ err = client.ZAdd(ctx, "zset2", redis.Z{Score: 3, Member: "three"}).Err()
+ Expect(err).NotTo(HaveOccurred())
+
+ key, elems, err := client.BZMPop(ctx, 0, "min", 1, "zset").Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(key).To(Equal("zset"))
+ Expect(elems).To(Equal([]redis.Z{{
+ Score: 1,
+ Member: "one",
+ }}))
+ key, elems, err = client.BZMPop(ctx, 0, "max", 1, "zset").Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(key).To(Equal("zset"))
+ Expect(elems).To(Equal([]redis.Z{{
+ Score: 3,
+ Member: "three",
+ }}))
+ key, elems, err = client.BZMPop(ctx, 0, "min", 10, "zset").Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(key).To(Equal("zset"))
+ Expect(elems).To(Equal([]redis.Z{{
+ Score: 2,
+ Member: "two",
+ }}))
+
+ key, elems, err = client.BZMPop(ctx, 0, "max", 10, "zset2").Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(key).To(Equal("zset2"))
+ Expect(elems).To(Equal([]redis.Z{{
+ Score: 3,
+ Member: "three",
+ }, {
+ Score: 2,
+ Member: "two",
+ }, {
+ Score: 1,
+ Member: "one",
+ }}))
+
+ err = client.ZAdd(ctx, "myzset", redis.Z{Score: 1, Member: "one"}).Err()
+ Expect(err).NotTo(HaveOccurred())
+ key, elems, err = client.BZMPop(ctx, 0, "min", 10, "myzset").Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(key).To(Equal("myzset"))
+ Expect(elems).To(Equal([]redis.Z{{
+ Score: 1,
+ Member: "one",
+ }}))
+
+ err = client.ZAdd(ctx, "myzset2", redis.Z{Score: 4, Member: "four"}).Err()
+ Expect(err).NotTo(HaveOccurred())
+ err = client.ZAdd(ctx, "myzset2", redis.Z{Score: 5, Member: "five"}).Err()
+ Expect(err).NotTo(HaveOccurred())
+
+ key, elems, err = client.BZMPop(ctx, 0, "min", 10, "myzset", "myzset2").Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(key).To(Equal("myzset2"))
+ Expect(elems).To(Equal([]redis.Z{{
+ Score: 4,
+ Member: "four",
+ }, {
+ Score: 5,
+ Member: "five",
+ }}))
+ })
+
+ It("should BZMPopBlocks", func() {
+ started := make(chan bool)
+ done := make(chan bool)
+ go func() {
+ defer GinkgoRecover()
+
+ started <- true
+ key, elems, err := client.BZMPop(ctx, 0, "min", 1, "list_list").Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(key).To(Equal("list_list"))
+ Expect(elems).To(Equal([]redis.Z{{
+ Score: 1,
+ Member: "one",
+ }}))
+ done <- true
+ }()
+ <-started
+
+ select {
+ case <-done:
+ Fail("BZMPop is not blocked")
+ case <-time.After(time.Second):
+ // ok
+ }
+
+ err := client.ZAdd(ctx, "list_list", redis.Z{Score: 1, Member: "one"}).Err()
+ Expect(err).NotTo(HaveOccurred())
+
+ select {
+ case <-done:
+ // ok
+ case <-time.After(time.Second):
+ Fail("BZMPop is still blocked")
+ }
+ })
+
+ It("should BZMPop timeout", func() {
+ _, val, err := client.BZMPop(ctx, time.Second, "min", 1, "list1").Result()
+ Expect(err).To(Equal(redis.Nil))
+ Expect(val).To(BeNil())
+
+ Expect(client.Ping(ctx).Err()).NotTo(HaveOccurred())
+
+ stats := client.PoolStats()
+ Expect(stats.Hits).To(Equal(uint32(2)))
+ Expect(stats.Misses).To(Equal(uint32(1)))
+ Expect(stats.Timeouts).To(Equal(uint32(0)))
+ })
+
It("should ZMScore", func() {
zmScore := client.ZMScore(ctx, "zset", "one", "three")
Expect(zmScore.Err()).NotTo(HaveOccurred())
Expect(zmScore.Val()).To(HaveLen(2))
Expect(zmScore.Val()[0]).To(Equal(float64(0)))
- err := client.ZAdd(ctx, "zset", &redis.Z{Score: 1, Member: "one"}).Err()
+ err := client.ZAdd(ctx, "zset", redis.Z{Score: 1, Member: "one"}).Err()
Expect(err).NotTo(HaveOccurred())
- err = client.ZAdd(ctx, "zset", &redis.Z{Score: 2, Member: "two"}).Err()
+ err = client.ZAdd(ctx, "zset", redis.Z{Score: 2, Member: "two"}).Err()
Expect(err).NotTo(HaveOccurred())
- err = client.ZAdd(ctx, "zset", &redis.Z{Score: 3, Member: "three"}).Err()
+ err = client.ZAdd(ctx, "zset", redis.Z{Score: 3, Member: "three"}).Err()
Expect(err).NotTo(HaveOccurred())
zmScore = client.ZMScore(ctx, "zset", "one", "three")
@@ -3391,17 +4458,17 @@ var _ = Describe("Commands", func() {
})
It("should ZPopMax", func() {
- err := client.ZAdd(ctx, "zset", &redis.Z{
+ err := client.ZAdd(ctx, "zset", redis.Z{
Score: 1,
Member: "one",
}).Err()
Expect(err).NotTo(HaveOccurred())
- err = client.ZAdd(ctx, "zset", &redis.Z{
+ err = client.ZAdd(ctx, "zset", redis.Z{
Score: 2,
Member: "two",
}).Err()
Expect(err).NotTo(HaveOccurred())
- err = client.ZAdd(ctx, "zset", &redis.Z{
+ err = client.ZAdd(ctx, "zset", redis.Z{
Score: 3,
Member: "three",
}).Err()
@@ -3415,7 +4482,7 @@ var _ = Describe("Commands", func() {
}}))
// adding back 3
- err = client.ZAdd(ctx, "zset", &redis.Z{
+ err = client.ZAdd(ctx, "zset", redis.Z{
Score: 3,
Member: "three",
}).Err()
@@ -3431,12 +4498,12 @@ var _ = Describe("Commands", func() {
}}))
// adding back 2 & 3
- err = client.ZAdd(ctx, "zset", &redis.Z{
+ err = client.ZAdd(ctx, "zset", redis.Z{
Score: 3,
Member: "three",
}).Err()
Expect(err).NotTo(HaveOccurred())
- err = client.ZAdd(ctx, "zset", &redis.Z{
+ err = client.ZAdd(ctx, "zset", redis.Z{
Score: 2,
Member: "two",
}).Err()
@@ -3456,17 +4523,17 @@ var _ = Describe("Commands", func() {
})
It("should ZPopMin", func() {
- err := client.ZAdd(ctx, "zset", &redis.Z{
+ err := client.ZAdd(ctx, "zset", redis.Z{
Score: 1,
Member: "one",
}).Err()
Expect(err).NotTo(HaveOccurred())
- err = client.ZAdd(ctx, "zset", &redis.Z{
+ err = client.ZAdd(ctx, "zset", redis.Z{
Score: 2,
Member: "two",
}).Err()
Expect(err).NotTo(HaveOccurred())
- err = client.ZAdd(ctx, "zset", &redis.Z{
+ err = client.ZAdd(ctx, "zset", redis.Z{
Score: 3,
Member: "three",
}).Err()
@@ -3480,7 +4547,7 @@ var _ = Describe("Commands", func() {
}}))
// adding back 1
- err = client.ZAdd(ctx, "zset", &redis.Z{
+ err = client.ZAdd(ctx, "zset", redis.Z{
Score: 1,
Member: "one",
}).Err()
@@ -3496,13 +4563,13 @@ var _ = Describe("Commands", func() {
}}))
// adding back 1 & 2
- err = client.ZAdd(ctx, "zset", &redis.Z{
+ err = client.ZAdd(ctx, "zset", redis.Z{
Score: 1,
Member: "one",
}).Err()
Expect(err).NotTo(HaveOccurred())
- err = client.ZAdd(ctx, "zset", &redis.Z{
+ err = client.ZAdd(ctx, "zset", redis.Z{
Score: 2,
Member: "two",
}).Err()
@@ -3523,11 +4590,11 @@ var _ = Describe("Commands", func() {
})
It("should ZRange", func() {
- err := client.ZAdd(ctx, "zset", &redis.Z{Score: 1, Member: "one"}).Err()
+ err := client.ZAdd(ctx, "zset", redis.Z{Score: 1, Member: "one"}).Err()
Expect(err).NotTo(HaveOccurred())
- err = client.ZAdd(ctx, "zset", &redis.Z{Score: 2, Member: "two"}).Err()
+ err = client.ZAdd(ctx, "zset", redis.Z{Score: 2, Member: "two"}).Err()
Expect(err).NotTo(HaveOccurred())
- err = client.ZAdd(ctx, "zset", &redis.Z{Score: 3, Member: "three"}).Err()
+ err = client.ZAdd(ctx, "zset", redis.Z{Score: 3, Member: "three"}).Err()
Expect(err).NotTo(HaveOccurred())
zRange := client.ZRange(ctx, "zset", 0, -1)
@@ -3544,11 +4611,11 @@ var _ = Describe("Commands", func() {
})
It("should ZRangeWithScores", func() {
- err := client.ZAdd(ctx, "zset", &redis.Z{Score: 1, Member: "one"}).Err()
+ err := client.ZAdd(ctx, "zset", redis.Z{Score: 1, Member: "one"}).Err()
Expect(err).NotTo(HaveOccurred())
- err = client.ZAdd(ctx, "zset", &redis.Z{Score: 2, Member: "two"}).Err()
+ err = client.ZAdd(ctx, "zset", redis.Z{Score: 2, Member: "two"}).Err()
Expect(err).NotTo(HaveOccurred())
- err = client.ZAdd(ctx, "zset", &redis.Z{Score: 3, Member: "three"}).Err()
+ err = client.ZAdd(ctx, "zset", redis.Z{Score: 3, Member: "three"}).Err()
Expect(err).NotTo(HaveOccurred())
vals, err := client.ZRangeWithScores(ctx, "zset", 0, -1).Result()
@@ -3642,11 +4709,11 @@ var _ = Describe("Commands", func() {
})
It("should ZRangeByScore", func() {
- err := client.ZAdd(ctx, "zset", &redis.Z{Score: 1, Member: "one"}).Err()
+ err := client.ZAdd(ctx, "zset", redis.Z{Score: 1, Member: "one"}).Err()
Expect(err).NotTo(HaveOccurred())
- err = client.ZAdd(ctx, "zset", &redis.Z{Score: 2, Member: "two"}).Err()
+ err = client.ZAdd(ctx, "zset", redis.Z{Score: 2, Member: "two"}).Err()
Expect(err).NotTo(HaveOccurred())
- err = client.ZAdd(ctx, "zset", &redis.Z{Score: 3, Member: "three"}).Err()
+ err = client.ZAdd(ctx, "zset", redis.Z{Score: 3, Member: "three"}).Err()
Expect(err).NotTo(HaveOccurred())
zRangeByScore := client.ZRangeByScore(ctx, "zset", &redis.ZRangeBy{
@@ -3679,17 +4746,17 @@ var _ = Describe("Commands", func() {
})
It("should ZRangeByLex", func() {
- err := client.ZAdd(ctx, "zset", &redis.Z{
+ err := client.ZAdd(ctx, "zset", redis.Z{
Score: 0,
Member: "a",
}).Err()
Expect(err).NotTo(HaveOccurred())
- err = client.ZAdd(ctx, "zset", &redis.Z{
+ err = client.ZAdd(ctx, "zset", redis.Z{
Score: 0,
Member: "b",
}).Err()
Expect(err).NotTo(HaveOccurred())
- err = client.ZAdd(ctx, "zset", &redis.Z{
+ err = client.ZAdd(ctx, "zset", redis.Z{
Score: 0,
Member: "c",
}).Err()
@@ -3725,11 +4792,11 @@ var _ = Describe("Commands", func() {
})
It("should ZRangeByScoreWithScoresMap", func() {
- err := client.ZAdd(ctx, "zset", &redis.Z{Score: 1, Member: "one"}).Err()
+ err := client.ZAdd(ctx, "zset", redis.Z{Score: 1, Member: "one"}).Err()
Expect(err).NotTo(HaveOccurred())
- err = client.ZAdd(ctx, "zset", &redis.Z{Score: 2, Member: "two"}).Err()
+ err = client.ZAdd(ctx, "zset", redis.Z{Score: 2, Member: "two"}).Err()
Expect(err).NotTo(HaveOccurred())
- err = client.ZAdd(ctx, "zset", &redis.Z{Score: 3, Member: "three"}).Err()
+ err = client.ZAdd(ctx, "zset", redis.Z{Score: 3, Member: "three"}).Err()
Expect(err).NotTo(HaveOccurred())
vals, err := client.ZRangeByScoreWithScores(ctx, "zset", &redis.ZRangeBy{
@@ -3776,7 +4843,7 @@ var _ = Describe("Commands", func() {
Expect(vals).To(Equal([]redis.Z{}))
})
- It("should ZRangeStore", func() {
+ It("should ZRangeStore", Label("NonRedisEnterprise"), func() {
added, err := client.ZAddArgs(ctx, "zset", redis.ZAddArgs{
Members: []redis.Z{
{Score: 1, Member: "one"},
@@ -3806,11 +4873,11 @@ var _ = Describe("Commands", func() {
})
It("should ZRank", func() {
- err := client.ZAdd(ctx, "zset", &redis.Z{Score: 1, Member: "one"}).Err()
+ err := client.ZAdd(ctx, "zset", redis.Z{Score: 1, Member: "one"}).Err()
Expect(err).NotTo(HaveOccurred())
- err = client.ZAdd(ctx, "zset", &redis.Z{Score: 2, Member: "two"}).Err()
+ err = client.ZAdd(ctx, "zset", redis.Z{Score: 2, Member: "two"}).Err()
Expect(err).NotTo(HaveOccurred())
- err = client.ZAdd(ctx, "zset", &redis.Z{Score: 3, Member: "three"}).Err()
+ err = client.ZAdd(ctx, "zset", redis.Z{Score: 3, Member: "three"}).Err()
Expect(err).NotTo(HaveOccurred())
zRank := client.ZRank(ctx, "zset", "three")
@@ -3822,12 +4889,37 @@ var _ = Describe("Commands", func() {
Expect(zRank.Val()).To(Equal(int64(0)))
})
+ It("should ZRankWithScore", func() {
+ err := client.ZAdd(ctx, "zset", redis.Z{Score: 1, Member: "one"}).Err()
+ Expect(err).NotTo(HaveOccurred())
+ err = client.ZAdd(ctx, "zset", redis.Z{Score: 2, Member: "two"}).Err()
+ Expect(err).NotTo(HaveOccurred())
+ err = client.ZAdd(ctx, "zset", redis.Z{Score: 3, Member: "three"}).Err()
+ Expect(err).NotTo(HaveOccurred())
+
+ zRankWithScore := client.ZRankWithScore(ctx, "zset", "one")
+ Expect(zRankWithScore.Err()).NotTo(HaveOccurred())
+ Expect(zRankWithScore.Result()).To(Equal(redis.RankScore{Rank: 0, Score: 1}))
+
+ zRankWithScore = client.ZRankWithScore(ctx, "zset", "two")
+ Expect(zRankWithScore.Err()).NotTo(HaveOccurred())
+ Expect(zRankWithScore.Result()).To(Equal(redis.RankScore{Rank: 1, Score: 2}))
+
+ zRankWithScore = client.ZRankWithScore(ctx, "zset", "three")
+ Expect(zRankWithScore.Err()).NotTo(HaveOccurred())
+ Expect(zRankWithScore.Result()).To(Equal(redis.RankScore{Rank: 2, Score: 3}))
+
+ zRankWithScore = client.ZRankWithScore(ctx, "zset", "four")
+ Expect(zRankWithScore.Err()).To(HaveOccurred())
+ Expect(zRankWithScore.Err()).To(Equal(redis.Nil))
+ })
+
It("should ZRem", func() {
- err := client.ZAdd(ctx, "zset", &redis.Z{Score: 1, Member: "one"}).Err()
+ err := client.ZAdd(ctx, "zset", redis.Z{Score: 1, Member: "one"}).Err()
Expect(err).NotTo(HaveOccurred())
- err = client.ZAdd(ctx, "zset", &redis.Z{Score: 2, Member: "two"}).Err()
+ err = client.ZAdd(ctx, "zset", redis.Z{Score: 2, Member: "two"}).Err()
Expect(err).NotTo(HaveOccurred())
- err = client.ZAdd(ctx, "zset", &redis.Z{Score: 3, Member: "three"}).Err()
+ err = client.ZAdd(ctx, "zset", redis.Z{Score: 3, Member: "three"}).Err()
Expect(err).NotTo(HaveOccurred())
zRem := client.ZRem(ctx, "zset", "two")
@@ -3846,11 +4938,11 @@ var _ = Describe("Commands", func() {
})
It("should ZRemRangeByRank", func() {
- err := client.ZAdd(ctx, "zset", &redis.Z{Score: 1, Member: "one"}).Err()
+ err := client.ZAdd(ctx, "zset", redis.Z{Score: 1, Member: "one"}).Err()
Expect(err).NotTo(HaveOccurred())
- err = client.ZAdd(ctx, "zset", &redis.Z{Score: 2, Member: "two"}).Err()
+ err = client.ZAdd(ctx, "zset", redis.Z{Score: 2, Member: "two"}).Err()
Expect(err).NotTo(HaveOccurred())
- err = client.ZAdd(ctx, "zset", &redis.Z{Score: 3, Member: "three"}).Err()
+ err = client.ZAdd(ctx, "zset", redis.Z{Score: 3, Member: "three"}).Err()
Expect(err).NotTo(HaveOccurred())
zRemRangeByRank := client.ZRemRangeByRank(ctx, "zset", 0, 1)
@@ -3866,11 +4958,11 @@ var _ = Describe("Commands", func() {
})
It("should ZRemRangeByScore", func() {
- err := client.ZAdd(ctx, "zset", &redis.Z{Score: 1, Member: "one"}).Err()
+ err := client.ZAdd(ctx, "zset", redis.Z{Score: 1, Member: "one"}).Err()
Expect(err).NotTo(HaveOccurred())
- err = client.ZAdd(ctx, "zset", &redis.Z{Score: 2, Member: "two"}).Err()
+ err = client.ZAdd(ctx, "zset", redis.Z{Score: 2, Member: "two"}).Err()
Expect(err).NotTo(HaveOccurred())
- err = client.ZAdd(ctx, "zset", &redis.Z{Score: 3, Member: "three"}).Err()
+ err = client.ZAdd(ctx, "zset", redis.Z{Score: 3, Member: "three"}).Err()
Expect(err).NotTo(HaveOccurred())
zRemRangeByScore := client.ZRemRangeByScore(ctx, "zset", "-inf", "(2")
@@ -3889,7 +4981,7 @@ var _ = Describe("Commands", func() {
})
It("should ZRemRangeByLex", func() {
- zz := []*redis.Z{
+ zz := []redis.Z{
{Score: 0, Member: "aaaa"},
{Score: 0, Member: "b"},
{Score: 0, Member: "c"},
@@ -3916,11 +5008,11 @@ var _ = Describe("Commands", func() {
})
It("should ZRevRange", func() {
- err := client.ZAdd(ctx, "zset", &redis.Z{Score: 1, Member: "one"}).Err()
+ err := client.ZAdd(ctx, "zset", redis.Z{Score: 1, Member: "one"}).Err()
Expect(err).NotTo(HaveOccurred())
- err = client.ZAdd(ctx, "zset", &redis.Z{Score: 2, Member: "two"}).Err()
+ err = client.ZAdd(ctx, "zset", redis.Z{Score: 2, Member: "two"}).Err()
Expect(err).NotTo(HaveOccurred())
- err = client.ZAdd(ctx, "zset", &redis.Z{Score: 3, Member: "three"}).Err()
+ err = client.ZAdd(ctx, "zset", redis.Z{Score: 3, Member: "three"}).Err()
Expect(err).NotTo(HaveOccurred())
zRevRange := client.ZRevRange(ctx, "zset", 0, -1)
@@ -3937,11 +5029,11 @@ var _ = Describe("Commands", func() {
})
It("should ZRevRangeWithScoresMap", func() {
- err := client.ZAdd(ctx, "zset", &redis.Z{Score: 1, Member: "one"}).Err()
+ err := client.ZAdd(ctx, "zset", redis.Z{Score: 1, Member: "one"}).Err()
Expect(err).NotTo(HaveOccurred())
- err = client.ZAdd(ctx, "zset", &redis.Z{Score: 2, Member: "two"}).Err()
+ err = client.ZAdd(ctx, "zset", redis.Z{Score: 2, Member: "two"}).Err()
Expect(err).NotTo(HaveOccurred())
- err = client.ZAdd(ctx, "zset", &redis.Z{Score: 3, Member: "three"}).Err()
+ err = client.ZAdd(ctx, "zset", redis.Z{Score: 3, Member: "three"}).Err()
Expect(err).NotTo(HaveOccurred())
val, err := client.ZRevRangeWithScores(ctx, "zset", 0, -1).Result()
@@ -3973,11 +5065,11 @@ var _ = Describe("Commands", func() {
})
It("should ZRevRangeByScore", func() {
- err := client.ZAdd(ctx, "zset", &redis.Z{Score: 1, Member: "one"}).Err()
+ err := client.ZAdd(ctx, "zset", redis.Z{Score: 1, Member: "one"}).Err()
Expect(err).NotTo(HaveOccurred())
- err = client.ZAdd(ctx, "zset", &redis.Z{Score: 2, Member: "two"}).Err()
+ err = client.ZAdd(ctx, "zset", redis.Z{Score: 2, Member: "two"}).Err()
Expect(err).NotTo(HaveOccurred())
- err = client.ZAdd(ctx, "zset", &redis.Z{Score: 3, Member: "three"}).Err()
+ err = client.ZAdd(ctx, "zset", redis.Z{Score: 3, Member: "three"}).Err()
Expect(err).NotTo(HaveOccurred())
vals, err := client.ZRevRangeByScore(
@@ -3997,11 +5089,11 @@ var _ = Describe("Commands", func() {
})
It("should ZRevRangeByLex", func() {
- err := client.ZAdd(ctx, "zset", &redis.Z{Score: 0, Member: "a"}).Err()
+ err := client.ZAdd(ctx, "zset", redis.Z{Score: 0, Member: "a"}).Err()
Expect(err).NotTo(HaveOccurred())
- err = client.ZAdd(ctx, "zset", &redis.Z{Score: 0, Member: "b"}).Err()
+ err = client.ZAdd(ctx, "zset", redis.Z{Score: 0, Member: "b"}).Err()
Expect(err).NotTo(HaveOccurred())
- err = client.ZAdd(ctx, "zset", &redis.Z{Score: 0, Member: "c"}).Err()
+ err = client.ZAdd(ctx, "zset", redis.Z{Score: 0, Member: "c"}).Err()
Expect(err).NotTo(HaveOccurred())
vals, err := client.ZRevRangeByLex(
@@ -4021,11 +5113,11 @@ var _ = Describe("Commands", func() {
})
It("should ZRevRangeByScoreWithScores", func() {
- err := client.ZAdd(ctx, "zset", &redis.Z{Score: 1, Member: "one"}).Err()
+ err := client.ZAdd(ctx, "zset", redis.Z{Score: 1, Member: "one"}).Err()
Expect(err).NotTo(HaveOccurred())
- err = client.ZAdd(ctx, "zset", &redis.Z{Score: 2, Member: "two"}).Err()
+ err = client.ZAdd(ctx, "zset", redis.Z{Score: 2, Member: "two"}).Err()
Expect(err).NotTo(HaveOccurred())
- err = client.ZAdd(ctx, "zset", &redis.Z{Score: 3, Member: "three"}).Err()
+ err = client.ZAdd(ctx, "zset", redis.Z{Score: 3, Member: "three"}).Err()
Expect(err).NotTo(HaveOccurred())
vals, err := client.ZRevRangeByScoreWithScores(
@@ -4044,11 +5136,11 @@ var _ = Describe("Commands", func() {
})
It("should ZRevRangeByScoreWithScoresMap", func() {
- err := client.ZAdd(ctx, "zset", &redis.Z{Score: 1, Member: "one"}).Err()
+ err := client.ZAdd(ctx, "zset", redis.Z{Score: 1, Member: "one"}).Err()
Expect(err).NotTo(HaveOccurred())
- err = client.ZAdd(ctx, "zset", &redis.Z{Score: 2, Member: "two"}).Err()
+ err = client.ZAdd(ctx, "zset", redis.Z{Score: 2, Member: "two"}).Err()
Expect(err).NotTo(HaveOccurred())
- err = client.ZAdd(ctx, "zset", &redis.Z{Score: 3, Member: "three"}).Err()
+ err = client.ZAdd(ctx, "zset", redis.Z{Score: 3, Member: "three"}).Err()
Expect(err).NotTo(HaveOccurred())
vals, err := client.ZRevRangeByScoreWithScores(
@@ -4077,11 +5169,11 @@ var _ = Describe("Commands", func() {
})
It("should ZRevRank", func() {
- err := client.ZAdd(ctx, "zset", &redis.Z{Score: 1, Member: "one"}).Err()
+ err := client.ZAdd(ctx, "zset", redis.Z{Score: 1, Member: "one"}).Err()
Expect(err).NotTo(HaveOccurred())
- err = client.ZAdd(ctx, "zset", &redis.Z{Score: 2, Member: "two"}).Err()
+ err = client.ZAdd(ctx, "zset", redis.Z{Score: 2, Member: "two"}).Err()
Expect(err).NotTo(HaveOccurred())
- err = client.ZAdd(ctx, "zset", &redis.Z{Score: 3, Member: "three"}).Err()
+ err = client.ZAdd(ctx, "zset", redis.Z{Score: 3, Member: "three"}).Err()
Expect(err).NotTo(HaveOccurred())
zRevRank := client.ZRevRank(ctx, "zset", "one")
@@ -4093,16 +5185,41 @@ var _ = Describe("Commands", func() {
Expect(zRevRank.Val()).To(Equal(int64(0)))
})
+ It("should ZRevRankWithScore", func() {
+ err := client.ZAdd(ctx, "zset", redis.Z{Score: 1, Member: "one"}).Err()
+ Expect(err).NotTo(HaveOccurred())
+ err = client.ZAdd(ctx, "zset", redis.Z{Score: 2, Member: "two"}).Err()
+ Expect(err).NotTo(HaveOccurred())
+ err = client.ZAdd(ctx, "zset", redis.Z{Score: 3, Member: "three"}).Err()
+ Expect(err).NotTo(HaveOccurred())
+
+ zRevRankWithScore := client.ZRevRankWithScore(ctx, "zset", "one")
+ Expect(zRevRankWithScore.Err()).NotTo(HaveOccurred())
+ Expect(zRevRankWithScore.Result()).To(Equal(redis.RankScore{Rank: 2, Score: 1}))
+
+ zRevRankWithScore = client.ZRevRankWithScore(ctx, "zset", "two")
+ Expect(zRevRankWithScore.Err()).NotTo(HaveOccurred())
+ Expect(zRevRankWithScore.Result()).To(Equal(redis.RankScore{Rank: 1, Score: 2}))
+
+ zRevRankWithScore = client.ZRevRankWithScore(ctx, "zset", "three")
+ Expect(zRevRankWithScore.Err()).NotTo(HaveOccurred())
+ Expect(zRevRankWithScore.Result()).To(Equal(redis.RankScore{Rank: 0, Score: 3}))
+
+ zRevRankWithScore = client.ZRevRankWithScore(ctx, "zset", "four")
+ Expect(zRevRankWithScore.Err()).To(HaveOccurred())
+ Expect(zRevRankWithScore.Err()).To(Equal(redis.Nil))
+ })
+
It("should ZScore", func() {
- zAdd := client.ZAdd(ctx, "zset", &redis.Z{Score: 1.001, Member: "one"})
+ zAdd := client.ZAdd(ctx, "zset", redis.Z{Score: 1.001, Member: "one"})
Expect(zAdd.Err()).NotTo(HaveOccurred())
zScore := client.ZScore(ctx, "zset", "one")
Expect(zScore.Err()).NotTo(HaveOccurred())
- Expect(zScore.Val()).To(Equal(float64(1.001)))
+ Expect(zScore.Val()).To(Equal(1.001))
})
- It("should ZUnion", func() {
+ It("should ZUnion", Label("NonRedisEnterprise"), func() {
err := client.ZAddArgs(ctx, "zset1", redis.ZAddArgs{
Members: []redis.Z{
{Score: 1, Member: "one"},
@@ -4141,17 +5258,17 @@ var _ = Describe("Commands", func() {
}))
})
- It("should ZUnionStore", func() {
- err := client.ZAdd(ctx, "zset1", &redis.Z{Score: 1, Member: "one"}).Err()
+ It("should ZUnionStore", Label("NonRedisEnterprise"), func() {
+ err := client.ZAdd(ctx, "zset1", redis.Z{Score: 1, Member: "one"}).Err()
Expect(err).NotTo(HaveOccurred())
- err = client.ZAdd(ctx, "zset1", &redis.Z{Score: 2, Member: "two"}).Err()
+ err = client.ZAdd(ctx, "zset1", redis.Z{Score: 2, Member: "two"}).Err()
Expect(err).NotTo(HaveOccurred())
- err = client.ZAdd(ctx, "zset2", &redis.Z{Score: 1, Member: "one"}).Err()
+ err = client.ZAdd(ctx, "zset2", redis.Z{Score: 1, Member: "one"}).Err()
Expect(err).NotTo(HaveOccurred())
- err = client.ZAdd(ctx, "zset2", &redis.Z{Score: 2, Member: "two"}).Err()
+ err = client.ZAdd(ctx, "zset2", redis.Z{Score: 2, Member: "two"}).Err()
Expect(err).NotTo(HaveOccurred())
- err = client.ZAdd(ctx, "zset2", &redis.Z{Score: 3, Member: "three"}).Err()
+ err = client.ZAdd(ctx, "zset2", redis.Z{Score: 3, Member: "three"}).Err()
Expect(err).NotTo(HaveOccurred())
n, err := client.ZUnionStore(ctx, "out", &redis.ZStore{
@@ -4176,33 +5293,35 @@ var _ = Describe("Commands", func() {
})
It("should ZRandMember", func() {
- err := client.ZAdd(ctx, "zset", &redis.Z{Score: 1, Member: "one"}).Err()
+ err := client.ZAdd(ctx, "zset", redis.Z{Score: 1, Member: "one"}).Err()
Expect(err).NotTo(HaveOccurred())
- err = client.ZAdd(ctx, "zset", &redis.Z{Score: 2, Member: "two"}).Err()
+ err = client.ZAdd(ctx, "zset", redis.Z{Score: 2, Member: "two"}).Err()
Expect(err).NotTo(HaveOccurred())
- v := client.ZRandMember(ctx, "zset", 1, false)
+ v := client.ZRandMember(ctx, "zset", 1)
Expect(v.Err()).NotTo(HaveOccurred())
Expect(v.Val()).To(Or(Equal([]string{"one"}), Equal([]string{"two"})))
- v = client.ZRandMember(ctx, "zset", 0, false)
+ v = client.ZRandMember(ctx, "zset", 0)
Expect(v.Err()).NotTo(HaveOccurred())
Expect(v.Val()).To(HaveLen(0))
- var slice []string
- err = client.ZRandMember(ctx, "zset", 1, true).ScanSlice(&slice)
+ kv, err := client.ZRandMemberWithScores(ctx, "zset", 1).Result()
Expect(err).NotTo(HaveOccurred())
- Expect(slice).To(Or(Equal([]string{"one", "1"}), Equal([]string{"two", "2"})))
+ Expect(kv).To(Or(
+ Equal([]redis.Z{{Member: "one", Score: 1}}),
+ Equal([]redis.Z{{Member: "two", Score: 2}}),
+ ))
})
- It("should ZDiff", func() {
- err := client.ZAdd(ctx, "zset1", &redis.Z{Score: 1, Member: "one"}).Err()
+ It("should ZDiff", Label("NonRedisEnterprise"), func() {
+ err := client.ZAdd(ctx, "zset1", redis.Z{Score: 1, Member: "one"}).Err()
Expect(err).NotTo(HaveOccurred())
- err = client.ZAdd(ctx, "zset1", &redis.Z{Score: 2, Member: "two"}).Err()
+ err = client.ZAdd(ctx, "zset1", redis.Z{Score: 2, Member: "two"}).Err()
Expect(err).NotTo(HaveOccurred())
- err = client.ZAdd(ctx, "zset1", &redis.Z{Score: 3, Member: "three"}).Err()
+ err = client.ZAdd(ctx, "zset1", redis.Z{Score: 3, Member: "three"}).Err()
Expect(err).NotTo(HaveOccurred())
- err = client.ZAdd(ctx, "zset2", &redis.Z{Score: 1, Member: "one"}).Err()
+ err = client.ZAdd(ctx, "zset2", redis.Z{Score: 1, Member: "one"}).Err()
Expect(err).NotTo(HaveOccurred())
v, err := client.ZDiff(ctx, "zset1", "zset2").Result()
@@ -4210,14 +5329,14 @@ var _ = Describe("Commands", func() {
Expect(v).To(Equal([]string{"two", "three"}))
})
- It("should ZDiffWithScores", func() {
- err := client.ZAdd(ctx, "zset1", &redis.Z{Score: 1, Member: "one"}).Err()
+ It("should ZDiffWithScores", Label("NonRedisEnterprise"), func() {
+ err := client.ZAdd(ctx, "zset1", redis.Z{Score: 1, Member: "one"}).Err()
Expect(err).NotTo(HaveOccurred())
- err = client.ZAdd(ctx, "zset1", &redis.Z{Score: 2, Member: "two"}).Err()
+ err = client.ZAdd(ctx, "zset1", redis.Z{Score: 2, Member: "two"}).Err()
Expect(err).NotTo(HaveOccurred())
- err = client.ZAdd(ctx, "zset1", &redis.Z{Score: 3, Member: "three"}).Err()
+ err = client.ZAdd(ctx, "zset1", redis.Z{Score: 3, Member: "three"}).Err()
Expect(err).NotTo(HaveOccurred())
- err = client.ZAdd(ctx, "zset2", &redis.Z{Score: 1, Member: "one"}).Err()
+ err = client.ZAdd(ctx, "zset2", redis.Z{Score: 1, Member: "one"}).Err()
Expect(err).NotTo(HaveOccurred())
v, err := client.ZDiffWithScores(ctx, "zset1", "zset2").Result()
@@ -4234,16 +5353,16 @@ var _ = Describe("Commands", func() {
}))
})
- It("should ZInter", func() {
- err := client.ZAdd(ctx, "zset1", &redis.Z{Score: 1, Member: "one"}).Err()
+ It("should ZInter", Label("NonRedisEnterprise"), func() {
+ err := client.ZAdd(ctx, "zset1", redis.Z{Score: 1, Member: "one"}).Err()
Expect(err).NotTo(HaveOccurred())
- err = client.ZAdd(ctx, "zset1", &redis.Z{Score: 2, Member: "two"}).Err()
+ err = client.ZAdd(ctx, "zset1", redis.Z{Score: 2, Member: "two"}).Err()
Expect(err).NotTo(HaveOccurred())
- err = client.ZAdd(ctx, "zset2", &redis.Z{Score: 1, Member: "one"}).Err()
+ err = client.ZAdd(ctx, "zset2", redis.Z{Score: 1, Member: "one"}).Err()
Expect(err).NotTo(HaveOccurred())
- err = client.ZAdd(ctx, "zset2", &redis.Z{Score: 2, Member: "two"}).Err()
+ err = client.ZAdd(ctx, "zset2", redis.Z{Score: 2, Member: "two"}).Err()
Expect(err).NotTo(HaveOccurred())
- err = client.ZAdd(ctx, "zset2", &redis.Z{Score: 3, Member: "three"}).Err()
+ err = client.ZAdd(ctx, "zset2", redis.Z{Score: 3, Member: "three"}).Err()
Expect(err).NotTo(HaveOccurred())
v, err := client.ZInter(ctx, &redis.ZStore{
@@ -4253,16 +5372,42 @@ var _ = Describe("Commands", func() {
Expect(v).To(Equal([]string{"one", "two"}))
})
- It("should ZInterWithScores", func() {
- err := client.ZAdd(ctx, "zset1", &redis.Z{Score: 1, Member: "one"}).Err()
+ It("should ZInterCard", Label("NonRedisEnterprise"), func() {
+ err := client.ZAdd(ctx, "zset1", redis.Z{Score: 1, Member: "one"}).Err()
Expect(err).NotTo(HaveOccurred())
- err = client.ZAdd(ctx, "zset1", &redis.Z{Score: 2, Member: "two"}).Err()
+ err = client.ZAdd(ctx, "zset1", redis.Z{Score: 2, Member: "two"}).Err()
Expect(err).NotTo(HaveOccurred())
- err = client.ZAdd(ctx, "zset2", &redis.Z{Score: 1, Member: "one"}).Err()
+ err = client.ZAdd(ctx, "zset2", redis.Z{Score: 1, Member: "one"}).Err()
Expect(err).NotTo(HaveOccurred())
- err = client.ZAdd(ctx, "zset2", &redis.Z{Score: 2, Member: "two"}).Err()
+ err = client.ZAdd(ctx, "zset2", redis.Z{Score: 2, Member: "two"}).Err()
Expect(err).NotTo(HaveOccurred())
- err = client.ZAdd(ctx, "zset2", &redis.Z{Score: 3, Member: "three"}).Err()
+ err = client.ZAdd(ctx, "zset2", redis.Z{Score: 3, Member: "three"}).Err()
+ Expect(err).NotTo(HaveOccurred())
+
+ // limit 0 means no limit
+ sInterCard := client.ZInterCard(ctx, 0, "zset1", "zset2")
+ Expect(sInterCard.Err()).NotTo(HaveOccurred())
+ Expect(sInterCard.Val()).To(Equal(int64(2)))
+
+ sInterCard = client.ZInterCard(ctx, 1, "zset1", "zset2")
+ Expect(sInterCard.Err()).NotTo(HaveOccurred())
+ Expect(sInterCard.Val()).To(Equal(int64(1)))
+
+ sInterCard = client.ZInterCard(ctx, 3, "zset1", "zset2")
+ Expect(sInterCard.Err()).NotTo(HaveOccurred())
+ Expect(sInterCard.Val()).To(Equal(int64(2)))
+ })
+
+ It("should ZInterWithScores", Label("NonRedisEnterprise"), func() {
+ err := client.ZAdd(ctx, "zset1", redis.Z{Score: 1, Member: "one"}).Err()
+ Expect(err).NotTo(HaveOccurred())
+ err = client.ZAdd(ctx, "zset1", redis.Z{Score: 2, Member: "two"}).Err()
+ Expect(err).NotTo(HaveOccurred())
+ err = client.ZAdd(ctx, "zset2", redis.Z{Score: 1, Member: "one"}).Err()
+ Expect(err).NotTo(HaveOccurred())
+ err = client.ZAdd(ctx, "zset2", redis.Z{Score: 2, Member: "two"}).Err()
+ Expect(err).NotTo(HaveOccurred())
+ err = client.ZAdd(ctx, "zset2", redis.Z{Score: 3, Member: "three"}).Err()
Expect(err).NotTo(HaveOccurred())
v, err := client.ZInterWithScores(ctx, &redis.ZStore{
@@ -4283,16 +5428,16 @@ var _ = Describe("Commands", func() {
}))
})
- It("should ZDiffStore", func() {
- err := client.ZAdd(ctx, "zset1", &redis.Z{Score: 1, Member: "one"}).Err()
+ It("should ZDiffStore", Label("NonRedisEnterprise"), func() {
+ err := client.ZAdd(ctx, "zset1", redis.Z{Score: 1, Member: "one"}).Err()
Expect(err).NotTo(HaveOccurred())
- err = client.ZAdd(ctx, "zset1", &redis.Z{Score: 2, Member: "two"}).Err()
+ err = client.ZAdd(ctx, "zset1", redis.Z{Score: 2, Member: "two"}).Err()
Expect(err).NotTo(HaveOccurred())
- err = client.ZAdd(ctx, "zset2", &redis.Z{Score: 1, Member: "one"}).Err()
+ err = client.ZAdd(ctx, "zset2", redis.Z{Score: 1, Member: "one"}).Err()
Expect(err).NotTo(HaveOccurred())
- err = client.ZAdd(ctx, "zset2", &redis.Z{Score: 2, Member: "two"}).Err()
+ err = client.ZAdd(ctx, "zset2", redis.Z{Score: 2, Member: "two"}).Err()
Expect(err).NotTo(HaveOccurred())
- err = client.ZAdd(ctx, "zset2", &redis.Z{Score: 3, Member: "three"}).Err()
+ err = client.ZAdd(ctx, "zset2", redis.Z{Score: 3, Member: "three"}).Err()
Expect(err).NotTo(HaveOccurred())
v, err := client.ZDiffStore(ctx, "out1", "zset1", "zset2").Result()
Expect(err).NotTo(HaveOccurred())
@@ -4338,23 +5483,6 @@ var _ = Describe("Commands", func() {
Expect(id).To(Equal("3-0"))
})
- // TODO remove in v9.
- It("should XTrim", func() {
- n, err := client.XTrim(ctx, "stream", 0).Result()
- Expect(err).NotTo(HaveOccurred())
- Expect(n).To(Equal(int64(3)))
- })
-
- // TODO remove in v9.
- It("should XTrimApprox", func() {
- n, err := client.XTrimApprox(ctx, "stream", 0).Result()
- Expect(err).NotTo(HaveOccurred())
- Expect(n).To(Equal(int64(3)))
- })
-
- // TODO XTrimMaxLenApprox/XTrimMinIDApprox There is a bug in the limit parameter.
- // TODO Don't test it for now.
- // TODO link: https://github.com/redis/redis/issues/9046
It("should XTrimMaxLen", func() {
n, err := client.XTrimMaxLen(ctx, "stream", 0).Result()
Expect(err).NotTo(HaveOccurred())
@@ -4396,9 +5524,6 @@ var _ = Describe("Commands", func() {
}))
})
- // TODO XAdd There is a bug in the limit parameter.
- // TODO Don't test it for now.
- // TODO link: https://github.com/redis/redis/issues/9046
It("should XAdd with MaxLen", func() {
id, err := client.XAdd(ctx, &redis.XAddArgs{
Stream: "stream",
@@ -4816,13 +5941,22 @@ var _ = Describe("Commands", func() {
res.RadixTreeNodes = 0
Expect(res).To(Equal(&redis.XInfoStream{
- Length: 3,
- RadixTreeKeys: 0,
- RadixTreeNodes: 0,
- Groups: 2,
- LastGeneratedID: "3-0",
- FirstEntry: redis.XMessage{ID: "1-0", Values: map[string]interface{}{"uno": "un"}},
- LastEntry: redis.XMessage{ID: "3-0", Values: map[string]interface{}{"tres": "troix"}},
+ Length: 3,
+ RadixTreeKeys: 0,
+ RadixTreeNodes: 0,
+ Groups: 2,
+ LastGeneratedID: "3-0",
+ MaxDeletedEntryID: "0-0",
+ EntriesAdded: 3,
+ FirstEntry: redis.XMessage{
+ ID: "1-0",
+ Values: map[string]interface{}{"uno": "un"},
+ },
+ LastEntry: redis.XMessage{
+ ID: "3-0",
+ Values: map[string]interface{}{"tres": "troix"},
+ },
+ RecordedFirstEntryID: "1-0",
}))
// stream is empty
@@ -4836,13 +5970,16 @@ var _ = Describe("Commands", func() {
res.RadixTreeNodes = 0
Expect(res).To(Equal(&redis.XInfoStream{
- Length: 0,
- RadixTreeKeys: 0,
- RadixTreeNodes: 0,
- Groups: 2,
- LastGeneratedID: "3-0",
- FirstEntry: redis.XMessage{},
- LastEntry: redis.XMessage{},
+ Length: 0,
+ RadixTreeKeys: 0,
+ RadixTreeNodes: 0,
+ Groups: 2,
+ LastGeneratedID: "3-0",
+ MaxDeletedEntryID: "3-0",
+ EntriesAdded: 3,
+ FirstEntry: redis.XMessage{},
+ LastEntry: redis.XMessage{},
+ RecordedFirstEntryID: "0-0",
}))
})
@@ -4862,7 +5999,9 @@ var _ = Describe("Commands", func() {
}
for k3, c := range g.Consumers {
Expect(now.Sub(c.SeenTime)).To(BeNumerically("<=", maxElapsed))
+ Expect(now.Sub(c.ActiveTime)).To(BeNumerically("<=", maxElapsed))
res.Groups[k].Consumers[k3].SeenTime = time.Time{}
+ res.Groups[k].Consumers[k3].ActiveTime = time.Time{}
for k4, p := range c.Pending {
Expect(now.Sub(p.DeliveryTime)).To(BeNumerically("<=", maxElapsed))
@@ -4871,114 +6010,104 @@ var _ = Describe("Commands", func() {
}
}
- Expect(res).To(Equal(&redis.XInfoStreamFull{
- Length: 3,
- RadixTreeKeys: 0,
- RadixTreeNodes: 0,
- LastGeneratedID: "3-0",
- Entries: []redis.XMessage{
- {ID: "1-0", Values: map[string]interface{}{"uno": "un"}},
- {ID: "2-0", Values: map[string]interface{}{"dos": "deux"}},
- },
- Groups: []redis.XInfoStreamGroup{
- {
- Name: "group1",
- LastDeliveredID: "3-0",
- PelCount: 3,
- Pending: []redis.XInfoStreamGroupPending{
- {
- ID: "1-0",
- Consumer: "consumer1",
- DeliveryTime: time.Time{},
- DeliveryCount: 1,
- },
- {
- ID: "2-0",
- Consumer: "consumer1",
- DeliveryTime: time.Time{},
- DeliveryCount: 1,
+ Expect(res.Groups).To(Equal([]redis.XInfoStreamGroup{
+ {
+ Name: "group1",
+ LastDeliveredID: "3-0",
+ EntriesRead: 3,
+ Lag: 0,
+ PelCount: 3,
+ Pending: []redis.XInfoStreamGroupPending{
+ {ID: "1-0", Consumer: "consumer1", DeliveryTime: time.Time{}, DeliveryCount: 1},
+ {ID: "2-0", Consumer: "consumer1", DeliveryTime: time.Time{}, DeliveryCount: 1},
+ },
+ Consumers: []redis.XInfoStreamConsumer{
+ {
+ Name: "consumer1",
+ SeenTime: time.Time{},
+ ActiveTime: time.Time{},
+ PelCount: 2,
+ Pending: []redis.XInfoStreamConsumerPending{
+ {ID: "1-0", DeliveryTime: time.Time{}, DeliveryCount: 1},
+ {ID: "2-0", DeliveryTime: time.Time{}, DeliveryCount: 1},
},
},
- Consumers: []redis.XInfoStreamConsumer{
- {
- Name: "consumer1",
- SeenTime: time.Time{},
- PelCount: 2,
- Pending: []redis.XInfoStreamConsumerPending{
- {
- ID: "1-0",
- DeliveryTime: time.Time{},
- DeliveryCount: 1,
- },
- {
- ID: "2-0",
- DeliveryTime: time.Time{},
- DeliveryCount: 1,
- },
- },
- },
- {
- Name: "consumer2",
- SeenTime: time.Time{},
- PelCount: 1,
- Pending: []redis.XInfoStreamConsumerPending{
- {
- ID: "3-0",
- DeliveryTime: time.Time{},
- DeliveryCount: 1,
- },
- },
+ {
+ Name: "consumer2",
+ SeenTime: time.Time{},
+ ActiveTime: time.Time{},
+ PelCount: 1,
+ Pending: []redis.XInfoStreamConsumerPending{
+ {ID: "3-0", DeliveryTime: time.Time{}, DeliveryCount: 1},
},
},
},
- {
- Name: "group2",
- LastDeliveredID: "3-0",
- PelCount: 2,
- Pending: []redis.XInfoStreamGroupPending{
- {
- ID: "2-0",
- Consumer: "consumer1",
- DeliveryTime: time.Time{},
- DeliveryCount: 1,
- },
- {
- ID: "3-0",
- Consumer: "consumer1",
- DeliveryTime: time.Time{},
- DeliveryCount: 1,
- },
- },
- Consumers: []redis.XInfoStreamConsumer{
- {
- Name: "consumer1",
- SeenTime: time.Time{},
- PelCount: 2,
- Pending: []redis.XInfoStreamConsumerPending{
- {
- ID: "2-0",
- DeliveryTime: time.Time{},
- DeliveryCount: 1,
- },
- {
- ID: "3-0",
- DeliveryTime: time.Time{},
- DeliveryCount: 1,
- },
- },
+ },
+ {
+ Name: "group2",
+ LastDeliveredID: "3-0",
+ EntriesRead: 3,
+ Lag: 0,
+ PelCount: 2,
+ Pending: []redis.XInfoStreamGroupPending{
+ {ID: "2-0", Consumer: "consumer1", DeliveryTime: time.Time{}, DeliveryCount: 1},
+ {ID: "3-0", Consumer: "consumer1", DeliveryTime: time.Time{}, DeliveryCount: 1},
+ },
+ Consumers: []redis.XInfoStreamConsumer{
+ {
+ Name: "consumer1",
+ SeenTime: time.Time{},
+ ActiveTime: time.Time{},
+ PelCount: 2,
+ Pending: []redis.XInfoStreamConsumerPending{
+ {ID: "2-0", DeliveryTime: time.Time{}, DeliveryCount: 1},
+ {ID: "3-0", DeliveryTime: time.Time{}, DeliveryCount: 1},
},
},
},
},
}))
+
+ // entries-read = nil
+ Expect(client.Del(ctx, "xinfo-stream-full-stream").Err()).NotTo(HaveOccurred())
+ id, err := client.XAdd(ctx, &redis.XAddArgs{
+ Stream: "xinfo-stream-full-stream",
+ ID: "*",
+ Values: []any{"k1", "v1"},
+ }).Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(client.XGroupCreateMkStream(ctx, "xinfo-stream-full-stream", "xinfo-stream-full-group", "0").Err()).NotTo(HaveOccurred())
+ res, err = client.XInfoStreamFull(ctx, "xinfo-stream-full-stream", 0).Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(res).To(Equal(&redis.XInfoStreamFull{
+ Length: 1,
+ RadixTreeKeys: 1,
+ RadixTreeNodes: 2,
+ LastGeneratedID: id,
+ MaxDeletedEntryID: "0-0",
+ EntriesAdded: 1,
+ Entries: []redis.XMessage{{ID: id, Values: map[string]any{"k1": "v1"}}},
+ Groups: []redis.XInfoStreamGroup{
+ {
+ Name: "xinfo-stream-full-group",
+ LastDeliveredID: "0-0",
+ EntriesRead: 0,
+ Lag: 1,
+ PelCount: 0,
+ Pending: []redis.XInfoStreamGroupPending{},
+ Consumers: []redis.XInfoStreamConsumer{},
+ },
+ },
+ RecordedFirstEntryID: id,
+ }))
})
It("should XINFO GROUPS", func() {
res, err := client.XInfoGroups(ctx, "stream").Result()
Expect(err).NotTo(HaveOccurred())
Expect(res).To(Equal([]redis.XInfoGroup{
- {Name: "group1", Consumers: 2, Pending: 3, LastDeliveredID: "3-0"},
- {Name: "group2", Consumers: 1, Pending: 2, LastDeliveredID: "3-0"},
+ {Name: "group1", Consumers: 2, Pending: 3, LastDeliveredID: "3-0", EntriesRead: 3},
+ {Name: "group2", Consumers: 1, Pending: 2, LastDeliveredID: "3-0", EntriesRead: 3},
}))
})
@@ -4987,10 +6116,12 @@ var _ = Describe("Commands", func() {
Expect(err).NotTo(HaveOccurred())
for i := range res {
res[i].Idle = 0
+ res[i].Inactive = 0
}
+
Expect(res).To(Equal([]redis.XInfoConsumer{
- {Name: "consumer1", Pending: 2, Idle: 0},
- {Name: "consumer2", Pending: 1, Idle: 0},
+ {Name: "consumer1", Pending: 2, Idle: 0, Inactive: 0},
+ {Name: "consumer2", Pending: 1, Idle: 0, Inactive: 0},
}))
})
})
@@ -5028,7 +6159,7 @@ var _ = Describe("Commands", func() {
Expect(res[1].Name).To(Equal("Catania"))
})
- It("should geo radius and store the result", func() {
+ It("should geo radius and store the result", Label("NonRedisEnterprise"), func() {
n, err := client.GeoRadiusStore(ctx, "Sicily", 15, 37, &redis.GeoRadiusQuery{
Radius: 200,
Store: "result",
@@ -5048,7 +6179,7 @@ var _ = Describe("Commands", func() {
}))
})
- It("should geo radius and store dist", func() {
+ It("should geo radius and store dist", Label("NonRedisEnterprise"), func() {
n, err := client.GeoRadiusStore(ctx, "Sicily", 15, 37, &redis.GeoRadiusQuery{
Radius: 200,
StoreDist: "result",
@@ -5330,7 +6461,7 @@ var _ = Describe("Commands", func() {
}))
})
- It("should geo search store", func() {
+ It("should geo search store", Label("NonRedisEnterprise"), func() {
q := &redis.GeoSearchStoreQuery{
GeoSearchQuery: redis.GeoSearchQuery{
Longitude: 15,
@@ -5401,7 +6532,7 @@ var _ = Describe("Commands", func() {
{nil, "", nil},
{"hello", "hello", new(string)},
{[]byte("hello"), "hello", new([]byte)},
- {int(1), "1", new(int)},
+ {1, "1", new(int)},
{int8(1), "1", new(int8)},
{int16(1), "1", new(int16)},
{int32(1), "1", new(int32)},
@@ -5412,7 +6543,7 @@ var _ = Describe("Commands", func() {
{uint32(1), "1", new(uint32)},
{uint64(1), "1", new(uint64)},
{float32(1.0), "1", new(float32)},
- {float64(1.0), "1", new(float64)},
+ {1.0, "1", new(float64)},
{true, "1", new(bool)},
{false, "0", new(bool)},
}
@@ -5481,13 +6612,374 @@ var _ = Describe("Commands", func() {
})
})
+ Describe("EvalRO", func() {
+ It("returns keys and values", func() {
+ vals, err := client.EvalRO(
+ ctx,
+ "return {KEYS[1],ARGV[1]}",
+ []string{"key"},
+ "hello",
+ ).Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(vals).To(Equal([]interface{}{"key", "hello"}))
+ })
+
+ It("returns all values after an error", func() {
+ vals, err := client.EvalRO(
+ ctx,
+ `return {12, {err="error"}, "abc"}`,
+ nil,
+ ).Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(vals).To(Equal([]interface{}{int64(12), proto.RedisError("error"), "abc"}))
+ })
+ })
+
+ Describe("Functions", func() {
+ var (
+ q redis.FunctionListQuery
+ lib1Code string
+ lib2Code string
+ lib1 redis.Library
+ lib2 redis.Library
+ )
+
+ BeforeEach(func() {
+ flush := client.FunctionFlush(ctx)
+ Expect(flush.Err()).NotTo(HaveOccurred())
+
+ lib1 = redis.Library{
+ Name: "mylib1",
+ Engine: "LUA",
+ Functions: []redis.Function{
+ {
+ Name: "lib1_func1",
+ Description: "This is the func-1 of lib 1",
+ Flags: []string{"allow-oom", "allow-stale"},
+ },
+ },
+ Code: `#!lua name=%s
+
+ local function f1(keys, args)
+ local hash = keys[1] -- Get the key name
+ local time = redis.call('TIME')[1] -- Get the current time from the Redis server
+
+ -- Add the current timestamp to the arguments that the user passed to the function, stored in args
+ table.insert(args, '_updated_at')
+ table.insert(args, time)
+
+ -- Run HSET with the updated argument list
+ return redis.call('HSET', hash, unpack(args))
+ end
+
+ redis.register_function{
+ function_name='%s',
+ description ='%s',
+ callback=f1,
+ flags={'%s', '%s'}
+ }`,
+ }
+
+ lib2 = redis.Library{
+ Name: "mylib2",
+ Engine: "LUA",
+ Functions: []redis.Function{
+ {
+ Name: "lib2_func1",
+ Flags: []string{},
+ },
+ {
+ Name: "lib2_func2",
+ Description: "This is the func-2 of lib 2",
+ Flags: []string{"no-writes"},
+ },
+ },
+ Code: `#!lua name=%s
+
+ local function f1(keys, args)
+ return 'Function 1'
+ end
+
+ local function f2(keys, args)
+ return 'Function 2'
+ end
+
+ redis.register_function('%s', f1)
+ redis.register_function{
+ function_name='%s',
+ description ='%s',
+ callback=f2,
+ flags={'%s'}
+ }`,
+ }
+
+ lib1Code = fmt.Sprintf(lib1.Code, lib1.Name, lib1.Functions[0].Name,
+ lib1.Functions[0].Description, lib1.Functions[0].Flags[0], lib1.Functions[0].Flags[1])
+ lib2Code = fmt.Sprintf(lib2.Code, lib2.Name, lib2.Functions[0].Name,
+ lib2.Functions[1].Name, lib2.Functions[1].Description, lib2.Functions[1].Flags[0])
+
+ q = redis.FunctionListQuery{}
+ })
+
+ It("Loads a new library", Label("NonRedisEnterprise"), func() {
+ functionLoad := client.FunctionLoad(ctx, lib1Code)
+ Expect(functionLoad.Err()).NotTo(HaveOccurred())
+ Expect(functionLoad.Val()).To(Equal(lib1.Name))
+
+ functionList := client.FunctionList(ctx, q)
+ Expect(functionList.Err()).NotTo(HaveOccurred())
+ Expect(functionList.Val()).To(HaveLen(1))
+ })
+
+ It("Loads and replaces a new library", Label("NonRedisEnterprise"), func() {
+ // Load a library for the first time
+ err := client.FunctionLoad(ctx, lib1Code).Err()
+ Expect(err).NotTo(HaveOccurred())
+
+ newFuncName := "replaces_func_name"
+ newFuncDesc := "replaces_func_desc"
+ flag1, flag2 := "allow-stale", "no-cluster"
+ newCode := fmt.Sprintf(lib1.Code, lib1.Name, newFuncName, newFuncDesc, flag1, flag2)
+
+ // And then replace it
+ functionLoadReplace := client.FunctionLoadReplace(ctx, newCode)
+ Expect(functionLoadReplace.Err()).NotTo(HaveOccurred())
+ Expect(functionLoadReplace.Val()).To(Equal(lib1.Name))
+
+ lib, err := client.FunctionList(ctx, q).First()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(lib.Functions).To(Equal([]redis.Function{
+ {
+ Name: newFuncName,
+ Description: newFuncDesc,
+ Flags: []string{flag1, flag2},
+ },
+ }))
+ })
+
+ It("Deletes a library", func() {
+ err := client.FunctionLoad(ctx, lib1Code).Err()
+ Expect(err).NotTo(HaveOccurred())
+
+ err = client.FunctionDelete(ctx, lib1.Name).Err()
+ Expect(err).NotTo(HaveOccurred())
+
+ val, err := client.FunctionList(ctx, redis.FunctionListQuery{
+ LibraryNamePattern: lib1.Name,
+ }).Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(val).To(HaveLen(0))
+ })
+
+ It("Flushes all libraries", func() {
+ err := client.FunctionLoad(ctx, lib1Code).Err()
+ Expect(err).NotTo(HaveOccurred())
+
+ err = client.FunctionLoad(ctx, lib2Code).Err()
+ Expect(err).NotTo(HaveOccurred())
+
+ err = client.FunctionFlush(ctx).Err()
+ Expect(err).NotTo(HaveOccurred())
+
+ val, err := client.FunctionList(ctx, q).Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(val).To(HaveLen(0))
+ })
+
+ It("Flushes all libraries asynchronously", func() {
+ functionLoad := client.FunctionLoad(ctx, lib1Code)
+ Expect(functionLoad.Err()).NotTo(HaveOccurred())
+
+ // we only verify the command result.
+ functionFlush := client.FunctionFlushAsync(ctx)
+ Expect(functionFlush.Err()).NotTo(HaveOccurred())
+ })
+
+ It("Kills a running function", func() {
+ functionKill := client.FunctionKill(ctx)
+ Expect(functionKill.Err()).To(MatchError("NOTBUSY No scripts in execution right now."))
+
+ // Add test for a long-running function, once we make the test for `function stats` pass
+ })
+
+ It("Lists registered functions", Label("NonRedisEnterprise"), func() {
+ err := client.FunctionLoad(ctx, lib1Code).Err()
+ Expect(err).NotTo(HaveOccurred())
+
+ val, err := client.FunctionList(ctx, redis.FunctionListQuery{
+ LibraryNamePattern: "*",
+ WithCode: true,
+ }).Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(val).To(HaveLen(1))
+ Expect(val[0].Name).To(Equal(lib1.Name))
+ Expect(val[0].Engine).To(Equal(lib1.Engine))
+ Expect(val[0].Code).To(Equal(lib1Code))
+ Expect(val[0].Functions).Should(ConsistOf(lib1.Functions))
+
+ err = client.FunctionLoad(ctx, lib2Code).Err()
+ Expect(err).NotTo(HaveOccurred())
+
+ val, err = client.FunctionList(ctx, redis.FunctionListQuery{
+ WithCode: true,
+ }).Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(val).To(HaveLen(2))
+
+ lib, err := client.FunctionList(ctx, redis.FunctionListQuery{
+ LibraryNamePattern: lib2.Name,
+ WithCode: false,
+ }).First()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(lib.Name).To(Equal(lib2.Name))
+ Expect(lib.Code).To(Equal(""))
+
+ _, err = client.FunctionList(ctx, redis.FunctionListQuery{
+ LibraryNamePattern: "non_lib",
+ WithCode: true,
+ }).First()
+ Expect(err).To(Equal(redis.Nil))
+ })
+
+ It("Dump and restores all libraries", Label("NonRedisEnterprise"), func() {
+ err := client.FunctionLoad(ctx, lib1Code).Err()
+ Expect(err).NotTo(HaveOccurred())
+ err = client.FunctionLoad(ctx, lib2Code).Err()
+ Expect(err).NotTo(HaveOccurred())
+
+ dump, err := client.FunctionDump(ctx).Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(dump).NotTo(BeEmpty())
+
+ err = client.FunctionRestore(ctx, dump).Err()
+ Expect(err).To(HaveOccurred())
+
+ err = client.FunctionFlush(ctx).Err()
+ Expect(err).NotTo(HaveOccurred())
+
+ list, err := client.FunctionList(ctx, q).Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(list).To(HaveLen(0))
+
+ err = client.FunctionRestore(ctx, dump).Err()
+ Expect(err).NotTo(HaveOccurred())
+
+ list, err = client.FunctionList(ctx, q).Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(list).To(HaveLen(2))
+ })
+
+ It("Calls a function", func() {
+ lib1Code = fmt.Sprintf(lib1.Code, lib1.Name, lib1.Functions[0].Name,
+ lib1.Functions[0].Description, lib1.Functions[0].Flags[0], lib1.Functions[0].Flags[1])
+
+ err := client.FunctionLoad(ctx, lib1Code).Err()
+ Expect(err).NotTo(HaveOccurred())
+
+ x := client.FCall(ctx, lib1.Functions[0].Name, []string{"my_hash"}, "a", 1, "b", 2)
+ Expect(x.Err()).NotTo(HaveOccurred())
+ Expect(x.Int()).To(Equal(3))
+ })
+
+ It("Calls a function as read-only", func() {
+ lib1Code = fmt.Sprintf(lib1.Code, lib1.Name, lib1.Functions[0].Name,
+ lib1.Functions[0].Description, lib1.Functions[0].Flags[0], lib1.Functions[0].Flags[1])
+
+ err := client.FunctionLoad(ctx, lib1Code).Err()
+ Expect(err).NotTo(HaveOccurred())
+
+ // This function doesn't have a "no-writes" flag
+ x := client.FCallRo(ctx, lib1.Functions[0].Name, []string{"my_hash"}, "a", 1, "b", 2)
+
+ Expect(x.Err()).To(HaveOccurred())
+
+ lib2Code = fmt.Sprintf(lib2.Code, lib2.Name, lib2.Functions[0].Name, lib2.Functions[1].Name,
+ lib2.Functions[1].Description, lib2.Functions[1].Flags[0])
+
+ // This function has a "no-writes" flag
+ err = client.FunctionLoad(ctx, lib2Code).Err()
+ Expect(err).NotTo(HaveOccurred())
+
+ x = client.FCallRo(ctx, lib2.Functions[1].Name, []string{})
+
+ Expect(x.Err()).NotTo(HaveOccurred())
+ Expect(x.Text()).To(Equal("Function 2"))
+ })
+
+ It("Shows function stats", func() {
+ defer client.FunctionKill(ctx)
+
+ // We can not run blocking commands in Redis functions, so we're using an infinite loop,
+ // but we're killing the function after calling FUNCTION STATS
+ lib := redis.Library{
+ Name: "mylib1",
+ Engine: "LUA",
+ Functions: []redis.Function{
+ {
+ Name: "lib1_func1",
+ Description: "This is the func-1 of lib 1",
+ Flags: []string{"no-writes"},
+ },
+ },
+ Code: `#!lua name=%s
+ local function f1(keys, args)
+ local i = 0
+ while true do
+ i = i + 1
+ end
+ end
+
+ redis.register_function{
+ function_name='%s',
+ description ='%s',
+ callback=f1,
+ flags={'%s'}
+ }`,
+ }
+ libCode := fmt.Sprintf(lib.Code, lib.Name, lib.Functions[0].Name,
+ lib.Functions[0].Description, lib.Functions[0].Flags[0])
+ err := client.FunctionLoad(ctx, libCode).Err()
+
+ Expect(err).NotTo(HaveOccurred())
+
+ r, err := client.FunctionStats(ctx).Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(len(r.Engines)).To(Equal(1))
+ Expect(r.Running()).To(BeFalse())
+
+ started := make(chan bool)
+ go func() {
+ defer GinkgoRecover()
+
+ client2 := redis.NewClient(redisOptions())
+
+ started <- true
+ _, err = client2.FCall(ctx, lib.Functions[0].Name, nil).Result()
+ Expect(err).To(HaveOccurred())
+ }()
+
+ <-started
+ time.Sleep(1 * time.Second)
+ r, err = client.FunctionStats(ctx).Result()
+
+ Expect(err).NotTo(HaveOccurred())
+ Expect(len(r.Engines)).To(Equal(1))
+ rs, isRunning := r.RunningScript()
+ Expect(isRunning).To(BeTrue())
+ Expect(rs.Name).To(Equal(lib.Functions[0].Name))
+ Expect(rs.Duration > 0).To(BeTrue())
+
+ close(started)
+ })
+ })
+
Describe("SlowLogGet", func() {
It("returns slow query result", func() {
const key = "slowlog-log-slower-than"
old := client.ConfigGet(ctx, key).Val()
client.ConfigSet(ctx, key, "0")
- defer client.ConfigSet(ctx, key, old[1].(string))
+ defer client.ConfigSet(ctx, key, old[key])
err := client.Do(ctx, "slowlog", "reset").Err()
Expect(err).NotTo(HaveOccurred())
diff --git a/dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/doc.go b/dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.5.1/doc.go
index 5526253..5526253 100644
--- a/dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/doc.go
+++ b/dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.5.1/doc.go
diff --git a/dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.5.1/doctests/README.md b/dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.5.1/doctests/README.md
new file mode 100644
index 0000000..2623c18
--- /dev/null
+++ b/dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.5.1/doctests/README.md
@@ -0,0 +1,22 @@
+# Command examples for redis.io
+
+These examples appear on the [Redis documentation](https://redis.io) site as part of the tabbed examples interface.
+
+## How to add examples
+
+- Create a Go test file with a meaningful name in the current folder.
+- Create a single method prefixed with `Example` and write your test in it.
+- Determine the id for the example you're creating and add it as the first line of the file: `// EXAMPLE: set_and_get`.
+- We're using the [Testable Examples](https://go.dev/blog/examples) feature of Go to test the desired output has been written to stdout.
+
+### Special markup
+
+See https://github.com/redis-stack/redis-stack-website#readme for more details.
+
+## How to test the examples
+
+- Start a Redis server locally on port 6379
+- CD into the `doctests` directory
+- Run `go test` to test all examples in the directory.
+- Run `go test filename.go` to test a single file
+
diff --git a/dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.5.1/doctests/lpush_lrange_test.go b/dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.5.1/doctests/lpush_lrange_test.go
new file mode 100644
index 0000000..1e69f4b
--- /dev/null
+++ b/dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.5.1/doctests/lpush_lrange_test.go
@@ -0,0 +1,48 @@
+// EXAMPLE: lpush_and_lrange
+// HIDE_START
+package example_commands_test
+
+import (
+ "context"
+ "fmt"
+
+ "github.com/redis/go-redis/v9"
+)
+
+func ExampleClient_LPush_and_lrange() {
+ ctx := context.Background()
+
+ rdb := redis.NewClient(&redis.Options{
+ Addr: "localhost:6379",
+ Password: "", // no password docs
+ DB: 0, // use default DB
+ })
+
+ // HIDE_END
+
+ // REMOVE_START
+ errFlush := rdb.FlushDB(ctx).Err() // Clear the database before each test
+ if errFlush != nil {
+ panic(errFlush)
+ }
+ // REMOVE_END
+
+ listSize, err := rdb.LPush(ctx, "my_bikes", "bike:1", "bike:2").Result()
+ if err != nil {
+ panic(err)
+ }
+
+ fmt.Println(listSize)
+
+ value, err := rdb.LRange(ctx, "my_bikes", 0, -1).Result()
+ if err != nil {
+ panic(err)
+ }
+ fmt.Println(value)
+ // HIDE_START
+
+ // Output: 2
+ // [bike:2 bike:1]
+}
+
+// HIDE_END
diff --git a/dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.5.1/doctests/set_get_test.go b/dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.5.1/doctests/set_get_test.go
new file mode 100644
index 0000000..ab3a936
--- /dev/null
+++ b/dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.5.1/doctests/set_get_test.go
@@ -0,0 +1,48 @@
+// EXAMPLE: set_and_get
+// HIDE_START
+package example_commands_test
+
+import (
+ "context"
+ "fmt"
+
+ "github.com/redis/go-redis/v9"
+)
+
+func ExampleClient_Set_and_get() {
+ ctx := context.Background()
+
+ rdb := redis.NewClient(&redis.Options{
+ Addr: "localhost:6379",
+ Password: "", // no password docs
+ DB: 0, // use default DB
+ })
+
+ // HIDE_END
+
+ // REMOVE_START
+ errFlush := rdb.FlushDB(ctx).Err() // Clear the database before each test
+ if errFlush != nil {
+ panic(errFlush)
+ }
+ // REMOVE_END
+
+ err := rdb.Set(ctx, "bike:1", "Process 134", 0).Err()
+ if err != nil {
+ panic(err)
+ }
+
+ fmt.Println("OK")
+
+ value, err := rdb.Get(ctx, "bike:1").Result()
+ if err != nil {
+ panic(err)
+ }
+ fmt.Printf("The name of the bike is %s", value)
+ // HIDE_START
+
+ // Output: OK
+ // The name of the bike is Process 134
+}
+
+// HIDE_END
diff --git a/dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/error.go b/dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.5.1/error.go
index 521594b..8a59913 100644
--- a/dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/error.go
+++ b/dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.5.1/error.go
@@ -2,17 +2,29 @@ package redis
import (
"context"
+ "errors"
"io"
"net"
"strings"
- "github.com/go-redis/redis/v8/internal/pool"
- "github.com/go-redis/redis/v8/internal/proto"
+ "github.com/redis/go-redis/v9/internal/pool"
+ "github.com/redis/go-redis/v9/internal/proto"
)
// ErrClosed performs any operation on the closed client will return this error.
var ErrClosed = pool.ErrClosed
+// HasErrorPrefix checks if the err is a Redis error and the message contains a prefix.
+func HasErrorPrefix(err error, prefix string) bool {
+ var rErr Error
+ if !errors.As(err, &rErr) {
+ return false
+ }
+ msg := rErr.Error()
+ msg = strings.TrimPrefix(msg, "ERR ") // KVRocks adds such prefix
+ return strings.HasPrefix(msg, prefix)
+}
+
type Error interface {
error
@@ -91,7 +103,7 @@ func isBadConn(err error, allowTimeout bool, addr string) bool {
if allowTimeout {
if netErr, ok := err.(net.Error); ok && netErr.Timeout() {
- return !netErr.Temporary()
+ return false
}
}
diff --git a/dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/example_instrumentation_test.go b/dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.5.1/example_instrumentation_test.go
index d66edce..a6069cf 100644
--- a/dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/example_instrumentation_test.go
+++ b/dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.5.1/example_instrumentation_test.go
@@ -3,32 +3,40 @@ package redis_test
import (
"context"
"fmt"
+ "net"
- "github.com/go-redis/redis/v8"
+ "github.com/redis/go-redis/v9"
)
type redisHook struct{}
var _ redis.Hook = redisHook{}
-func (redisHook) BeforeProcess(ctx context.Context, cmd redis.Cmder) (context.Context, error) {
- fmt.Printf("starting processing: <%s>\n", cmd)
- return ctx, nil
+func (redisHook) DialHook(hook redis.DialHook) redis.DialHook {
+ return func(ctx context.Context, network, addr string) (net.Conn, error) {
+ fmt.Printf("dialing %s %s\n", network, addr)
+ conn, err := hook(ctx, network, addr)
+ fmt.Printf("finished dialing %s %s\n", network, addr)
+ return conn, err
+ }
}
-func (redisHook) AfterProcess(ctx context.Context, cmd redis.Cmder) error {
- fmt.Printf("finished processing: <%s>\n", cmd)
- return nil
+func (redisHook) ProcessHook(hook redis.ProcessHook) redis.ProcessHook {
+ return func(ctx context.Context, cmd redis.Cmder) error {
+ fmt.Printf("starting processing: <%s>\n", cmd)
+ err := hook(ctx, cmd)
+ fmt.Printf("finished processing: <%s>\n", cmd)
+ return err
+ }
}
-func (redisHook) BeforeProcessPipeline(ctx context.Context, cmds []redis.Cmder) (context.Context, error) {
- fmt.Printf("pipeline starting processing: %v\n", cmds)
- return ctx, nil
-}
-
-func (redisHook) AfterProcessPipeline(ctx context.Context, cmds []redis.Cmder) error {
- fmt.Printf("pipeline finished processing: %v\n", cmds)
- return nil
+func (redisHook) ProcessPipelineHook(hook redis.ProcessPipelineHook) redis.ProcessPipelineHook {
+ return func(ctx context.Context, cmds []redis.Cmder) error {
+ fmt.Printf("pipeline starting processing: %v\n", cmds)
+ err := hook(ctx, cmds)
+ fmt.Printf("pipeline finished processing: %v\n", cmds)
+ return err
+ }
}
func Example_instrumentation() {
@@ -39,6 +47,8 @@ func Example_instrumentation() {
rdb.Ping(ctx)
// Output: starting processing: <ping: >
+ // dialing tcp :6379
+ // finished dialing tcp :6379
// finished processing: <ping: PONG>
}
@@ -54,6 +64,8 @@ func ExamplePipeline_instrumentation() {
return nil
})
// Output: pipeline starting processing: [ping: ping: ]
+ // dialing tcp :6379
+ // finished dialing tcp :6379
// pipeline finished processing: [ping: PONG ping: PONG]
}
@@ -70,6 +82,8 @@ func ExampleClient_Watch_instrumentation() {
}, "foo")
// Output:
// starting processing: <watch foo: >
+ // dialing tcp :6379
+ // finished dialing tcp :6379
// finished processing: <watch foo: OK>
// starting processing: <ping: >
// finished processing: <ping: PONG>
diff --git a/dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/example_test.go b/dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.5.1/example_test.go
index f015809..62aa8cb 100644
--- a/dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/example_test.go
+++ b/dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.5.1/example_test.go
@@ -7,7 +7,7 @@ import (
"sync"
"time"
- "github.com/go-redis/redis/v8"
+ "github.com/redis/go-redis/v9"
)
var (
@@ -154,8 +154,8 @@ func ExampleClient() {
// missing_key does not exist
}
-func ExampleConn() {
- conn := rdb.Conn(context.Background())
+func ExampleConn_name() {
+ conn := rdb.Conn()
err := conn.ClientSetName(ctx, "foobar").Err()
if err != nil {
@@ -175,6 +175,28 @@ func ExampleConn() {
// Output: foobar
}
+func ExampleConn_client_setInfo_libraryVersion() {
+ conn := rdb.Conn()
+
+ err := conn.ClientSetInfo(ctx, redis.WithLibraryVersion("1.2.3")).Err()
+ if err != nil {
+ panic(err)
+ }
+
+ // Open other connections.
+ for i := 0; i < 10; i++ {
+ go rdb.Ping(ctx)
+ }
+
+ s, err := conn.ClientInfo(ctx).Result()
+ if err != nil {
+ panic(err)
+ }
+
+ fmt.Println(s.LibVer)
+ // Output: 1.2.3
+}
+
func ExampleClient_Set() {
// Last argument is expiration. Zero means the key has no
// expiration time.
@@ -190,8 +212,23 @@ func ExampleClient_Set() {
}
}
-func ExampleClient_SetEX() {
- err := rdb.SetEX(ctx, "key", "value", time.Hour).Err()
+func ExampleClient_SetEx() {
+ err := rdb.SetEx(ctx, "key", "value", time.Hour).Err()
+ if err != nil {
+ panic(err)
+ }
+}
+
+func ExampleClient_HSet() {
+ // Set "redis" tag for hash key
+ type ExampleUser struct {
+ Name string `redis:"name"`
+ Age int `redis:"age"`
+ }
+
+ items := ExampleUser{"jane", 22}
+
+ err := rdb.HSet(ctx, "user:1", items).Err()
if err != nil {
panic(err)
}
@@ -212,7 +249,7 @@ func ExampleClient_BLPop() {
panic(err)
}
- // use `rdb.BLPop(0, "queue")` for infinite waiting time
+ // use `rdb.BLPop(ctx, 0, "queue")` for infinite waiting time
result, err := rdb.BLPop(ctx, 1*time.Second, "queue").Result()
if err != nil {
panic(err)
@@ -278,9 +315,38 @@ func ExampleClient_ScanType() {
// Output: found 33 keys
}
-// ExampleStringStringMapCmd_Scan shows how to scan the results of a map fetch
+// ExampleClient_ScanType_hashType uses the keyType "hash".
+func ExampleClient_ScanType_hashType() {
+ rdb.FlushDB(ctx)
+ for i := 0; i < 33; i++ {
+ err := rdb.HSet(context.TODO(), fmt.Sprintf("key%d", i), "value", "foo").Err()
+ if err != nil {
+ panic(err)
+ }
+ }
+
+ var allKeys []string
+ var cursor uint64
+ var err error
+
+ for {
+ var keysFromScan []string
+ keysFromScan, cursor, err = rdb.ScanType(context.TODO(), cursor, "key*", 10, "hash").Result()
+ if err != nil {
+ panic(err)
+ }
+ allKeys = append(allKeys, keysFromScan...)
+ if cursor == 0 {
+ break
+ }
+ }
+ fmt.Printf("%d keys ready for use", len(allKeys))
+ // Output: 33 keys ready for use
+}
+
+// ExampleMapStringStringCmd_Scan shows how to scan the results of a map fetch
// into a struct.
-func ExampleStringStringMapCmd_Scan() {
+func ExampleMapStringStringCmd_Scan() {
rdb.FlushDB(ctx)
err := rdb.HMSet(ctx, "map",
"name", "hello",
@@ -404,7 +470,7 @@ func ExampleClient_TxPipeline() {
}
func ExampleClient_Watch() {
- const maxRetries = 1000
+ const maxRetries = 10000
// Increment transactionally increments key using GET and SET commands.
increment := func(key string) error {
@@ -613,11 +679,16 @@ func ExampleNewUniversalClient_cluster() {
}
func ExampleClient_SlowLogGet() {
+ if RECluster {
+ // skip slowlog test for cluster
+ fmt.Println(2)
+ return
+ }
const key = "slowlog-log-slower-than"
old := rdb.ConfigGet(ctx, key).Val()
rdb.ConfigSet(ctx, key, "0")
- defer rdb.ConfigSet(ctx, key, old[1].(string))
+ defer rdb.ConfigSet(ctx, key, old[key])
if err := rdb.Do(ctx, "slowlog", "reset").Err(); err != nil {
panic(err)
diff --git a/dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/export_test.go b/dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.5.1/export_test.go
index 49c4b94..3f92983 100644
--- a/dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/export_test.go
+++ b/dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.5.1/export_test.go
@@ -6,9 +6,9 @@ import (
"net"
"strings"
- "github.com/go-redis/redis/v8/internal"
- "github.com/go-redis/redis/v8/internal/hashtag"
- "github.com/go-redis/redis/v8/internal/pool"
+ "github.com/redis/go-redis/v9/internal"
+ "github.com/redis/go-redis/v9/internal/hashtag"
+ "github.com/redis/go-redis/v9/internal/pool"
)
func (c *baseClient) Pool() pool.Pooler {
@@ -60,21 +60,21 @@ func (c *ClusterClient) SwapNodes(ctx context.Context, key string) error {
return nil
}
-func (state *clusterState) IsConsistent(ctx context.Context) bool {
- if len(state.Masters) < 3 {
+func (c *clusterState) IsConsistent(ctx context.Context) bool {
+ if len(c.Masters) < 3 {
return false
}
- for _, master := range state.Masters {
+ for _, master := range c.Masters {
s := master.Client.Info(ctx, "replication").Val()
if !strings.Contains(s, "role:master") {
return false
}
}
- if len(state.Slaves) < 3 {
+ if len(c.Slaves) < 3 {
return false
}
- for _, slave := range state.Slaves {
+ for _, slave := range c.Slaves {
s := slave.Client.Info(ctx, "replication").Val()
if !strings.Contains(s, "role:slave") {
return false
@@ -85,11 +85,20 @@ func (state *clusterState) IsConsistent(ctx context.Context) bool {
}
func GetSlavesAddrByName(ctx context.Context, c *SentinelClient, name string) []string {
- addrs, err := c.Slaves(ctx, name).Result()
+ addrs, err := c.Replicas(ctx, name).Result()
if err != nil {
- internal.Logger.Printf(ctx, "sentinel: Slaves name=%q failed: %s",
+ internal.Logger.Printf(ctx, "sentinel: Replicas name=%q failed: %s",
name, err)
return []string{}
}
- return parseSlaveAddrs(addrs, false)
+ return parseReplicaAddrs(addrs, false)
+}
+
+func (c *Ring) ShardByName(name string) *ringShard {
+ shard, _ := c.sharding.GetByName(name)
+ return shard
+}
+
+func (c *ModuleLoadexConfig) ToArgs() []interface{} {
+ return c.toArgs()
}
diff --git a/dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/fuzz/fuzz.go b/dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.5.1/fuzz/fuzz.go
index 3225d24..d4b2ac4 100644
--- a/dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/fuzz/fuzz.go
+++ b/dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.5.1/fuzz/fuzz.go
@@ -7,7 +7,7 @@ import (
"context"
"time"
- "github.com/go-redis/redis/v8"
+ "github.com/redis/go-redis/v9"
)
var (
diff --git a/dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.5.1/gears_commands.go b/dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.5.1/gears_commands.go
new file mode 100644
index 0000000..e0d49a6
--- /dev/null
+++ b/dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.5.1/gears_commands.go
@@ -0,0 +1,149 @@
+package redis
+
+import (
+ "context"
+ "fmt"
+ "strings"
+)
+
+type GearsCmdable interface {
+ TFunctionLoad(ctx context.Context, lib string) *StatusCmd
+ TFunctionLoadArgs(ctx context.Context, lib string, options *TFunctionLoadOptions) *StatusCmd
+ TFunctionDelete(ctx context.Context, libName string) *StatusCmd
+ TFunctionList(ctx context.Context) *MapStringInterfaceSliceCmd
+ TFunctionListArgs(ctx context.Context, options *TFunctionListOptions) *MapStringInterfaceSliceCmd
+ TFCall(ctx context.Context, libName string, funcName string, numKeys int) *Cmd
+ TFCallArgs(ctx context.Context, libName string, funcName string, numKeys int, options *TFCallOptions) *Cmd
+ TFCallASYNC(ctx context.Context, libName string, funcName string, numKeys int) *Cmd
+ TFCallASYNCArgs(ctx context.Context, libName string, funcName string, numKeys int, options *TFCallOptions) *Cmd
+}
+
+type TFunctionLoadOptions struct {
+ Replace bool
+ Config string
+}
+
+type TFunctionListOptions struct {
+ Withcode bool
+ Verbose int
+ Library string
+}
+
+type TFCallOptions struct {
+ Keys []string
+ Arguments []string
+}
+
+// TFunctionLoad - load a new JavaScript library into Redis.
+// For more information - https://redis.io/commands/tfunction-load/
+func (c cmdable) TFunctionLoad(ctx context.Context, lib string) *StatusCmd {
+ args := []interface{}{"TFUNCTION", "LOAD", lib}
+ cmd := NewStatusCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) TFunctionLoadArgs(ctx context.Context, lib string, options *TFunctionLoadOptions) *StatusCmd {
+ args := []interface{}{"TFUNCTION", "LOAD"}
+ if options != nil {
+ if options.Replace {
+ args = append(args, "REPLACE")
+ }
+ if options.Config != "" {
+ args = append(args, "CONFIG", options.Config)
+ }
+ }
+ args = append(args, lib)
+ cmd := NewStatusCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+// TFunctionDelete - delete a JavaScript library from Redis.
+// For more information - https://redis.io/commands/tfunction-delete/
+func (c cmdable) TFunctionDelete(ctx context.Context, libName string) *StatusCmd {
+ args := []interface{}{"TFUNCTION", "DELETE", libName}
+ cmd := NewStatusCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+// TFunctionList - list the functions with additional information about each function.
+// For more information - https://redis.io/commands/tfunction-list/
+func (c cmdable) TFunctionList(ctx context.Context) *MapStringInterfaceSliceCmd {
+ args := []interface{}{"TFUNCTION", "LIST"}
+ cmd := NewMapStringInterfaceSliceCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) TFunctionListArgs(ctx context.Context, options *TFunctionListOptions) *MapStringInterfaceSliceCmd {
+ args := []interface{}{"TFUNCTION", "LIST"}
+ if options != nil {
+ if options.Withcode {
+ args = append(args, "WITHCODE")
+ }
+ if options.Verbose != 0 {
+ v := strings.Repeat("v", options.Verbose)
+ args = append(args, v)
+ }
+ if options.Library != "" {
+ args = append(args, "LIBRARY", options.Library)
+ }
+ }
+ cmd := NewMapStringInterfaceSliceCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+// TFCall - invoke a function.
+// For more information - https://redis.io/commands/tfcall/
+func (c cmdable) TFCall(ctx context.Context, libName string, funcName string, numKeys int) *Cmd {
+ lf := libName + "." + funcName
+ args := []interface{}{"TFCALL", lf, numKeys}
+ cmd := NewCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) TFCallArgs(ctx context.Context, libName string, funcName string, numKeys int, options *TFCallOptions) *Cmd {
+ lf := libName + "." + funcName
+ args := []interface{}{"TFCALL", lf, numKeys}
+ if options != nil {
+ for _, key := range options.Keys {
+ args = append(args, key)
+ }
+ for _, key := range options.Arguments {
+ args = append(args, key)
+ }
+ }
+ cmd := NewCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+// TFCallASYNC - invoke an asynchronous JavaScript function (coroutine).
+// For more information - https://redis.io/commands/TFCallASYNC/
+func (c cmdable) TFCallASYNC(ctx context.Context, libName string, funcName string, numKeys int) *Cmd {
+ lf := fmt.Sprintf("%s.%s", libName, funcName)
+ args := []interface{}{"TFCALLASYNC", lf, numKeys}
+ cmd := NewCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) TFCallASYNCArgs(ctx context.Context, libName string, funcName string, numKeys int, options *TFCallOptions) *Cmd {
+ lf := fmt.Sprintf("%s.%s", libName, funcName)
+ args := []interface{}{"TFCALLASYNC", lf, numKeys}
+ if options != nil {
+ for _, key := range options.Keys {
+ args = append(args, key)
+ }
+ for _, key := range options.Arguments {
+ args = append(args, key)
+ }
+ }
+ cmd := NewCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
diff --git a/dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.5.1/gears_commands_test.go b/dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.5.1/gears_commands_test.go
new file mode 100644
index 0000000..b1117a4
--- /dev/null
+++ b/dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.5.1/gears_commands_test.go
@@ -0,0 +1,114 @@
+package redis_test
+
+import (
+ "context"
+ "fmt"
+
+ . "github.com/bsm/ginkgo/v2"
+ . "github.com/bsm/gomega"
+
+ "github.com/redis/go-redis/v9"
+)
+
+func libCode(libName string) string {
+ return fmt.Sprintf("#!js api_version=1.0 name=%s\n redis.registerFunction('foo', ()=>{{return 'bar'}})", libName)
+}
+
+func libCodeWithConfig(libName string) string {
+ lib := `#!js api_version=1.0 name=%s
+
+ var last_update_field_name = "__last_update__"
+
+ if (redis.config.last_update_field_name !== undefined) {
+ if (typeof redis.config.last_update_field_name != 'string') {
+ throw "last_update_field_name must be a string";
+ }
+ last_update_field_name = redis.config.last_update_field_name
+ }
+
+ redis.registerFunction("hset", function(client, key, field, val){
+ // get the current time in ms
+ var curr_time = client.call("time")[0];
+ return client.call('hset', key, field, val, last_update_field_name, curr_time);
+ });`
+ return fmt.Sprintf(lib, libName)
+}
+
+var _ = Describe("RedisGears commands", Label("gears"), func() {
+ ctx := context.TODO()
+ var client *redis.Client
+
+ BeforeEach(func() {
+ client = redis.NewClient(&redis.Options{Addr: ":6379"})
+ Expect(client.FlushDB(ctx).Err()).NotTo(HaveOccurred())
+ client.TFunctionDelete(ctx, "lib1")
+ })
+
+ AfterEach(func() {
+ Expect(client.Close()).NotTo(HaveOccurred())
+ })
+
+ It("should TFunctionLoad, TFunctionLoadArgs and TFunctionDelete ", Label("gears", "tfunctionload"), func() {
+ resultAdd, err := client.TFunctionLoad(ctx, libCode("lib1")).Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(resultAdd).To(BeEquivalentTo("OK"))
+ opt := &redis.TFunctionLoadOptions{Replace: true, Config: `{"last_update_field_name":"last_update"}`}
+ resultAdd, err = client.TFunctionLoadArgs(ctx, libCodeWithConfig("lib1"), opt).Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(resultAdd).To(BeEquivalentTo("OK"))
+ })
+ It("should TFunctionList", Label("gears", "tfunctionlist"), func() {
+ resultAdd, err := client.TFunctionLoad(ctx, libCode("lib1")).Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(resultAdd).To(BeEquivalentTo("OK"))
+ resultList, err := client.TFunctionList(ctx).Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(resultList[0]["engine"]).To(BeEquivalentTo("js"))
+ opt := &redis.TFunctionListOptions{Withcode: true, Verbose: 2}
+ resultListArgs, err := client.TFunctionListArgs(ctx, opt).Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(resultListArgs[0]["code"]).NotTo(BeEquivalentTo(""))
+ })
+
+ It("should TFCall", Label("gears", "tfcall"), func() {
+ var resultAdd interface{}
+ resultAdd, err := client.TFunctionLoad(ctx, libCode("lib1")).Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(resultAdd).To(BeEquivalentTo("OK"))
+ resultAdd, err = client.TFCall(ctx, "lib1", "foo", 0).Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(resultAdd).To(BeEquivalentTo("bar"))
+ })
+
+ It("should TFCallArgs", Label("gears", "tfcallargs"), func() {
+ var resultAdd interface{}
+ resultAdd, err := client.TFunctionLoad(ctx, libCode("lib1")).Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(resultAdd).To(BeEquivalentTo("OK"))
+ opt := &redis.TFCallOptions{Arguments: []string{"foo", "bar"}}
+ resultAdd, err = client.TFCallArgs(ctx, "lib1", "foo", 0, opt).Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(resultAdd).To(BeEquivalentTo("bar"))
+ })
+
+ It("should TFCallASYNC", Label("gears", "TFCallASYNC"), func() {
+ var resultAdd interface{}
+ resultAdd, err := client.TFunctionLoad(ctx, libCode("lib1")).Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(resultAdd).To(BeEquivalentTo("OK"))
+ resultAdd, err = client.TFCallASYNC(ctx, "lib1", "foo", 0).Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(resultAdd).To(BeEquivalentTo("bar"))
+ })
+
+ It("should TFCallASYNCArgs", Label("gears", "TFCallASYNCargs"), func() {
+ var resultAdd interface{}
+ resultAdd, err := client.TFunctionLoad(ctx, libCode("lib1")).Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(resultAdd).To(BeEquivalentTo("OK"))
+ opt := &redis.TFCallOptions{Arguments: []string{"foo", "bar"}}
+ resultAdd, err = client.TFCallASYNCArgs(ctx, "lib1", "foo", 0, opt).Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(resultAdd).To(BeEquivalentTo("bar"))
+ })
+})
diff --git a/dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.5.1/generic_commands.go b/dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.5.1/generic_commands.go
new file mode 100644
index 0000000..dc6c3fe
--- /dev/null
+++ b/dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.5.1/generic_commands.go
@@ -0,0 +1,384 @@
+package redis
+
+import (
+ "context"
+ "time"
+)
+
+type GenericCmdable interface {
+ Del(ctx context.Context, keys ...string) *IntCmd
+ Dump(ctx context.Context, key string) *StringCmd
+ Exists(ctx context.Context, keys ...string) *IntCmd
+ Expire(ctx context.Context, key string, expiration time.Duration) *BoolCmd
+ ExpireAt(ctx context.Context, key string, tm time.Time) *BoolCmd
+ ExpireTime(ctx context.Context, key string) *DurationCmd
+ ExpireNX(ctx context.Context, key string, expiration time.Duration) *BoolCmd
+ ExpireXX(ctx context.Context, key string, expiration time.Duration) *BoolCmd
+ ExpireGT(ctx context.Context, key string, expiration time.Duration) *BoolCmd
+ ExpireLT(ctx context.Context, key string, expiration time.Duration) *BoolCmd
+ Keys(ctx context.Context, pattern string) *StringSliceCmd
+ Migrate(ctx context.Context, host, port, key string, db int, timeout time.Duration) *StatusCmd
+ Move(ctx context.Context, key string, db int) *BoolCmd
+ ObjectFreq(ctx context.Context, key string) *IntCmd
+ ObjectRefCount(ctx context.Context, key string) *IntCmd
+ ObjectEncoding(ctx context.Context, key string) *StringCmd
+ ObjectIdleTime(ctx context.Context, key string) *DurationCmd
+ Persist(ctx context.Context, key string) *BoolCmd
+ PExpire(ctx context.Context, key string, expiration time.Duration) *BoolCmd
+ PExpireAt(ctx context.Context, key string, tm time.Time) *BoolCmd
+ PExpireTime(ctx context.Context, key string) *DurationCmd
+ PTTL(ctx context.Context, key string) *DurationCmd
+ RandomKey(ctx context.Context) *StringCmd
+ Rename(ctx context.Context, key, newkey string) *StatusCmd
+ RenameNX(ctx context.Context, key, newkey string) *BoolCmd
+ Restore(ctx context.Context, key string, ttl time.Duration, value string) *StatusCmd
+ RestoreReplace(ctx context.Context, key string, ttl time.Duration, value string) *StatusCmd
+ Sort(ctx context.Context, key string, sort *Sort) *StringSliceCmd
+ SortRO(ctx context.Context, key string, sort *Sort) *StringSliceCmd
+ SortStore(ctx context.Context, key, store string, sort *Sort) *IntCmd
+ SortInterfaces(ctx context.Context, key string, sort *Sort) *SliceCmd
+ Touch(ctx context.Context, keys ...string) *IntCmd
+ TTL(ctx context.Context, key string) *DurationCmd
+ Type(ctx context.Context, key string) *StatusCmd
+ Copy(ctx context.Context, sourceKey string, destKey string, db int, replace bool) *IntCmd
+
+ Scan(ctx context.Context, cursor uint64, match string, count int64) *ScanCmd
+ ScanType(ctx context.Context, cursor uint64, match string, count int64, keyType string) *ScanCmd
+}
+
+func (c cmdable) Del(ctx context.Context, keys ...string) *IntCmd {
+ args := make([]interface{}, 1+len(keys))
+ args[0] = "del"
+ for i, key := range keys {
+ args[1+i] = key
+ }
+ cmd := NewIntCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) Unlink(ctx context.Context, keys ...string) *IntCmd {
+ args := make([]interface{}, 1+len(keys))
+ args[0] = "unlink"
+ for i, key := range keys {
+ args[1+i] = key
+ }
+ cmd := NewIntCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) Dump(ctx context.Context, key string) *StringCmd {
+ cmd := NewStringCmd(ctx, "dump", key)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) Exists(ctx context.Context, keys ...string) *IntCmd {
+ args := make([]interface{}, 1+len(keys))
+ args[0] = "exists"
+ for i, key := range keys {
+ args[1+i] = key
+ }
+ cmd := NewIntCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) Expire(ctx context.Context, key string, expiration time.Duration) *BoolCmd {
+ return c.expire(ctx, key, expiration, "")
+}
+
+func (c cmdable) ExpireNX(ctx context.Context, key string, expiration time.Duration) *BoolCmd {
+ return c.expire(ctx, key, expiration, "NX")
+}
+
+func (c cmdable) ExpireXX(ctx context.Context, key string, expiration time.Duration) *BoolCmd {
+ return c.expire(ctx, key, expiration, "XX")
+}
+
+func (c cmdable) ExpireGT(ctx context.Context, key string, expiration time.Duration) *BoolCmd {
+ return c.expire(ctx, key, expiration, "GT")
+}
+
+func (c cmdable) ExpireLT(ctx context.Context, key string, expiration time.Duration) *BoolCmd {
+ return c.expire(ctx, key, expiration, "LT")
+}
+
+func (c cmdable) expire(
+ ctx context.Context, key string, expiration time.Duration, mode string,
+) *BoolCmd {
+ args := make([]interface{}, 3, 4)
+ args[0] = "expire"
+ args[1] = key
+ args[2] = formatSec(ctx, expiration)
+ if mode != "" {
+ args = append(args, mode)
+ }
+
+ cmd := NewBoolCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) ExpireAt(ctx context.Context, key string, tm time.Time) *BoolCmd {
+ cmd := NewBoolCmd(ctx, "expireat", key, tm.Unix())
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) ExpireTime(ctx context.Context, key string) *DurationCmd {
+ cmd := NewDurationCmd(ctx, time.Second, "expiretime", key)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) Keys(ctx context.Context, pattern string) *StringSliceCmd {
+ cmd := NewStringSliceCmd(ctx, "keys", pattern)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) Migrate(ctx context.Context, host, port, key string, db int, timeout time.Duration) *StatusCmd {
+ cmd := NewStatusCmd(
+ ctx,
+ "migrate",
+ host,
+ port,
+ key,
+ db,
+ formatMs(ctx, timeout),
+ )
+ cmd.setReadTimeout(timeout)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) Move(ctx context.Context, key string, db int) *BoolCmd {
+ cmd := NewBoolCmd(ctx, "move", key, db)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) ObjectFreq(ctx context.Context, key string) *IntCmd {
+ cmd := NewIntCmd(ctx, "object", "freq", key)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) ObjectRefCount(ctx context.Context, key string) *IntCmd {
+ cmd := NewIntCmd(ctx, "object", "refcount", key)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) ObjectEncoding(ctx context.Context, key string) *StringCmd {
+ cmd := NewStringCmd(ctx, "object", "encoding", key)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) ObjectIdleTime(ctx context.Context, key string) *DurationCmd {
+ cmd := NewDurationCmd(ctx, time.Second, "object", "idletime", key)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) Persist(ctx context.Context, key string) *BoolCmd {
+ cmd := NewBoolCmd(ctx, "persist", key)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) PExpire(ctx context.Context, key string, expiration time.Duration) *BoolCmd {
+ cmd := NewBoolCmd(ctx, "pexpire", key, formatMs(ctx, expiration))
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) PExpireAt(ctx context.Context, key string, tm time.Time) *BoolCmd {
+ cmd := NewBoolCmd(
+ ctx,
+ "pexpireat",
+ key,
+ tm.UnixNano()/int64(time.Millisecond),
+ )
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) PExpireTime(ctx context.Context, key string) *DurationCmd {
+ cmd := NewDurationCmd(ctx, time.Millisecond, "pexpiretime", key)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) PTTL(ctx context.Context, key string) *DurationCmd {
+ cmd := NewDurationCmd(ctx, time.Millisecond, "pttl", key)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) RandomKey(ctx context.Context) *StringCmd {
+ cmd := NewStringCmd(ctx, "randomkey")
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) Rename(ctx context.Context, key, newkey string) *StatusCmd {
+ cmd := NewStatusCmd(ctx, "rename", key, newkey)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) RenameNX(ctx context.Context, key, newkey string) *BoolCmd {
+ cmd := NewBoolCmd(ctx, "renamenx", key, newkey)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) Restore(ctx context.Context, key string, ttl time.Duration, value string) *StatusCmd {
+ cmd := NewStatusCmd(
+ ctx,
+ "restore",
+ key,
+ formatMs(ctx, ttl),
+ value,
+ )
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) RestoreReplace(ctx context.Context, key string, ttl time.Duration, value string) *StatusCmd {
+ cmd := NewStatusCmd(
+ ctx,
+ "restore",
+ key,
+ formatMs(ctx, ttl),
+ value,
+ "replace",
+ )
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+type Sort struct {
+ By string
+ Offset, Count int64
+ Get []string
+ Order string
+ Alpha bool
+}
+
+func (sort *Sort) args(command, key string) []interface{} {
+ args := []interface{}{command, key}
+
+ if sort.By != "" {
+ args = append(args, "by", sort.By)
+ }
+ if sort.Offset != 0 || sort.Count != 0 {
+ args = append(args, "limit", sort.Offset, sort.Count)
+ }
+ for _, get := range sort.Get {
+ args = append(args, "get", get)
+ }
+ if sort.Order != "" {
+ args = append(args, sort.Order)
+ }
+ if sort.Alpha {
+ args = append(args, "alpha")
+ }
+ return args
+}
+
+func (c cmdable) SortRO(ctx context.Context, key string, sort *Sort) *StringSliceCmd {
+ cmd := NewStringSliceCmd(ctx, sort.args("sort_ro", key)...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) Sort(ctx context.Context, key string, sort *Sort) *StringSliceCmd {
+ cmd := NewStringSliceCmd(ctx, sort.args("sort", key)...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) SortStore(ctx context.Context, key, store string, sort *Sort) *IntCmd {
+ args := sort.args("sort", key)
+ if store != "" {
+ args = append(args, "store", store)
+ }
+ cmd := NewIntCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) SortInterfaces(ctx context.Context, key string, sort *Sort) *SliceCmd {
+ cmd := NewSliceCmd(ctx, sort.args("sort", key)...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) Touch(ctx context.Context, keys ...string) *IntCmd {
+ args := make([]interface{}, len(keys)+1)
+ args[0] = "touch"
+ for i, key := range keys {
+ args[i+1] = key
+ }
+ cmd := NewIntCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) TTL(ctx context.Context, key string) *DurationCmd {
+ cmd := NewDurationCmd(ctx, time.Second, "ttl", key)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) Type(ctx context.Context, key string) *StatusCmd {
+ cmd := NewStatusCmd(ctx, "type", key)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) Copy(ctx context.Context, sourceKey, destKey string, db int, replace bool) *IntCmd {
+ args := []interface{}{"copy", sourceKey, destKey, "DB", db}
+ if replace {
+ args = append(args, "REPLACE")
+ }
+ cmd := NewIntCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+//------------------------------------------------------------------------------
+
+func (c cmdable) Scan(ctx context.Context, cursor uint64, match string, count int64) *ScanCmd {
+ args := []interface{}{"scan", cursor}
+ if match != "" {
+ args = append(args, "match", match)
+ }
+ if count > 0 {
+ args = append(args, "count", count)
+ }
+ cmd := NewScanCmd(ctx, c, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) ScanType(ctx context.Context, cursor uint64, match string, count int64, keyType string) *ScanCmd {
+ args := []interface{}{"scan", cursor}
+ if match != "" {
+ args = append(args, "match", match)
+ }
+ if count > 0 {
+ args = append(args, "count", count)
+ }
+ if keyType != "" {
+ args = append(args, "type", keyType)
+ }
+ cmd := NewScanCmd(ctx, c, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
diff --git a/dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.5.1/geo_commands.go b/dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.5.1/geo_commands.go
new file mode 100644
index 0000000..f047b98
--- /dev/null
+++ b/dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.5.1/geo_commands.go
@@ -0,0 +1,155 @@
+package redis
+
+import (
+ "context"
+ "errors"
+)
+
+type GeoCmdable interface {
+ GeoAdd(ctx context.Context, key string, geoLocation ...*GeoLocation) *IntCmd
+ GeoPos(ctx context.Context, key string, members ...string) *GeoPosCmd
+ GeoRadius(ctx context.Context, key string, longitude, latitude float64, query *GeoRadiusQuery) *GeoLocationCmd
+ GeoRadiusStore(ctx context.Context, key string, longitude, latitude float64, query *GeoRadiusQuery) *IntCmd
+ GeoRadiusByMember(ctx context.Context, key, member string, query *GeoRadiusQuery) *GeoLocationCmd
+ GeoRadiusByMemberStore(ctx context.Context, key, member string, query *GeoRadiusQuery) *IntCmd
+ GeoSearch(ctx context.Context, key string, q *GeoSearchQuery) *StringSliceCmd
+ GeoSearchLocation(ctx context.Context, key string, q *GeoSearchLocationQuery) *GeoSearchLocationCmd
+ GeoSearchStore(ctx context.Context, key, store string, q *GeoSearchStoreQuery) *IntCmd
+ GeoDist(ctx context.Context, key string, member1, member2, unit string) *FloatCmd
+ GeoHash(ctx context.Context, key string, members ...string) *StringSliceCmd
+}
+
+func (c cmdable) GeoAdd(ctx context.Context, key string, geoLocation ...*GeoLocation) *IntCmd {
+ args := make([]interface{}, 2+3*len(geoLocation))
+ args[0] = "geoadd"
+ args[1] = key
+ for i, eachLoc := range geoLocation {
+ args[2+3*i] = eachLoc.Longitude
+ args[2+3*i+1] = eachLoc.Latitude
+ args[2+3*i+2] = eachLoc.Name
+ }
+ cmd := NewIntCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+// GeoRadius is a read-only GEORADIUS_RO command.
+func (c cmdable) GeoRadius(
+ ctx context.Context, key string, longitude, latitude float64, query *GeoRadiusQuery,
+) *GeoLocationCmd {
+ cmd := NewGeoLocationCmd(ctx, query, "georadius_ro", key, longitude, latitude)
+ if query.Store != "" || query.StoreDist != "" {
+ cmd.SetErr(errors.New("GeoRadius does not support Store or StoreDist"))
+ return cmd
+ }
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+// GeoRadiusStore is a writing GEORADIUS command.
+func (c cmdable) GeoRadiusStore(
+ ctx context.Context, key string, longitude, latitude float64, query *GeoRadiusQuery,
+) *IntCmd {
+ args := geoLocationArgs(query, "georadius", key, longitude, latitude)
+ cmd := NewIntCmd(ctx, args...)
+ if query.Store == "" && query.StoreDist == "" {
+ cmd.SetErr(errors.New("GeoRadiusStore requires Store or StoreDist"))
+ return cmd
+ }
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+// GeoRadiusByMember is a read-only GEORADIUSBYMEMBER_RO command.
+func (c cmdable) GeoRadiusByMember(
+ ctx context.Context, key, member string, query *GeoRadiusQuery,
+) *GeoLocationCmd {
+ cmd := NewGeoLocationCmd(ctx, query, "georadiusbymember_ro", key, member)
+ if query.Store != "" || query.StoreDist != "" {
+ cmd.SetErr(errors.New("GeoRadiusByMember does not support Store or StoreDist"))
+ return cmd
+ }
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+// GeoRadiusByMemberStore is a writing GEORADIUSBYMEMBER command.
+func (c cmdable) GeoRadiusByMemberStore(
+ ctx context.Context, key, member string, query *GeoRadiusQuery,
+) *IntCmd {
+ args := geoLocationArgs(query, "georadiusbymember", key, member)
+ cmd := NewIntCmd(ctx, args...)
+ if query.Store == "" && query.StoreDist == "" {
+ cmd.SetErr(errors.New("GeoRadiusByMemberStore requires Store or StoreDist"))
+ return cmd
+ }
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) GeoSearch(ctx context.Context, key string, q *GeoSearchQuery) *StringSliceCmd {
+ args := make([]interface{}, 0, 13)
+ args = append(args, "geosearch", key)
+ args = geoSearchArgs(q, args)
+ cmd := NewStringSliceCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) GeoSearchLocation(
+ ctx context.Context, key string, q *GeoSearchLocationQuery,
+) *GeoSearchLocationCmd {
+ args := make([]interface{}, 0, 16)
+ args = append(args, "geosearch", key)
+ args = geoSearchLocationArgs(q, args)
+ cmd := NewGeoSearchLocationCmd(ctx, q, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) GeoSearchStore(ctx context.Context, key, store string, q *GeoSearchStoreQuery) *IntCmd {
+ args := make([]interface{}, 0, 15)
+ args = append(args, "geosearchstore", store, key)
+ args = geoSearchArgs(&q.GeoSearchQuery, args)
+ if q.StoreDist {
+ args = append(args, "storedist")
+ }
+ cmd := NewIntCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) GeoDist(
+ ctx context.Context, key string, member1, member2, unit string,
+) *FloatCmd {
+ if unit == "" {
+ unit = "km"
+ }
+ cmd := NewFloatCmd(ctx, "geodist", key, member1, member2, unit)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) GeoHash(ctx context.Context, key string, members ...string) *StringSliceCmd {
+ args := make([]interface{}, 2+len(members))
+ args[0] = "geohash"
+ args[1] = key
+ for i, member := range members {
+ args[2+i] = member
+ }
+ cmd := NewStringSliceCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) GeoPos(ctx context.Context, key string, members ...string) *GeoPosCmd {
+ args := make([]interface{}, 2+len(members))
+ args[0] = "geopos"
+ args[1] = key
+ for i, member := range members {
+ args[2+i] = member
+ }
+ cmd := NewGeoPosCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
diff --git a/dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.5.1/go.mod b/dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.5.1/go.mod
new file mode 100644
index 0000000..6c65f09
--- /dev/null
+++ b/dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.5.1/go.mod
@@ -0,0 +1,10 @@
+module github.com/redis/go-redis/v9
+
+go 1.18
+
+require (
+ github.com/bsm/ginkgo/v2 v2.12.0
+ github.com/bsm/gomega v1.27.10
+ github.com/cespare/xxhash/v2 v2.2.0
+ github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f
+)
diff --git a/dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.5.1/go.sum b/dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.5.1/go.sum
new file mode 100644
index 0000000..21b4f64
--- /dev/null
+++ b/dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.5.1/go.sum
@@ -0,0 +1,8 @@
+github.com/bsm/ginkgo/v2 v2.12.0 h1:Ny8MWAHyOepLGlLKYmXG4IEkioBysk6GpaRTLC8zwWs=
+github.com/bsm/ginkgo/v2 v2.12.0/go.mod h1:SwYbGRRDovPVboqFv0tPTcG1sN61LM1Z4ARdbAV9g4c=
+github.com/bsm/gomega v1.27.10 h1:yeMWxP2pV2fG3FgAODIY8EiRE3dy0aeFYt4l7wh6yKA=
+github.com/bsm/gomega v1.27.10/go.mod h1:JyEr/xRbxbtgWNi8tIEVPUYZ5Dzef52k01W3YH0H+O0=
+github.com/cespare/xxhash/v2 v2.2.0 h1:DC2CZ1Ep5Y4k3ZQ899DldepgrayRUGE6BBZ/cd9Cj44=
+github.com/cespare/xxhash/v2 v2.2.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
+github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f h1:lO4WD4F/rVNCu3HqELle0jiPLLBs70cWOduZpkS1E78=
+github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f/go.mod h1:cuUVRXasLTGF7a8hSLbxyZXjz+1KgoB3wDUb6vlszIc=
diff --git a/dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.5.1/hash_commands.go b/dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.5.1/hash_commands.go
new file mode 100644
index 0000000..2c62a75
--- /dev/null
+++ b/dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.5.1/hash_commands.go
@@ -0,0 +1,174 @@
+package redis
+
+import "context"
+
+type HashCmdable interface {
+ HDel(ctx context.Context, key string, fields ...string) *IntCmd
+ HExists(ctx context.Context, key, field string) *BoolCmd
+ HGet(ctx context.Context, key, field string) *StringCmd
+ HGetAll(ctx context.Context, key string) *MapStringStringCmd
+ HIncrBy(ctx context.Context, key, field string, incr int64) *IntCmd
+ HIncrByFloat(ctx context.Context, key, field string, incr float64) *FloatCmd
+ HKeys(ctx context.Context, key string) *StringSliceCmd
+ HLen(ctx context.Context, key string) *IntCmd
+ HMGet(ctx context.Context, key string, fields ...string) *SliceCmd
+ HSet(ctx context.Context, key string, values ...interface{}) *IntCmd
+ HMSet(ctx context.Context, key string, values ...interface{}) *BoolCmd
+ HSetNX(ctx context.Context, key, field string, value interface{}) *BoolCmd
+ HScan(ctx context.Context, key string, cursor uint64, match string, count int64) *ScanCmd
+ HVals(ctx context.Context, key string) *StringSliceCmd
+ HRandField(ctx context.Context, key string, count int) *StringSliceCmd
+ HRandFieldWithValues(ctx context.Context, key string, count int) *KeyValueSliceCmd
+}
+
+func (c cmdable) HDel(ctx context.Context, key string, fields ...string) *IntCmd {
+ args := make([]interface{}, 2+len(fields))
+ args[0] = "hdel"
+ args[1] = key
+ for i, field := range fields {
+ args[2+i] = field
+ }
+ cmd := NewIntCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) HExists(ctx context.Context, key, field string) *BoolCmd {
+ cmd := NewBoolCmd(ctx, "hexists", key, field)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) HGet(ctx context.Context, key, field string) *StringCmd {
+ cmd := NewStringCmd(ctx, "hget", key, field)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) HGetAll(ctx context.Context, key string) *MapStringStringCmd {
+ cmd := NewMapStringStringCmd(ctx, "hgetall", key)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) HIncrBy(ctx context.Context, key, field string, incr int64) *IntCmd {
+ cmd := NewIntCmd(ctx, "hincrby", key, field, incr)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) HIncrByFloat(ctx context.Context, key, field string, incr float64) *FloatCmd {
+ cmd := NewFloatCmd(ctx, "hincrbyfloat", key, field, incr)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) HKeys(ctx context.Context, key string) *StringSliceCmd {
+ cmd := NewStringSliceCmd(ctx, "hkeys", key)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) HLen(ctx context.Context, key string) *IntCmd {
+ cmd := NewIntCmd(ctx, "hlen", key)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+// HMGet returns the values for the specified fields in the hash stored at key.
+// It returns an interface{} to distinguish between empty string and nil value.
+func (c cmdable) HMGet(ctx context.Context, key string, fields ...string) *SliceCmd {
+ args := make([]interface{}, 2+len(fields))
+ args[0] = "hmget"
+ args[1] = key
+ for i, field := range fields {
+ args[2+i] = field
+ }
+ cmd := NewSliceCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+// HSet accepts values in following formats:
+//
+// - HSet("myhash", "key1", "value1", "key2", "value2")
+//
+// - HSet("myhash", []string{"key1", "value1", "key2", "value2"})
+//
+// - HSet("myhash", map[string]interface{}{"key1": "value1", "key2": "value2"})
+//
+// Playing struct With "redis" tag.
+// type MyHash struct { Key1 string `redis:"key1"`; Key2 int `redis:"key2"` }
+//
+// - HSet("myhash", MyHash{"value1", "value2"}) Warn: redis-server >= 4.0
+//
+// For struct, can be a structure pointer type, we only parse the field whose tag is redis.
+// if you don't want the field to be read, you can use the `redis:"-"` flag to ignore it,
+// or you don't need to set the redis tag.
+// For the type of structure field, we only support simple data types:
+// string, int/uint(8,16,32,64), float(32,64), time.Time(to RFC3339Nano), time.Duration(to Nanoseconds ),
+// if you are other more complex or custom data types, please implement the encoding.BinaryMarshaler interface.
+//
+// Note that in older versions of Redis server(redis-server < 4.0), HSet only supports a single key-value pair.
+// redis-docs: https://redis.io/commands/hset (Starting with Redis version 4.0.0: Accepts multiple field and value arguments.)
+// If you are using a Struct type and the number of fields is greater than one,
+// you will receive an error similar to "ERR wrong number of arguments", you can use HMSet as a substitute.
+func (c cmdable) HSet(ctx context.Context, key string, values ...interface{}) *IntCmd {
+ args := make([]interface{}, 2, 2+len(values))
+ args[0] = "hset"
+ args[1] = key
+ args = appendArgs(args, values)
+ cmd := NewIntCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+// HMSet is a deprecated version of HSet left for compatibility with Redis 3.
+func (c cmdable) HMSet(ctx context.Context, key string, values ...interface{}) *BoolCmd {
+ args := make([]interface{}, 2, 2+len(values))
+ args[0] = "hmset"
+ args[1] = key
+ args = appendArgs(args, values)
+ cmd := NewBoolCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) HSetNX(ctx context.Context, key, field string, value interface{}) *BoolCmd {
+ cmd := NewBoolCmd(ctx, "hsetnx", key, field, value)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) HVals(ctx context.Context, key string) *StringSliceCmd {
+ cmd := NewStringSliceCmd(ctx, "hvals", key)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+// HRandField redis-server version >= 6.2.0.
+func (c cmdable) HRandField(ctx context.Context, key string, count int) *StringSliceCmd {
+ cmd := NewStringSliceCmd(ctx, "hrandfield", key, count)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+// HRandFieldWithValues redis-server version >= 6.2.0.
+func (c cmdable) HRandFieldWithValues(ctx context.Context, key string, count int) *KeyValueSliceCmd {
+ cmd := NewKeyValueSliceCmd(ctx, "hrandfield", key, count, "withvalues")
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) HScan(ctx context.Context, key string, cursor uint64, match string, count int64) *ScanCmd {
+ args := []interface{}{"hscan", key, cursor}
+ if match != "" {
+ args = append(args, "match", match)
+ }
+ if count > 0 {
+ args = append(args, "count", count)
+ }
+ cmd := NewScanCmd(ctx, c, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
diff --git a/dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.5.1/hyperloglog_commands.go b/dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.5.1/hyperloglog_commands.go
new file mode 100644
index 0000000..5a608fa
--- /dev/null
+++ b/dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.5.1/hyperloglog_commands.go
@@ -0,0 +1,42 @@
+package redis
+
+import "context"
+
+type HyperLogLogCmdable interface {
+ PFAdd(ctx context.Context, key string, els ...interface{}) *IntCmd
+ PFCount(ctx context.Context, keys ...string) *IntCmd
+ PFMerge(ctx context.Context, dest string, keys ...string) *StatusCmd
+}
+
+func (c cmdable) PFAdd(ctx context.Context, key string, els ...interface{}) *IntCmd {
+ args := make([]interface{}, 2, 2+len(els))
+ args[0] = "pfadd"
+ args[1] = key
+ args = appendArgs(args, els)
+ cmd := NewIntCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) PFCount(ctx context.Context, keys ...string) *IntCmd {
+ args := make([]interface{}, 1+len(keys))
+ args[0] = "pfcount"
+ for i, key := range keys {
+ args[1+i] = key
+ }
+ cmd := NewIntCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) PFMerge(ctx context.Context, dest string, keys ...string) *StatusCmd {
+ args := make([]interface{}, 2+len(keys))
+ args[0] = "pfmerge"
+ args[1] = dest
+ for i, key := range keys {
+ args[2+i] = key
+ }
+ cmd := NewStatusCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
diff --git a/dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/internal/arg.go b/dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.5.1/internal/arg.go
index b97fa0d..2e5ca33 100644
--- a/dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/internal/arg.go
+++ b/dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.5.1/internal/arg.go
@@ -4,6 +4,8 @@ import (
"fmt"
"strconv"
"time"
+
+ "github.com/redis/go-redis/v9/internal/util"
)
func AppendArg(b []byte, v interface{}) []byte {
@@ -11,7 +13,7 @@ func AppendArg(b []byte, v interface{}) []byte {
case nil:
return append(b, "<nil>"...)
case string:
- return appendUTF8String(b, Bytes(v))
+ return appendUTF8String(b, util.StringToBytes(v))
case []byte:
return appendUTF8String(b, v)
case int:
diff --git a/dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/internal/hashtag/hashtag.go b/dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.5.1/internal/hashtag/hashtag.go
index b3a4f21..f13ee81 100644
--- a/dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/internal/hashtag/hashtag.go
+++ b/dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.5.1/internal/hashtag/hashtag.go
@@ -3,7 +3,7 @@ package hashtag
import (
"strings"
- "github.com/go-redis/redis/v8/internal/rand"
+ "github.com/redis/go-redis/v9/internal/rand"
)
const slotNumber = 16384
diff --git a/dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/internal/hashtag/hashtag_test.go b/dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.5.1/internal/hashtag/hashtag_test.go
index c0b6396..fe4865b 100644
--- a/dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/internal/hashtag/hashtag_test.go
+++ b/dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.5.1/internal/hashtag/hashtag_test.go
@@ -3,10 +3,10 @@ package hashtag
import (
"testing"
- . "github.com/onsi/ginkgo"
- . "github.com/onsi/gomega"
+ . "github.com/bsm/ginkgo/v2"
+ . "github.com/bsm/gomega"
- "github.com/go-redis/redis/v8/internal/rand"
+ "github.com/redis/go-redis/v9/internal/rand"
)
func TestGinkgoSuite(t *testing.T) {
diff --git a/dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/internal/hscan/hscan.go b/dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.5.1/internal/hscan/hscan.go
index 852c8bd..203ec4a 100644
--- a/dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/internal/hscan/hscan.go
+++ b/dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.5.1/internal/hscan/hscan.go
@@ -10,6 +10,12 @@ import (
// decoderFunc represents decoding functions for default built-in types.
type decoderFunc func(reflect.Value, string) error
+// Scanner is the interface implemented by themselves,
+// which will override the decoding behavior of decoderFunc.
+type Scanner interface {
+ ScanRedis(s string) error
+}
+
var (
// List of built-in decoders indexed by their numeric constant values (eg: reflect.Bool = 1).
decoders = []decoderFunc{
diff --git a/dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/internal/hscan/hscan_test.go b/dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.5.1/internal/hscan/hscan_test.go
index ab4c0e1..6c9b303 100644
--- a/dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/internal/hscan/hscan_test.go
+++ b/dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.5.1/internal/hscan/hscan_test.go
@@ -4,9 +4,12 @@ import (
"math"
"strconv"
"testing"
+ "time"
- . "github.com/onsi/ginkgo"
- . "github.com/onsi/gomega"
+ . "github.com/bsm/ginkgo/v2"
+ . "github.com/bsm/gomega"
+
+ "github.com/redis/go-redis/v9/internal/util"
)
type data struct {
@@ -28,6 +31,21 @@ type data struct {
Float float32 `redis:"float"`
Float64 float64 `redis:"float64"`
Bool bool `redis:"bool"`
+ BoolRef *bool `redis:"boolRef"`
+}
+
+type TimeRFC3339Nano struct {
+ time.Time
+}
+
+func (t *TimeRFC3339Nano) ScanRedis(s string) (err error) {
+ t.Time, err = time.Parse(time.RFC3339Nano, s)
+ return
+}
+
+type TimeData struct {
+ Name string `redis:"name"`
+ Time *TimeRFC3339Nano `redis:"login"`
}
type i []interface{}
@@ -102,10 +120,10 @@ var _ = Describe("Scan", func() {
Expect(Scan(&d, i{"key"}, i{"value"})).NotTo(HaveOccurred())
Expect(d).To(Equal(data{}))
- keys := i{"string", "byte", "int", "int64", "uint", "uint64", "float", "float64", "bool"}
+ keys := i{"string", "byte", "int", "int64", "uint", "uint64", "float", "float64", "bool", "boolRef"}
vals := i{
"str!", "bytes!", "123", "123456789123456789", "456", "987654321987654321",
- "123.456", "123456789123456789.987654321987654321", "1",
+ "123.456", "123456789123456789.987654321987654321", "1", "1",
}
Expect(Scan(&d, keys, vals)).NotTo(HaveOccurred())
Expect(d).To(Equal(data{
@@ -118,6 +136,7 @@ var _ = Describe("Scan", func() {
Float: 123.456,
Float64: 1.2345678912345678e+17,
Bool: true,
+ BoolRef: util.ToPtr(true),
}))
// Scan a different type with the same values to test that
@@ -152,6 +171,7 @@ var _ = Describe("Scan", func() {
Float: 1.0,
Float64: 1.2345678912345678e+17,
Bool: true,
+ BoolRef: util.ToPtr(true),
}))
})
@@ -175,4 +195,26 @@ var _ = Describe("Scan", func() {
Expect(Scan(&d, i{"bool"}, i{""})).To(HaveOccurred())
Expect(Scan(&d, i{"bool"}, i{"123"})).To(HaveOccurred())
})
+
+ It("Implements Scanner", func() {
+ var td TimeData
+
+ now := time.Now()
+ Expect(Scan(&td, i{"name", "login"}, i{"hello", now.Format(time.RFC3339Nano)})).NotTo(HaveOccurred())
+ Expect(td.Name).To(Equal("hello"))
+ Expect(td.Time.UnixNano()).To(Equal(now.UnixNano()))
+ Expect(td.Time.Format(time.RFC3339Nano)).To(Equal(now.Format(time.RFC3339Nano)))
+ })
+
+ It("should time.Time RFC3339Nano", func() {
+ type TimeTime struct {
+ Time time.Time `redis:"time"`
+ }
+
+ now := time.Now()
+
+ var tt TimeTime
+ Expect(Scan(&tt, i{"time"}, i{now.Format(time.RFC3339Nano)})).NotTo(HaveOccurred())
+ Expect(now.Unix()).To(Equal(tt.Time.Unix()))
+ })
})
diff --git a/dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/internal/hscan/structmap.go b/dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.5.1/internal/hscan/structmap.go
index 6839412..1a560e4 100644
--- a/dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/internal/hscan/structmap.go
+++ b/dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.5.1/internal/hscan/structmap.go
@@ -1,10 +1,13 @@
package hscan
import (
+ "encoding"
"fmt"
"reflect"
"strings"
"sync"
+
+ "github.com/redis/go-redis/v9/internal/util"
)
// structMap contains the map of struct fields for target structs
@@ -58,7 +61,11 @@ func newStructSpec(t reflect.Type, fieldTag string) *structSpec {
}
// Use the built-in decoder.
- out.set(tag, &structField{index: i, fn: decoders[f.Type.Kind()]})
+ kind := f.Type.Kind()
+ if kind == reflect.Pointer {
+ kind = f.Type.Elem().Kind()
+ }
+ out.set(tag, &structField{index: i, fn: decoders[kind]})
}
return out
@@ -84,7 +91,32 @@ func (s StructValue) Scan(key string, value string) error {
if !ok {
return nil
}
- if err := field.fn(s.value.Field(field.index), value); err != nil {
+
+ v := s.value.Field(field.index)
+ isPtr := v.Kind() == reflect.Ptr
+
+ if isPtr && v.IsNil() {
+ v.Set(reflect.New(v.Type().Elem()))
+ }
+ if !isPtr && v.Type().Name() != "" && v.CanAddr() {
+ v = v.Addr()
+ isPtr = true
+ }
+
+ if isPtr && v.Type().NumMethod() > 0 && v.CanInterface() {
+ switch scan := v.Interface().(type) {
+ case Scanner:
+ return scan.ScanRedis(value)
+ case encoding.TextUnmarshaler:
+ return scan.UnmarshalText(util.StringToBytes(value))
+ }
+ }
+
+ if isPtr {
+ v = v.Elem()
+ }
+
+ if err := field.fn(v, value); err != nil {
t := s.value.Type()
return fmt.Errorf("cannot scan redis.result %s into struct field %s.%s of type %s, error-%s",
value, t.Name(), t.Field(field.index).Name, t.Field(field.index).Type, err.Error())
diff --git a/dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/internal/internal.go b/dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.5.1/internal/internal.go
index 4a59c59..e783d13 100644
--- a/dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/internal/internal.go
+++ b/dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.5.1/internal/internal.go
@@ -3,7 +3,7 @@ package internal
import (
"time"
- "github.com/go-redis/redis/v8/internal/rand"
+ "github.com/redis/go-redis/v9/internal/rand"
)
func RetryBackoff(retry int, minBackoff, maxBackoff time.Duration) time.Duration {
diff --git a/dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/internal/internal_test.go b/dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.5.1/internal/internal_test.go
index bfdcbbb..00bfd6e 100644
--- a/dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/internal/internal_test.go
+++ b/dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.5.1/internal/internal_test.go
@@ -4,7 +4,7 @@ import (
"testing"
"time"
- . "github.com/onsi/gomega"
+ . "github.com/bsm/gomega"
)
func TestRetryBackoff(t *testing.T) {
diff --git a/dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/internal/log.go b/dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.5.1/internal/log.go
index c8b9213..c8b9213 100644
--- a/dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/internal/log.go
+++ b/dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.5.1/internal/log.go
diff --git a/dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/internal/once.go b/dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.5.1/internal/once.go
index 64f4627..b81244f 100644
--- a/dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/internal/once.go
+++ b/dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.5.1/internal/once.go
@@ -32,7 +32,9 @@ type Once struct {
// Do calls the function f if and only if Do has not been invoked
// without error for this instance of Once. In other words, given
-// var once Once
+//
+// var once Once
+//
// if once.Do(f) is called multiple times, only the first call will
// invoke f, even if f has a different value in each invocation unless
// f returns an error. A new instance of Once is required for each
@@ -41,7 +43,8 @@ type Once struct {
// Do is intended for initialization that must be run exactly once. Since f
// is niladic, it may be necessary to use a function literal to capture the
// arguments to a function to be invoked by Do:
-// err := config.once.Do(func() error { return config.init(filename) })
+//
+// err := config.once.Do(func() error { return config.init(filename) })
func (o *Once) Do(f func() error) error {
if atomic.LoadUint32(&o.done) == 1 {
return nil
diff --git a/dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/internal/pool/bench_test.go b/dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.5.1/internal/pool/bench_test.go
index dec5d3f..71049f4 100644
--- a/dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/internal/pool/bench_test.go
+++ b/dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.5.1/internal/pool/bench_test.go
@@ -6,7 +6,7 @@ import (
"testing"
"time"
- "github.com/go-redis/redis/v8/internal/pool"
+ "github.com/redis/go-redis/v9/internal/pool"
)
type poolGetPutBenchmark struct {
@@ -30,11 +30,10 @@ func BenchmarkPoolGetPut(b *testing.B) {
for _, bm := range benchmarks {
b.Run(bm.String(), func(b *testing.B) {
connPool := pool.NewConnPool(&pool.Options{
- Dialer: dummyDialer,
- PoolSize: bm.poolSize,
- PoolTimeout: time.Second,
- IdleTimeout: time.Hour,
- IdleCheckFrequency: time.Hour,
+ Dialer: dummyDialer,
+ PoolSize: bm.poolSize,
+ PoolTimeout: time.Second,
+ ConnMaxIdleTime: time.Hour,
})
b.ResetTimer()
@@ -74,11 +73,10 @@ func BenchmarkPoolGetRemove(b *testing.B) {
for _, bm := range benchmarks {
b.Run(bm.String(), func(b *testing.B) {
connPool := pool.NewConnPool(&pool.Options{
- Dialer: dummyDialer,
- PoolSize: bm.poolSize,
- PoolTimeout: time.Second,
- IdleTimeout: time.Hour,
- IdleCheckFrequency: time.Hour,
+ Dialer: dummyDialer,
+ PoolSize: bm.poolSize,
+ PoolTimeout: time.Second,
+ ConnMaxIdleTime: time.Hour,
})
b.ResetTimer()
diff --git a/dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/internal/pool/conn.go b/dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.5.1/internal/pool/conn.go
index 5661659..7f45bc0 100644
--- a/dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/internal/pool/conn.go
+++ b/dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.5.1/internal/pool/conn.go
@@ -7,7 +7,7 @@ import (
"sync/atomic"
"time"
- "github.com/go-redis/redis/v8/internal/proto"
+ "github.com/redis/go-redis/v9/internal/proto"
)
var noDeadline = time.Time{}
@@ -63,9 +63,13 @@ func (cn *Conn) RemoteAddr() net.Addr {
return nil
}
-func (cn *Conn) WithReader(ctx context.Context, timeout time.Duration, fn func(rd *proto.Reader) error) error {
- if err := cn.netConn.SetReadDeadline(cn.deadline(ctx, timeout)); err != nil {
- return err
+func (cn *Conn) WithReader(
+ ctx context.Context, timeout time.Duration, fn func(rd *proto.Reader) error,
+) error {
+ if timeout >= 0 {
+ if err := cn.netConn.SetReadDeadline(cn.deadline(ctx, timeout)); err != nil {
+ return err
+ }
}
return fn(cn.rd)
}
@@ -73,8 +77,10 @@ func (cn *Conn) WithReader(ctx context.Context, timeout time.Duration, fn func(r
func (cn *Conn) WithWriter(
ctx context.Context, timeout time.Duration, fn func(wr *proto.Writer) error,
) error {
- if err := cn.netConn.SetWriteDeadline(cn.deadline(ctx, timeout)); err != nil {
- return err
+ if timeout >= 0 {
+ if err := cn.netConn.SetWriteDeadline(cn.deadline(ctx, timeout)); err != nil {
+ return err
+ }
}
if cn.bw.Buffered() > 0 {
diff --git a/dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.5.1/internal/pool/conn_check.go b/dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.5.1/internal/pool/conn_check.go
new file mode 100644
index 0000000..83190d3
--- /dev/null
+++ b/dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.5.1/internal/pool/conn_check.go
@@ -0,0 +1,49 @@
+//go:build linux || darwin || dragonfly || freebsd || netbsd || openbsd || solaris || illumos
+
+package pool
+
+import (
+ "errors"
+ "io"
+ "net"
+ "syscall"
+ "time"
+)
+
+var errUnexpectedRead = errors.New("unexpected read from socket")
+
+func connCheck(conn net.Conn) error {
+ // Reset previous timeout.
+ _ = conn.SetDeadline(time.Time{})
+
+ sysConn, ok := conn.(syscall.Conn)
+ if !ok {
+ return nil
+ }
+ rawConn, err := sysConn.SyscallConn()
+ if err != nil {
+ return err
+ }
+
+ var sysErr error
+
+ if err := rawConn.Read(func(fd uintptr) bool {
+ var buf [1]byte
+ n, err := syscall.Read(int(fd), buf[:])
+ switch {
+ case n == 0 && err == nil:
+ sysErr = io.EOF
+ case n > 0:
+ sysErr = errUnexpectedRead
+ case err == syscall.EAGAIN || err == syscall.EWOULDBLOCK:
+ sysErr = nil
+ default:
+ sysErr = err
+ }
+ return true
+ }); err != nil {
+ return err
+ }
+
+ return sysErr
+}
diff --git a/dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.5.1/internal/pool/conn_check_dummy.go b/dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.5.1/internal/pool/conn_check_dummy.go
new file mode 100644
index 0000000..295da12
--- /dev/null
+++ b/dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.5.1/internal/pool/conn_check_dummy.go
@@ -0,0 +1,9 @@
+//go:build !linux && !darwin && !dragonfly && !freebsd && !netbsd && !openbsd && !solaris && !illumos
+
+package pool
+
+import "net"
+
+func connCheck(conn net.Conn) error {
+ return nil
+}
diff --git a/dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.5.1/internal/pool/conn_check_test.go b/dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.5.1/internal/pool/conn_check_test.go
new file mode 100644
index 0000000..2ade8a0
--- /dev/null
+++ b/dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.5.1/internal/pool/conn_check_test.go
@@ -0,0 +1,47 @@
+//go:build linux || darwin || dragonfly || freebsd || netbsd || openbsd || solaris || illumos
+
+package pool
+
+import (
+ "net"
+ "net/http/httptest"
+ "time"
+
+ . "github.com/bsm/ginkgo/v2"
+ . "github.com/bsm/gomega"
+)
+
+var _ = Describe("tests conn_check with real conns", func() {
+ var ts *httptest.Server
+ var conn net.Conn
+ var err error
+
+ BeforeEach(func() {
+ ts = httptest.NewServer(nil)
+ conn, err = net.DialTimeout(ts.Listener.Addr().Network(), ts.Listener.Addr().String(), time.Second)
+ Expect(err).NotTo(HaveOccurred())
+ })
+
+ AfterEach(func() {
+ ts.Close()
+ })
+
+ It("good conn check", func() {
+ Expect(connCheck(conn)).NotTo(HaveOccurred())
+
+ Expect(conn.Close()).NotTo(HaveOccurred())
+ Expect(connCheck(conn)).To(HaveOccurred())
+ })
+
+ It("bad conn check", func() {
+ Expect(conn.Close()).NotTo(HaveOccurred())
+ Expect(connCheck(conn)).To(HaveOccurred())
+ })
+
+ It("check conn deadline", func() {
+ Expect(conn.SetDeadline(time.Now())).NotTo(HaveOccurred())
+ time.Sleep(time.Millisecond * 10)
+ Expect(connCheck(conn)).NotTo(HaveOccurred())
+ Expect(conn.Close()).NotTo(HaveOccurred())
+ })
+})
diff --git a/dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/internal/pool/export_test.go b/dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.5.1/internal/pool/export_test.go
index 75dd4ad..f3a65f8 100644
--- a/dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/internal/pool/export_test.go
+++ b/dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.5.1/internal/pool/export_test.go
@@ -1,9 +1,14 @@
package pool
import (
+ "net"
"time"
)
func (cn *Conn) SetCreatedAt(tm time.Time) {
cn.createdAt = tm
}
+
+func (cn *Conn) NetConn() net.Conn {
+ return cn.netConn
+}
diff --git a/dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.5.1/internal/pool/main_test.go b/dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.5.1/internal/pool/main_test.go
new file mode 100644
index 0000000..d993301
--- /dev/null
+++ b/dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.5.1/internal/pool/main_test.go
@@ -0,0 +1,123 @@
+package pool_test
+
+import (
+ "context"
+ "fmt"
+ "net"
+ "sync"
+ "syscall"
+ "testing"
+ "time"
+
+ . "github.com/bsm/ginkgo/v2"
+ . "github.com/bsm/gomega"
+)
+
+func TestGinkgoSuite(t *testing.T) {
+ RegisterFailHandler(Fail)
+ RunSpecs(t, "pool")
+}
+
+func perform(n int, cbs ...func(int)) {
+ var wg sync.WaitGroup
+ for _, cb := range cbs {
+ for i := 0; i < n; i++ {
+ wg.Add(1)
+ go func(cb func(int), i int) {
+ defer GinkgoRecover()
+ defer wg.Done()
+
+ cb(i)
+ }(cb, i)
+ }
+ }
+ wg.Wait()
+}
+
+func dummyDialer(context.Context) (net.Conn, error) {
+ return newDummyConn(), nil
+}
+
+func newDummyConn() net.Conn {
+ return &dummyConn{
+ rawConn: new(dummyRawConn),
+ }
+}
+
+var (
+ _ net.Conn = (*dummyConn)(nil)
+ _ syscall.Conn = (*dummyConn)(nil)
+)
+
+type dummyConn struct {
+ rawConn *dummyRawConn
+}
+
+func (d *dummyConn) SyscallConn() (syscall.RawConn, error) {
+ return d.rawConn, nil
+}
+
+var errDummy = fmt.Errorf("dummyConn err")
+
+func (d *dummyConn) Read(b []byte) (n int, err error) {
+ return 0, errDummy
+}
+
+func (d *dummyConn) Write(b []byte) (n int, err error) {
+ return 0, errDummy
+}
+
+func (d *dummyConn) Close() error {
+ d.rawConn.Close()
+ return nil
+}
+
+func (d *dummyConn) LocalAddr() net.Addr {
+ return &net.TCPAddr{}
+}
+
+func (d *dummyConn) RemoteAddr() net.Addr {
+ return &net.TCPAddr{}
+}
+
+func (d *dummyConn) SetDeadline(t time.Time) error {
+ return nil
+}
+
+func (d *dummyConn) SetReadDeadline(t time.Time) error {
+ return nil
+}
+
+func (d *dummyConn) SetWriteDeadline(t time.Time) error {
+ return nil
+}
+
+var _ syscall.RawConn = (*dummyRawConn)(nil)
+
+type dummyRawConn struct {
+ mu sync.Mutex
+ closed bool
+}
+
+func (d *dummyRawConn) Control(f func(fd uintptr)) error {
+ return nil
+}
+
+func (d *dummyRawConn) Read(f func(fd uintptr) (done bool)) error {
+ d.mu.Lock()
+ defer d.mu.Unlock()
+ if d.closed {
+ return fmt.Errorf("dummyRawConn closed")
+ }
+ return nil
+}
+
+func (d *dummyRawConn) Write(f func(fd uintptr) (done bool)) error {
+ return nil
+}
+
+func (d *dummyRawConn) Close() {
+ d.mu.Lock()
+ d.closed = true
+ d.mu.Unlock()
+}
diff --git a/dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/internal/pool/pool.go b/dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.5.1/internal/pool/pool.go
index 44a4e77..2125f3e 100644
--- a/dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/internal/pool/pool.go
+++ b/dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.5.1/internal/pool/pool.go
@@ -8,13 +8,17 @@ import (
"sync/atomic"
"time"
- "github.com/go-redis/redis/v8/internal"
+ "github.com/redis/go-redis/v9/internal"
)
var (
// ErrClosed performs any operation on the closed client will return this error.
ErrClosed = errors.New("redis: client is closed")
+ // ErrPoolExhausted is returned from a pool connection method
+ // when the maximum number of database connections in the pool has been reached.
+ ErrPoolExhausted = errors.New("redis: connection pool exhausted")
+
// ErrPoolTimeout timed out waiting to get a connection from the connection pool.
ErrPoolTimeout = errors.New("redis: connection pool timeout")
)
@@ -54,16 +58,16 @@ type Pooler interface {
}
type Options struct {
- Dialer func(context.Context) (net.Conn, error)
- OnClose func(*Conn) error
+ Dialer func(context.Context) (net.Conn, error)
- PoolFIFO bool
- PoolSize int
- MinIdleConns int
- MaxConnAge time.Duration
- PoolTimeout time.Duration
- IdleTimeout time.Duration
- IdleCheckFrequency time.Duration
+ PoolFIFO bool
+ PoolSize int
+ PoolTimeout time.Duration
+ MinIdleConns int
+ MaxIdleConns int
+ MaxActiveConns int
+ ConnMaxIdleTime time.Duration
+ ConnMaxLifetime time.Duration
}
type lastDialErrorWrap struct {
@@ -71,66 +75,67 @@ type lastDialErrorWrap struct {
}
type ConnPool struct {
- opt *Options
+ cfg *Options
dialErrorsNum uint32 // atomic
-
lastDialError atomic.Value
queue chan struct{}
- connsMu sync.Mutex
- conns []*Conn
- idleConns []*Conn
+ connsMu sync.Mutex
+ conns []*Conn
+ idleConns []*Conn
+
poolSize int
idleConnsLen int
stats Stats
- _closed uint32 // atomic
- closedCh chan struct{}
+ _closed uint32 // atomic
}
var _ Pooler = (*ConnPool)(nil)
func NewConnPool(opt *Options) *ConnPool {
p := &ConnPool{
- opt: opt,
+ cfg: opt,
queue: make(chan struct{}, opt.PoolSize),
conns: make([]*Conn, 0, opt.PoolSize),
idleConns: make([]*Conn, 0, opt.PoolSize),
- closedCh: make(chan struct{}),
}
p.connsMu.Lock()
p.checkMinIdleConns()
p.connsMu.Unlock()
- if opt.IdleTimeout > 0 && opt.IdleCheckFrequency > 0 {
- go p.reaper(opt.IdleCheckFrequency)
- }
-
return p
}
func (p *ConnPool) checkMinIdleConns() {
- if p.opt.MinIdleConns == 0 {
+ if p.cfg.MinIdleConns == 0 {
return
}
- for p.poolSize < p.opt.PoolSize && p.idleConnsLen < p.opt.MinIdleConns {
- p.poolSize++
- p.idleConnsLen++
-
- go func() {
- err := p.addIdleConn()
- if err != nil && err != ErrClosed {
- p.connsMu.Lock()
- p.poolSize--
- p.idleConnsLen--
- p.connsMu.Unlock()
- }
- }()
+ for p.poolSize < p.cfg.PoolSize && p.idleConnsLen < p.cfg.MinIdleConns {
+ select {
+ case p.queue <- struct{}{}:
+ p.poolSize++
+ p.idleConnsLen++
+
+ go func() {
+ err := p.addIdleConn()
+ if err != nil && err != ErrClosed {
+ p.connsMu.Lock()
+ p.poolSize--
+ p.idleConnsLen--
+ p.connsMu.Unlock()
+ }
+
+ p.freeTurn()
+ }()
+ default:
+ return
+ }
}
}
@@ -159,6 +164,17 @@ func (p *ConnPool) NewConn(ctx context.Context) (*Conn, error) {
}
func (p *ConnPool) newConn(ctx context.Context, pooled bool) (*Conn, error) {
+ if p.closed() {
+ return nil, ErrClosed
+ }
+
+ p.connsMu.Lock()
+ if p.cfg.MaxActiveConns > 0 && p.poolSize >= p.cfg.MaxActiveConns {
+ p.connsMu.Unlock()
+ return nil, ErrPoolExhausted
+ }
+ p.connsMu.Unlock()
+
cn, err := p.dialConn(ctx, pooled)
if err != nil {
return nil, err
@@ -167,16 +183,15 @@ func (p *ConnPool) newConn(ctx context.Context, pooled bool) (*Conn, error) {
p.connsMu.Lock()
defer p.connsMu.Unlock()
- // It is not allowed to add new connections to the closed connection pool.
- if p.closed() {
+ if p.cfg.MaxActiveConns > 0 && p.poolSize >= p.cfg.MaxActiveConns {
_ = cn.Close()
- return nil, ErrClosed
+ return nil, ErrPoolExhausted
}
p.conns = append(p.conns, cn)
if pooled {
// If pool is full remove the cn on next Put.
- if p.poolSize >= p.opt.PoolSize {
+ if p.poolSize >= p.cfg.PoolSize {
cn.pooled = false
} else {
p.poolSize++
@@ -191,14 +206,14 @@ func (p *ConnPool) dialConn(ctx context.Context, pooled bool) (*Conn, error) {
return nil, ErrClosed
}
- if atomic.LoadUint32(&p.dialErrorsNum) >= uint32(p.opt.PoolSize) {
+ if atomic.LoadUint32(&p.dialErrorsNum) >= uint32(p.cfg.PoolSize) {
return nil, p.getLastDialError()
}
- netConn, err := p.opt.Dialer(ctx)
+ netConn, err := p.cfg.Dialer(ctx)
if err != nil {
p.setLastDialError(err)
- if atomic.AddUint32(&p.dialErrorsNum, 1) == uint32(p.opt.PoolSize) {
+ if atomic.AddUint32(&p.dialErrorsNum, 1) == uint32(p.cfg.PoolSize) {
go p.tryDial()
}
return nil, err
@@ -215,7 +230,7 @@ func (p *ConnPool) tryDial() {
return
}
- conn, err := p.opt.Dialer(context.Background())
+ conn, err := p.cfg.Dialer(context.Background())
if err != nil {
p.setLastDialError(err)
time.Sleep(time.Second)
@@ -256,6 +271,7 @@ func (p *ConnPool) Get(ctx context.Context) (*Conn, error) {
p.connsMu.Unlock()
if err != nil {
+ p.freeTurn()
return nil, err
}
@@ -263,7 +279,7 @@ func (p *ConnPool) Get(ctx context.Context) (*Conn, error) {
break
}
- if p.isStaleConn(cn) {
+ if !p.isHealthyConn(cn) {
_ = p.CloseConn(cn)
continue
}
@@ -283,10 +299,6 @@ func (p *ConnPool) Get(ctx context.Context) (*Conn, error) {
return newcn, nil
}
-func (p *ConnPool) getTurn() {
- p.queue <- struct{}{}
-}
-
func (p *ConnPool) waitTurn(ctx context.Context) error {
select {
case <-ctx.Done():
@@ -301,7 +313,7 @@ func (p *ConnPool) waitTurn(ctx context.Context) error {
}
timer := timers.Get().(*time.Timer)
- timer.Reset(p.opt.PoolTimeout)
+ timer.Reset(p.cfg.PoolTimeout)
select {
case <-ctx.Done():
@@ -337,7 +349,7 @@ func (p *ConnPool) popIdle() (*Conn, error) {
}
var cn *Conn
- if p.opt.PoolFIFO {
+ if p.cfg.PoolFIFO {
cn = p.idleConns[0]
copy(p.idleConns, p.idleConns[1:])
p.idleConns = p.idleConns[:n-1]
@@ -363,14 +375,28 @@ func (p *ConnPool) Put(ctx context.Context, cn *Conn) {
return
}
+ var shouldCloseConn bool
+
p.connsMu.Lock()
- p.idleConns = append(p.idleConns, cn)
- p.idleConnsLen++
+
+ if p.cfg.MaxIdleConns == 0 || p.idleConnsLen < p.cfg.MaxIdleConns {
+ p.idleConns = append(p.idleConns, cn)
+ p.idleConnsLen++
+ } else {
+ p.removeConn(cn)
+ shouldCloseConn = true
+ }
+
p.connsMu.Unlock()
+
p.freeTurn()
+
+ if shouldCloseConn {
+ _ = p.closeConn(cn)
+ }
}
-func (p *ConnPool) Remove(ctx context.Context, cn *Conn, reason error) {
+func (p *ConnPool) Remove(_ context.Context, cn *Conn, reason error) {
p.removeConnWithLock(cn)
p.freeTurn()
_ = p.closeConn(cn)
@@ -383,8 +409,8 @@ func (p *ConnPool) CloseConn(cn *Conn) error {
func (p *ConnPool) removeConnWithLock(cn *Conn) {
p.connsMu.Lock()
+ defer p.connsMu.Unlock()
p.removeConn(cn)
- p.connsMu.Unlock()
}
func (p *ConnPool) removeConn(cn *Conn) {
@@ -395,15 +421,13 @@ func (p *ConnPool) removeConn(cn *Conn) {
p.poolSize--
p.checkMinIdleConns()
}
- return
+ break
}
}
+ atomic.AddUint32(&p.stats.StaleConns, 1)
}
func (p *ConnPool) closeConn(cn *Conn) error {
- if p.opt.OnClose != nil {
- _ = p.opt.OnClose(cn)
- }
return cn.Close()
}
@@ -424,14 +448,13 @@ func (p *ConnPool) IdleLen() int {
}
func (p *ConnPool) Stats() *Stats {
- idleLen := p.IdleLen()
return &Stats{
Hits: atomic.LoadUint32(&p.stats.Hits),
Misses: atomic.LoadUint32(&p.stats.Misses),
Timeouts: atomic.LoadUint32(&p.stats.Timeouts),
TotalConns: uint32(p.Len()),
- IdleConns: uint32(idleLen),
+ IdleConns: uint32(p.IdleLen()),
StaleConns: atomic.LoadUint32(&p.stats.StaleConns),
}
}
@@ -459,7 +482,6 @@ func (p *ConnPool) Close() error {
if !atomic.CompareAndSwapUint32(&p._closed, 0, 1) {
return ErrClosed
}
- close(p.closedCh)
var firstErr error
p.connsMu.Lock()
@@ -477,81 +499,20 @@ func (p *ConnPool) Close() error {
return firstErr
}
-func (p *ConnPool) reaper(frequency time.Duration) {
- ticker := time.NewTicker(frequency)
- defer ticker.Stop()
-
- for {
- select {
- case <-ticker.C:
- // It is possible that ticker and closedCh arrive together,
- // and select pseudo-randomly pick ticker case, we double
- // check here to prevent being executed after closed.
- if p.closed() {
- return
- }
- _, err := p.ReapStaleConns()
- if err != nil {
- internal.Logger.Printf(context.Background(), "ReapStaleConns failed: %s", err)
- continue
- }
- case <-p.closedCh:
- return
- }
- }
-}
-
-func (p *ConnPool) ReapStaleConns() (int, error) {
- var n int
- for {
- p.getTurn()
-
- p.connsMu.Lock()
- cn := p.reapStaleConn()
- p.connsMu.Unlock()
-
- p.freeTurn()
-
- if cn != nil {
- _ = p.closeConn(cn)
- n++
- } else {
- break
- }
- }
- atomic.AddUint32(&p.stats.StaleConns, uint32(n))
- return n, nil
-}
-
-func (p *ConnPool) reapStaleConn() *Conn {
- if len(p.idleConns) == 0 {
- return nil
- }
+func (p *ConnPool) isHealthyConn(cn *Conn) bool {
+ now := time.Now()
- cn := p.idleConns[0]
- if !p.isStaleConn(cn) {
- return nil
+ if p.cfg.ConnMaxLifetime > 0 && now.Sub(cn.createdAt) >= p.cfg.ConnMaxLifetime {
+ return false
}
-
- p.idleConns = append(p.idleConns[:0], p.idleConns[1:]...)
- p.idleConnsLen--
- p.removeConn(cn)
-
- return cn
-}
-
-func (p *ConnPool) isStaleConn(cn *Conn) bool {
- if p.opt.IdleTimeout == 0 && p.opt.MaxConnAge == 0 {
+ if p.cfg.ConnMaxIdleTime > 0 && now.Sub(cn.UsedAt()) >= p.cfg.ConnMaxIdleTime {
return false
}
- now := time.Now()
- if p.opt.IdleTimeout > 0 && now.Sub(cn.UsedAt()) >= p.opt.IdleTimeout {
- return true
- }
- if p.opt.MaxConnAge > 0 && now.Sub(cn.createdAt) >= p.opt.MaxConnAge {
- return true
+ if connCheck(cn.netConn) != nil {
+ return false
}
- return false
+ cn.SetUsedAt(now)
+ return true
}
diff --git a/dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/internal/pool/pool_single.go b/dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.5.1/internal/pool/pool_single.go
index 5a3fde1..5a3fde1 100644
--- a/dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/internal/pool/pool_single.go
+++ b/dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.5.1/internal/pool/pool_single.go
diff --git a/dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/internal/pool/pool_sticky.go b/dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.5.1/internal/pool/pool_sticky.go
index 3adb99b..3adb99b 100644
--- a/dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/internal/pool/pool_sticky.go
+++ b/dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.5.1/internal/pool/pool_sticky.go
diff --git a/dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/internal/pool/pool_test.go b/dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.5.1/internal/pool/pool_test.go
index 423a783..76dec99 100644
--- a/dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/internal/pool/pool_test.go
+++ b/dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.5.1/internal/pool/pool_test.go
@@ -7,10 +7,10 @@ import (
"testing"
"time"
- . "github.com/onsi/ginkgo"
- . "github.com/onsi/gomega"
+ . "github.com/bsm/ginkgo/v2"
+ . "github.com/bsm/gomega"
- "github.com/go-redis/redis/v8/internal/pool"
+ "github.com/redis/go-redis/v9/internal/pool"
)
var _ = Describe("ConnPool", func() {
@@ -19,11 +19,10 @@ var _ = Describe("ConnPool", func() {
BeforeEach(func() {
connPool = pool.NewConnPool(&pool.Options{
- Dialer: dummyDialer,
- PoolSize: 10,
- PoolTimeout: time.Hour,
- IdleTimeout: time.Millisecond,
- IdleCheckFrequency: time.Millisecond,
+ Dialer: dummyDialer,
+ PoolSize: 10,
+ PoolTimeout: time.Hour,
+ ConnMaxIdleTime: time.Millisecond,
})
})
@@ -45,11 +44,10 @@ var _ = Describe("ConnPool", func() {
<-closedChan
return &net.TCPConn{}, nil
},
- PoolSize: 10,
- PoolTimeout: time.Hour,
- IdleTimeout: time.Millisecond,
- IdleCheckFrequency: time.Millisecond,
- MinIdleConns: minIdleConns,
+ PoolSize: 10,
+ PoolTimeout: time.Hour,
+ ConnMaxIdleTime: time.Millisecond,
+ MinIdleConns: minIdleConns,
})
wg.Wait()
Expect(connPool.Close()).NotTo(HaveOccurred())
@@ -127,12 +125,11 @@ var _ = Describe("MinIdleConns", func() {
newConnPool := func() *pool.ConnPool {
connPool := pool.NewConnPool(&pool.Options{
- Dialer: dummyDialer,
- PoolSize: poolSize,
- MinIdleConns: minIdleConns,
- PoolTimeout: 100 * time.Millisecond,
- IdleTimeout: -1,
- IdleCheckFrequency: -1,
+ Dialer: dummyDialer,
+ PoolSize: poolSize,
+ MinIdleConns: minIdleConns,
+ PoolTimeout: 100 * time.Millisecond,
+ ConnMaxIdleTime: -1,
})
Eventually(func() int {
return connPool.Len()
@@ -287,130 +284,6 @@ var _ = Describe("MinIdleConns", func() {
})
})
-var _ = Describe("conns reaper", func() {
- const idleTimeout = time.Minute
- const maxAge = time.Hour
-
- ctx := context.Background()
- var connPool *pool.ConnPool
- var conns, staleConns, closedConns []*pool.Conn
-
- assert := func(typ string) {
- BeforeEach(func() {
- closedConns = nil
- connPool = pool.NewConnPool(&pool.Options{
- Dialer: dummyDialer,
- PoolSize: 10,
- IdleTimeout: idleTimeout,
- MaxConnAge: maxAge,
- PoolTimeout: time.Second,
- IdleCheckFrequency: time.Hour,
- OnClose: func(cn *pool.Conn) error {
- closedConns = append(closedConns, cn)
- return nil
- },
- })
-
- conns = nil
-
- // add stale connections
- staleConns = nil
- for i := 0; i < 3; i++ {
- cn, err := connPool.Get(ctx)
- Expect(err).NotTo(HaveOccurred())
- switch typ {
- case "idle":
- cn.SetUsedAt(time.Now().Add(-2 * idleTimeout))
- case "aged":
- cn.SetCreatedAt(time.Now().Add(-2 * maxAge))
- }
- conns = append(conns, cn)
- staleConns = append(staleConns, cn)
- }
-
- // add fresh connections
- for i := 0; i < 3; i++ {
- cn, err := connPool.Get(ctx)
- Expect(err).NotTo(HaveOccurred())
- conns = append(conns, cn)
- }
-
- for _, cn := range conns {
- connPool.Put(ctx, cn)
- }
-
- Expect(connPool.Len()).To(Equal(6))
- Expect(connPool.IdleLen()).To(Equal(6))
-
- n, err := connPool.ReapStaleConns()
- Expect(err).NotTo(HaveOccurred())
- Expect(n).To(Equal(3))
- })
-
- AfterEach(func() {
- _ = connPool.Close()
- Expect(connPool.Len()).To(Equal(0))
- Expect(connPool.IdleLen()).To(Equal(0))
- Expect(len(closedConns)).To(Equal(len(conns)))
- Expect(closedConns).To(ConsistOf(conns))
- })
-
- It("reaps stale connections", func() {
- Expect(connPool.Len()).To(Equal(3))
- Expect(connPool.IdleLen()).To(Equal(3))
- })
-
- It("does not reap fresh connections", func() {
- n, err := connPool.ReapStaleConns()
- Expect(err).NotTo(HaveOccurred())
- Expect(n).To(Equal(0))
- })
-
- It("stale connections are closed", func() {
- Expect(len(closedConns)).To(Equal(len(staleConns)))
- Expect(closedConns).To(ConsistOf(staleConns))
- })
-
- It("pool is functional", func() {
- for j := 0; j < 3; j++ {
- var freeCns []*pool.Conn
- for i := 0; i < 3; i++ {
- cn, err := connPool.Get(ctx)
- Expect(err).NotTo(HaveOccurred())
- Expect(cn).NotTo(BeNil())
- freeCns = append(freeCns, cn)
- }
-
- Expect(connPool.Len()).To(Equal(3))
- Expect(connPool.IdleLen()).To(Equal(0))
-
- cn, err := connPool.Get(ctx)
- Expect(err).NotTo(HaveOccurred())
- Expect(cn).NotTo(BeNil())
- conns = append(conns, cn)
-
- Expect(connPool.Len()).To(Equal(4))
- Expect(connPool.IdleLen()).To(Equal(0))
-
- connPool.Remove(ctx, cn, nil)
-
- Expect(connPool.Len()).To(Equal(3))
- Expect(connPool.IdleLen()).To(Equal(0))
-
- for _, cn := range freeCns {
- connPool.Put(ctx, cn)
- }
-
- Expect(connPool.Len()).To(Equal(3))
- Expect(connPool.IdleLen()).To(Equal(3))
- }
- })
- }
-
- assert("idle")
- assert("aged")
-})
-
var _ = Describe("race", func() {
ctx := context.Background()
var connPool *pool.ConnPool
@@ -430,11 +303,10 @@ var _ = Describe("race", func() {
It("does not happen on Get, Put, and Remove", func() {
connPool = pool.NewConnPool(&pool.Options{
- Dialer: dummyDialer,
- PoolSize: 10,
- PoolTimeout: time.Minute,
- IdleTimeout: time.Millisecond,
- IdleCheckFrequency: time.Millisecond,
+ Dialer: dummyDialer,
+ PoolSize: 10,
+ PoolTimeout: time.Minute,
+ ConnMaxIdleTime: time.Millisecond,
})
perform(C, func(id int) {
@@ -455,4 +327,30 @@ var _ = Describe("race", func() {
}
})
})
+
+ It("limit the number of connections", func() {
+ opt := &pool.Options{
+ Dialer: func(ctx context.Context) (net.Conn, error) {
+ return &net.TCPConn{}, nil
+ },
+ PoolSize: 1000,
+ MinIdleConns: 50,
+ PoolTimeout: 3 * time.Second,
+ }
+ p := pool.NewConnPool(opt)
+
+ var wg sync.WaitGroup
+ for i := 0; i < opt.PoolSize; i++ {
+ wg.Add(1)
+ go func() {
+ defer wg.Done()
+ _, _ = p.Get(ctx)
+ }()
+ }
+ wg.Wait()
+
+ stats := p.Stats()
+ Expect(stats.IdleConns).To(Equal(uint32(0)))
+ Expect(stats.TotalConns).To(Equal(uint32(opt.PoolSize)))
+ })
})
diff --git a/dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/internal/proto/proto_test.go b/dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.5.1/internal/proto/proto_test.go
index c9a820e..87818ca 100644
--- a/dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/internal/proto/proto_test.go
+++ b/dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.5.1/internal/proto/proto_test.go
@@ -3,8 +3,8 @@ package proto_test
import (
"testing"
- . "github.com/onsi/ginkgo"
- . "github.com/onsi/gomega"
+ . "github.com/bsm/ginkgo/v2"
+ . "github.com/bsm/gomega"
)
func TestGinkgoSuite(t *testing.T) {
diff --git a/dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.5.1/internal/proto/reader.go b/dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.5.1/internal/proto/reader.go
new file mode 100644
index 0000000..8d23817
--- /dev/null
+++ b/dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.5.1/internal/proto/reader.go
@@ -0,0 +1,552 @@
+package proto
+
+import (
+ "bufio"
+ "errors"
+ "fmt"
+ "io"
+ "math"
+ "math/big"
+ "strconv"
+
+ "github.com/redis/go-redis/v9/internal/util"
+)
+
+// redis resp protocol data type.
+const (
+ RespStatus = '+' // +<string>\r\n
+ RespError = '-' // -<string>\r\n
+ RespString = '$' // $<length>\r\n<bytes>\r\n
+ RespInt = ':' // :<number>\r\n
+ RespNil = '_' // _\r\n
+ RespFloat = ',' // ,<floating-point-number>\r\n (golang float)
+ RespBool = '#' // true: #t\r\n false: #f\r\n
+ RespBlobError = '!' // !<length>\r\n<bytes>\r\n
+ RespVerbatim = '=' // =<length>\r\nFORMAT:<bytes>\r\n
+ RespBigInt = '(' // (<big number>\r\n
+ RespArray = '*' // *<len>\r\n... (same as resp2)
+ RespMap = '%' // %<len>\r\n(key)\r\n(value)\r\n... (golang map)
+ RespSet = '~' // ~<len>\r\n... (same as Array)
+ RespAttr = '|' // |<len>\r\n(key)\r\n(value)\r\n... + command reply
+ RespPush = '>' // ><len>\r\n... (same as Array)
+)
+
+// Not used temporarily.
+// Redis has not used these two data types for the time being, and will implement them later.
+// Streamed = "EOF:"
+// StreamedAggregated = '?'
+
+//------------------------------------------------------------------------------
+
+const Nil = RedisError("redis: nil") // nolint:errname
+
+type RedisError string
+
+func (e RedisError) Error() string { return string(e) }
+
+func (RedisError) RedisError() {}
+
+func ParseErrorReply(line []byte) error {
+ return RedisError(line[1:])
+}
+
+//------------------------------------------------------------------------------
+
+type Reader struct {
+ rd *bufio.Reader
+}
+
+func NewReader(rd io.Reader) *Reader {
+ return &Reader{
+ rd: bufio.NewReader(rd),
+ }
+}
+
+func (r *Reader) Buffered() int {
+ return r.rd.Buffered()
+}
+
+func (r *Reader) Peek(n int) ([]byte, error) {
+ return r.rd.Peek(n)
+}
+
+func (r *Reader) Reset(rd io.Reader) {
+ r.rd.Reset(rd)
+}
+
+// PeekReplyType returns the data type of the next response without advancing the Reader,
+// and discard the attribute type.
+func (r *Reader) PeekReplyType() (byte, error) {
+ b, err := r.rd.Peek(1)
+ if err != nil {
+ return 0, err
+ }
+ if b[0] == RespAttr {
+ if err = r.DiscardNext(); err != nil {
+ return 0, err
+ }
+ return r.PeekReplyType()
+ }
+ return b[0], nil
+}
+
+// ReadLine Return a valid reply, it will check the protocol or redis error,
+// and discard the attribute type.
+func (r *Reader) ReadLine() ([]byte, error) {
+ line, err := r.readLine()
+ if err != nil {
+ return nil, err
+ }
+ switch line[0] {
+ case RespError:
+ return nil, ParseErrorReply(line)
+ case RespNil:
+ return nil, Nil
+ case RespBlobError:
+ var blobErr string
+ blobErr, err = r.readStringReply(line)
+ if err == nil {
+ err = RedisError(blobErr)
+ }
+ return nil, err
+ case RespAttr:
+ if err = r.Discard(line); err != nil {
+ return nil, err
+ }
+ return r.ReadLine()
+ }
+
+ // Compatible with RESP2
+ if IsNilReply(line) {
+ return nil, Nil
+ }
+
+ return line, nil
+}
+
+// readLine returns an error if:
+// - there is a pending read error;
+// - or line does not end with \r\n.
+func (r *Reader) readLine() ([]byte, error) {
+ b, err := r.rd.ReadSlice('\n')
+ if err != nil {
+ if err != bufio.ErrBufferFull {
+ return nil, err
+ }
+
+ full := make([]byte, len(b))
+ copy(full, b)
+
+ b, err = r.rd.ReadBytes('\n')
+ if err != nil {
+ return nil, err
+ }
+
+ full = append(full, b...) //nolint:makezero
+ b = full
+ }
+ if len(b) <= 2 || b[len(b)-1] != '\n' || b[len(b)-2] != '\r' {
+ return nil, fmt.Errorf("redis: invalid reply: %q", b)
+ }
+ return b[:len(b)-2], nil
+}
+
+func (r *Reader) ReadReply() (interface{}, error) {
+ line, err := r.ReadLine()
+ if err != nil {
+ return nil, err
+ }
+
+ switch line[0] {
+ case RespStatus:
+ return string(line[1:]), nil
+ case RespInt:
+ return util.ParseInt(line[1:], 10, 64)
+ case RespFloat:
+ return r.readFloat(line)
+ case RespBool:
+ return r.readBool(line)
+ case RespBigInt:
+ return r.readBigInt(line)
+
+ case RespString:
+ return r.readStringReply(line)
+ case RespVerbatim:
+ return r.readVerb(line)
+
+ case RespArray, RespSet, RespPush:
+ return r.readSlice(line)
+ case RespMap:
+ return r.readMap(line)
+ }
+ return nil, fmt.Errorf("redis: can't parse %.100q", line)
+}
+
+func (r *Reader) readFloat(line []byte) (float64, error) {
+ v := string(line[1:])
+ switch string(line[1:]) {
+ case "inf":
+ return math.Inf(1), nil
+ case "-inf":
+ return math.Inf(-1), nil
+ case "nan", "-nan":
+ return math.NaN(), nil
+ }
+ return strconv.ParseFloat(v, 64)
+}
+
+func (r *Reader) readBool(line []byte) (bool, error) {
+ switch string(line[1:]) {
+ case "t":
+ return true, nil
+ case "f":
+ return false, nil
+ }
+ return false, fmt.Errorf("redis: can't parse bool reply: %q", line)
+}
+
+func (r *Reader) readBigInt(line []byte) (*big.Int, error) {
+ i := new(big.Int)
+ if i, ok := i.SetString(string(line[1:]), 10); ok {
+ return i, nil
+ }
+ return nil, fmt.Errorf("redis: can't parse bigInt reply: %q", line)
+}
+
+func (r *Reader) readStringReply(line []byte) (string, error) {
+ n, err := replyLen(line)
+ if err != nil {
+ return "", err
+ }
+
+ b := make([]byte, n+2)
+ _, err = io.ReadFull(r.rd, b)
+ if err != nil {
+ return "", err
+ }
+
+ return util.BytesToString(b[:n]), nil
+}
+
+func (r *Reader) readVerb(line []byte) (string, error) {
+ s, err := r.readStringReply(line)
+ if err != nil {
+ return "", err
+ }
+ if len(s) < 4 || s[3] != ':' {
+ return "", fmt.Errorf("redis: can't parse verbatim string reply: %q", line)
+ }
+ return s[4:], nil
+}
+
+func (r *Reader) readSlice(line []byte) ([]interface{}, error) {
+ n, err := replyLen(line)
+ if err != nil {
+ return nil, err
+ }
+
+ val := make([]interface{}, n)
+ for i := 0; i < len(val); i++ {
+ v, err := r.ReadReply()
+ if err != nil {
+ if err == Nil {
+ val[i] = nil
+ continue
+ }
+ if err, ok := err.(RedisError); ok {
+ val[i] = err
+ continue
+ }
+ return nil, err
+ }
+ val[i] = v
+ }
+ return val, nil
+}
+
+func (r *Reader) readMap(line []byte) (map[interface{}]interface{}, error) {
+ n, err := replyLen(line)
+ if err != nil {
+ return nil, err
+ }
+ m := make(map[interface{}]interface{}, n)
+ for i := 0; i < n; i++ {
+ k, err := r.ReadReply()
+ if err != nil {
+ return nil, err
+ }
+ v, err := r.ReadReply()
+ if err != nil {
+ if err == Nil {
+ m[k] = nil
+ continue
+ }
+ if err, ok := err.(RedisError); ok {
+ m[k] = err
+ continue
+ }
+ return nil, err
+ }
+ m[k] = v
+ }
+ return m, nil
+}
+
+// -------------------------------
+
+func (r *Reader) ReadInt() (int64, error) {
+ line, err := r.ReadLine()
+ if err != nil {
+ return 0, err
+ }
+ switch line[0] {
+ case RespInt, RespStatus:
+ return util.ParseInt(line[1:], 10, 64)
+ case RespString:
+ s, err := r.readStringReply(line)
+ if err != nil {
+ return 0, err
+ }
+ return util.ParseInt([]byte(s), 10, 64)
+ case RespBigInt:
+ b, err := r.readBigInt(line)
+ if err != nil {
+ return 0, err
+ }
+ if !b.IsInt64() {
+ return 0, fmt.Errorf("bigInt(%s) value out of range", b.String())
+ }
+ return b.Int64(), nil
+ }
+ return 0, fmt.Errorf("redis: can't parse int reply: %.100q", line)
+}
+
+func (r *Reader) ReadUint() (uint64, error) {
+ line, err := r.ReadLine()
+ if err != nil {
+ return 0, err
+ }
+ switch line[0] {
+ case RespInt, RespStatus:
+ return util.ParseUint(line[1:], 10, 64)
+ case RespString:
+ s, err := r.readStringReply(line)
+ if err != nil {
+ return 0, err
+ }
+ return util.ParseUint([]byte(s), 10, 64)
+ case RespBigInt:
+ b, err := r.readBigInt(line)
+ if err != nil {
+ return 0, err
+ }
+ if !b.IsUint64() {
+ return 0, fmt.Errorf("bigInt(%s) value out of range", b.String())
+ }
+ return b.Uint64(), nil
+ }
+ return 0, fmt.Errorf("redis: can't parse uint reply: %.100q", line)
+}
+
+func (r *Reader) ReadFloat() (float64, error) {
+ line, err := r.ReadLine()
+ if err != nil {
+ return 0, err
+ }
+ switch line[0] {
+ case RespFloat:
+ return r.readFloat(line)
+ case RespStatus:
+ return strconv.ParseFloat(string(line[1:]), 64)
+ case RespString:
+ s, err := r.readStringReply(line)
+ if err != nil {
+ return 0, err
+ }
+ return strconv.ParseFloat(s, 64)
+ }
+ return 0, fmt.Errorf("redis: can't parse float reply: %.100q", line)
+}
+
+func (r *Reader) ReadString() (string, error) {
+ line, err := r.ReadLine()
+ if err != nil {
+ return "", err
+ }
+
+ switch line[0] {
+ case RespStatus, RespInt, RespFloat:
+ return string(line[1:]), nil
+ case RespString:
+ return r.readStringReply(line)
+ case RespBool:
+ b, err := r.readBool(line)
+ return strconv.FormatBool(b), err
+ case RespVerbatim:
+ return r.readVerb(line)
+ case RespBigInt:
+ b, err := r.readBigInt(line)
+ if err != nil {
+ return "", err
+ }
+ return b.String(), nil
+ }
+ return "", fmt.Errorf("redis: can't parse reply=%.100q reading string", line)
+}
+
+func (r *Reader) ReadBool() (bool, error) {
+ s, err := r.ReadString()
+ if err != nil {
+ return false, err
+ }
+ return s == "OK" || s == "1" || s == "true", nil
+}
+
+func (r *Reader) ReadSlice() ([]interface{}, error) {
+ line, err := r.ReadLine()
+ if err != nil {
+ return nil, err
+ }
+ return r.readSlice(line)
+}
+
+// ReadFixedArrayLen read fixed array length.
+func (r *Reader) ReadFixedArrayLen(fixedLen int) error {
+ n, err := r.ReadArrayLen()
+ if err != nil {
+ return err
+ }
+ if n != fixedLen {
+ return fmt.Errorf("redis: got %d elements in the array, wanted %d", n, fixedLen)
+ }
+ return nil
+}
+
+// ReadArrayLen Read and return the length of the array.
+func (r *Reader) ReadArrayLen() (int, error) {
+ line, err := r.ReadLine()
+ if err != nil {
+ return 0, err
+ }
+ switch line[0] {
+ case RespArray, RespSet, RespPush:
+ return replyLen(line)
+ default:
+ return 0, fmt.Errorf("redis: can't parse array/set/push reply: %.100q", line)
+ }
+}
+
+// ReadFixedMapLen reads fixed map length.
+func (r *Reader) ReadFixedMapLen(fixedLen int) error {
+ n, err := r.ReadMapLen()
+ if err != nil {
+ return err
+ }
+ if n != fixedLen {
+ return fmt.Errorf("redis: got %d elements in the map, wanted %d", n, fixedLen)
+ }
+ return nil
+}
+
+// ReadMapLen reads the length of the map type.
+// If responding to the array type (RespArray/RespSet/RespPush),
+// it must be a multiple of 2 and return n/2.
+// Other types will return an error.
+func (r *Reader) ReadMapLen() (int, error) {
+ line, err := r.ReadLine()
+ if err != nil {
+ return 0, err
+ }
+ switch line[0] {
+ case RespMap:
+ return replyLen(line)
+ case RespArray, RespSet, RespPush:
+ // Some commands and RESP2 protocol may respond to array types.
+ n, err := replyLen(line)
+ if err != nil {
+ return 0, err
+ }
+ if n%2 != 0 {
+ return 0, fmt.Errorf("redis: the length of the array must be a multiple of 2, got: %d", n)
+ }
+ return n / 2, nil
+ default:
+ return 0, fmt.Errorf("redis: can't parse map reply: %.100q", line)
+ }
+}
+
+// DiscardNext read and discard the data represented by the next line.
+func (r *Reader) DiscardNext() error {
+ line, err := r.readLine()
+ if err != nil {
+ return err
+ }
+ return r.Discard(line)
+}
+
+// Discard the data represented by line.
+func (r *Reader) Discard(line []byte) (err error) {
+ if len(line) == 0 {
+ return errors.New("redis: invalid line")
+ }
+ switch line[0] {
+ case RespStatus, RespError, RespInt, RespNil, RespFloat, RespBool, RespBigInt:
+ return nil
+ }
+
+ n, err := replyLen(line)
+ if err != nil && err != Nil {
+ return err
+ }
+
+ switch line[0] {
+ case RespBlobError, RespString, RespVerbatim:
+ // +\r\n
+ _, err = r.rd.Discard(n + 2)
+ return err
+ case RespArray, RespSet, RespPush:
+ for i := 0; i < n; i++ {
+ if err = r.DiscardNext(); err != nil {
+ return err
+ }
+ }
+ return nil
+ case RespMap, RespAttr:
+ // Read key & value.
+ for i := 0; i < n*2; i++ {
+ if err = r.DiscardNext(); err != nil {
+ return err
+ }
+ }
+ return nil
+ }
+
+ return fmt.Errorf("redis: can't parse %.100q", line)
+}
+
+func replyLen(line []byte) (n int, err error) {
+ n, err = util.Atoi(line[1:])
+ if err != nil {
+ return 0, err
+ }
+
+ if n < -1 {
+ return 0, fmt.Errorf("redis: invalid reply: %q", line)
+ }
+
+ switch line[0] {
+ case RespString, RespVerbatim, RespBlobError,
+ RespArray, RespSet, RespPush, RespMap, RespAttr:
+ if n == -1 {
+ return 0, Nil
+ }
+ }
+ return n, nil
+}
+
+// IsNilReply detects redis.Nil of RESP2.
+func IsNilReply(line []byte) bool {
+ return len(line) == 3 &&
+ (line[0] == RespString || line[0] == RespArray) &&
+ line[1] == '-' && line[2] == '1'
+}
diff --git a/dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.5.1/internal/proto/reader_test.go b/dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.5.1/internal/proto/reader_test.go
new file mode 100644
index 0000000..2d5f56c
--- /dev/null
+++ b/dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.5.1/internal/proto/reader_test.go
@@ -0,0 +1,100 @@
+package proto_test
+
+import (
+ "bytes"
+ "io"
+ "testing"
+
+ "github.com/redis/go-redis/v9/internal/proto"
+)
+
+func BenchmarkReader_ParseReply_Status(b *testing.B) {
+ benchmarkParseReply(b, "+OK\r\n", false)
+}
+
+func BenchmarkReader_ParseReply_Int(b *testing.B) {
+ benchmarkParseReply(b, ":1\r\n", false)
+}
+
+func BenchmarkReader_ParseReply_Float(b *testing.B) {
+ benchmarkParseReply(b, ",123.456\r\n", false)
+}
+
+func BenchmarkReader_ParseReply_Bool(b *testing.B) {
+ benchmarkParseReply(b, "#t\r\n", false)
+}
+
+func BenchmarkReader_ParseReply_BigInt(b *testing.B) {
+ benchmarkParseReply(b, "(3492890328409238509324850943850943825024385\r\n", false)
+}
+
+func BenchmarkReader_ParseReply_Error(b *testing.B) {
+ benchmarkParseReply(b, "-Error message\r\n", true)
+}
+
+func BenchmarkReader_ParseReply_Nil(b *testing.B) {
+ benchmarkParseReply(b, "_\r\n", true)
+}
+
+func BenchmarkReader_ParseReply_BlobError(b *testing.B) {
+ benchmarkParseReply(b, "!21\r\nSYNTAX invalid syntax", true)
+}
+
+func BenchmarkReader_ParseReply_String(b *testing.B) {
+ benchmarkParseReply(b, "$5\r\nhello\r\n", false)
+}
+
+func BenchmarkReader_ParseReply_Verb(b *testing.B) {
+ benchmarkParseReply(b, "$9\r\ntxt:hello\r\n", false)
+}
+
+func BenchmarkReader_ParseReply_Slice(b *testing.B) {
+ benchmarkParseReply(b, "*2\r\n$5\r\nhello\r\n$5\r\nworld\r\n", false)
+}
+
+func BenchmarkReader_ParseReply_Set(b *testing.B) {
+ benchmarkParseReply(b, "~2\r\n$5\r\nhello\r\n$5\r\nworld\r\n", false)
+}
+
+func BenchmarkReader_ParseReply_Push(b *testing.B) {
+ benchmarkParseReply(b, ">2\r\n$5\r\nhello\r\n$5\r\nworld\r\n", false)
+}
+
+func BenchmarkReader_ParseReply_Map(b *testing.B) {
+ benchmarkParseReply(b, "%2\r\n$5\r\nhello\r\n$5\r\nworld\r\n+key\r\n+value\r\n", false)
+}
+
+func BenchmarkReader_ParseReply_Attr(b *testing.B) {
+ benchmarkParseReply(b, "%1\r\n+key\r\n+value\r\n+hello\r\n", false)
+}
+
+func TestReader_ReadLine(t *testing.T) {
+ original := bytes.Repeat([]byte("a"), 8192)
+ original[len(original)-2] = '\r'
+ original[len(original)-1] = '\n'
+ r := proto.NewReader(bytes.NewReader(original))
+ read, err := r.ReadLine()
+ if err != nil && err != io.EOF {
+ t.Errorf("Should be able to read the full buffer: %v", err)
+ }
+
+ if !bytes.Equal(read, original[:len(original)-2]) {
+ t.Errorf("Values must be equal: %d expected %d", len(read), len(original[:len(original)-2]))
+ }
+}
+
+func benchmarkParseReply(b *testing.B, reply string, wanterr bool) {
+ buf := new(bytes.Buffer)
+ for i := 0; i < b.N; i++ {
+ buf.WriteString(reply)
+ }
+ p := proto.NewReader(buf)
+ b.ResetTimer()
+
+ for i := 0; i < b.N; i++ {
+ _, err := p.ReadReply()
+ if !wanterr && err != nil {
+ b.Fatal(err)
+ }
+ }
+}
diff --git a/dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/internal/proto/scan.go b/dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.5.1/internal/proto/scan.go
index 0e99476..5223069 100644
--- a/dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/internal/proto/scan.go
+++ b/dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.5.1/internal/proto/scan.go
@@ -3,13 +3,15 @@ package proto
import (
"encoding"
"fmt"
+ "net"
"reflect"
"time"
- "github.com/go-redis/redis/v8/internal/util"
+ "github.com/redis/go-redis/v9/internal/util"
)
// Scan parses bytes `b` to `v` with appropriate type.
+//
//nolint:gocyclo
func Scan(b []byte, v interface{}) error {
switch v := v.(type) {
@@ -115,6 +117,9 @@ func Scan(b []byte, v interface{}) error {
return nil
case encoding.BinaryUnmarshaler:
return v.UnmarshalBinary(b)
+ case *net.IP:
+ *v = b
+ return nil
default:
return fmt.Errorf(
"redis: can't unmarshal %T (consider implementing BinaryUnmarshaler)", v)
diff --git a/dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/internal/proto/scan_test.go b/dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.5.1/internal/proto/scan_test.go
index 55df550..e0cd2f7 100644
--- a/dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/internal/proto/scan_test.go
+++ b/dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.5.1/internal/proto/scan_test.go
@@ -3,10 +3,10 @@ package proto_test
import (
"encoding/json"
- . "github.com/onsi/ginkgo"
- . "github.com/onsi/gomega"
+ . "github.com/bsm/ginkgo/v2"
+ . "github.com/bsm/gomega"
- "github.com/go-redis/redis/v8/internal/proto"
+ "github.com/redis/go-redis/v9/internal/proto"
)
type testScanSliceStruct struct {
diff --git a/dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/internal/proto/writer.go b/dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.5.1/internal/proto/writer.go
index c426098..78595cc 100644
--- a/dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/internal/proto/writer.go
+++ b/dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.5.1/internal/proto/writer.go
@@ -4,16 +4,17 @@ import (
"encoding"
"fmt"
"io"
+ "net"
"strconv"
"time"
- "github.com/go-redis/redis/v8/internal/util"
+ "github.com/redis/go-redis/v9/internal/util"
)
type writer interface {
io.Writer
io.ByteWriter
- // io.StringWriter
+ // WriteString implement io.StringWriter.
WriteString(s string) (n int, err error)
}
@@ -34,7 +35,7 @@ func NewWriter(wr writer) *Writer {
}
func (w *Writer) WriteArgs(args []interface{}) error {
- if err := w.WriteByte(ArrayReply); err != nil {
+ if err := w.WriteByte(RespArray); err != nil {
return err
}
@@ -64,37 +65,68 @@ func (w *Writer) WriteArg(v interface{}) error {
return w.string("")
case string:
return w.string(v)
+ case *string:
+ return w.string(*v)
case []byte:
return w.bytes(v)
case int:
return w.int(int64(v))
+ case *int:
+ return w.int(int64(*v))
case int8:
return w.int(int64(v))
+ case *int8:
+ return w.int(int64(*v))
case int16:
return w.int(int64(v))
+ case *int16:
+ return w.int(int64(*v))
case int32:
return w.int(int64(v))
+ case *int32:
+ return w.int(int64(*v))
case int64:
return w.int(v)
+ case *int64:
+ return w.int(*v)
case uint:
return w.uint(uint64(v))
+ case *uint:
+ return w.uint(uint64(*v))
case uint8:
return w.uint(uint64(v))
+ case *uint8:
+ return w.uint(uint64(*v))
case uint16:
return w.uint(uint64(v))
+ case *uint16:
+ return w.uint(uint64(*v))
case uint32:
return w.uint(uint64(v))
+ case *uint32:
+ return w.uint(uint64(*v))
case uint64:
return w.uint(v)
+ case *uint64:
+ return w.uint(*v)
case float32:
return w.float(float64(v))
+ case *float32:
+ return w.float(float64(*v))
case float64:
return w.float(v)
+ case *float64:
+ return w.float(*v)
case bool:
if v {
return w.int(1)
}
return w.int(0)
+ case *bool:
+ if *v {
+ return w.int(1)
+ }
+ return w.int(0)
case time.Time:
w.numBuf = v.AppendFormat(w.numBuf[:0], time.RFC3339Nano)
return w.bytes(w.numBuf)
@@ -106,6 +138,8 @@ func (w *Writer) WriteArg(v interface{}) error {
return err
}
return w.bytes(b)
+ case net.IP:
+ return w.bytes(v)
default:
return fmt.Errorf(
"redis: can't marshal %T (implement encoding.BinaryMarshaler)", v)
@@ -113,7 +147,7 @@ func (w *Writer) WriteArg(v interface{}) error {
}
func (w *Writer) bytes(b []byte) error {
- if err := w.WriteByte(StringReply); err != nil {
+ if err := w.WriteByte(RespString); err != nil {
return err
}
diff --git a/dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.5.1/internal/proto/writer_test.go b/dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.5.1/internal/proto/writer_test.go
new file mode 100644
index 0000000..7c9d208
--- /dev/null
+++ b/dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.5.1/internal/proto/writer_test.go
@@ -0,0 +1,154 @@
+package proto_test
+
+import (
+ "bytes"
+ "encoding"
+ "fmt"
+ "net"
+ "testing"
+ "time"
+
+ . "github.com/bsm/ginkgo/v2"
+ . "github.com/bsm/gomega"
+
+ "github.com/redis/go-redis/v9/internal/proto"
+ "github.com/redis/go-redis/v9/internal/util"
+)
+
+type MyType struct{}
+
+var _ encoding.BinaryMarshaler = (*MyType)(nil)
+
+func (t *MyType) MarshalBinary() ([]byte, error) {
+ return []byte("hello"), nil
+}
+
+var _ = Describe("WriteBuffer", func() {
+ var buf *bytes.Buffer
+ var wr *proto.Writer
+
+ BeforeEach(func() {
+ buf = new(bytes.Buffer)
+ wr = proto.NewWriter(buf)
+ })
+
+ It("should write args", func() {
+ err := wr.WriteArgs([]interface{}{
+ "string",
+ 12,
+ 34.56,
+ []byte{'b', 'y', 't', 'e', 's'},
+ true,
+ nil,
+ })
+ Expect(err).NotTo(HaveOccurred())
+
+ Expect(buf.Bytes()).To(Equal([]byte("*6\r\n" +
+ "$6\r\nstring\r\n" +
+ "$2\r\n12\r\n" +
+ "$5\r\n34.56\r\n" +
+ "$5\r\nbytes\r\n" +
+ "$1\r\n1\r\n" +
+ "$0\r\n" +
+ "\r\n")))
+ })
+
+ It("should append time", func() {
+ tm := time.Date(2019, 1, 1, 9, 45, 10, 222125, time.UTC)
+ err := wr.WriteArgs([]interface{}{tm})
+ Expect(err).NotTo(HaveOccurred())
+
+ Expect(buf.Len()).To(Equal(41))
+ })
+
+ It("should append marshalable args", func() {
+ err := wr.WriteArgs([]interface{}{&MyType{}})
+ Expect(err).NotTo(HaveOccurred())
+
+ Expect(buf.Len()).To(Equal(15))
+ })
+
+ It("should append net.IP", func() {
+ ip := net.ParseIP("192.168.1.1")
+ err := wr.WriteArgs([]interface{}{ip})
+ Expect(err).NotTo(HaveOccurred())
+ Expect(buf.String()).To(Equal(fmt.Sprintf("*1\r\n$16\r\n%s\r\n", bytes.NewBuffer(ip))))
+ })
+})
+
+type discard struct{}
+
+func (discard) Write(b []byte) (int, error) {
+ return len(b), nil
+}
+
+func (discard) WriteString(s string) (int, error) {
+ return len(s), nil
+}
+
+func (discard) WriteByte(c byte) error {
+ return nil
+}
+
+func BenchmarkWriteBuffer_Append(b *testing.B) {
+ buf := proto.NewWriter(discard{})
+ args := []interface{}{"hello", "world", "foo", "bar"}
+
+ for i := 0; i < b.N; i++ {
+ err := buf.WriteArgs(args)
+ if err != nil {
+ b.Fatal(err)
+ }
+ }
+}
+
+var _ = Describe("WriteArg", func() {
+ var buf *bytes.Buffer
+ var wr *proto.Writer
+
+ BeforeEach(func() {
+ buf = new(bytes.Buffer)
+ wr = proto.NewWriter(buf)
+ })
+
+ args := map[any]string{
+ "hello": "$5\r\nhello\r\n",
+ int(10): "$2\r\n10\r\n",
+ util.ToPtr(int(10)): "$2\r\n10\r\n",
+ int8(10): "$2\r\n10\r\n",
+ util.ToPtr(int8(10)): "$2\r\n10\r\n",
+ int16(10): "$2\r\n10\r\n",
+ util.ToPtr(int16(10)): "$2\r\n10\r\n",
+ int32(10): "$2\r\n10\r\n",
+ util.ToPtr(int32(10)): "$2\r\n10\r\n",
+ int64(10): "$2\r\n10\r\n",
+ util.ToPtr(int64(10)): "$2\r\n10\r\n",
+ uint(10): "$2\r\n10\r\n",
+ util.ToPtr(uint(10)): "$2\r\n10\r\n",
+ uint8(10): "$2\r\n10\r\n",
+ util.ToPtr(uint8(10)): "$2\r\n10\r\n",
+ uint16(10): "$2\r\n10\r\n",
+ util.ToPtr(uint16(10)): "$2\r\n10\r\n",
+ uint32(10): "$2\r\n10\r\n",
+ util.ToPtr(uint32(10)): "$2\r\n10\r\n",
+ uint64(10): "$2\r\n10\r\n",
+ util.ToPtr(uint64(10)): "$2\r\n10\r\n",
+ float32(10.3): "$18\r\n10.300000190734863\r\n",
+ util.ToPtr(float32(10.3)): "$18\r\n10.300000190734863\r\n",
+ float64(10.3): "$4\r\n10.3\r\n",
+ util.ToPtr(float64(10.3)): "$4\r\n10.3\r\n",
+ bool(true): "$1\r\n1\r\n",
+ bool(false): "$1\r\n0\r\n",
+ util.ToPtr(bool(true)): "$1\r\n1\r\n",
+ util.ToPtr(bool(false)): "$1\r\n0\r\n",
+ }
+
+ for arg, expect := range args {
+ arg, expect := arg, expect
+ It(fmt.Sprintf("should write arg of type %T", arg), func() {
+ err := wr.WriteArg(arg)
+ Expect(err).NotTo(HaveOccurred())
+ Expect(buf.String()).To(Equal(expect))
+ })
+ }
+})
diff --git a/dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/internal/rand/rand.go b/dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.5.1/internal/rand/rand.go
index 2edccba..2edccba 100644
--- a/dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/internal/rand/rand.go
+++ b/dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.5.1/internal/rand/rand.go
diff --git a/dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/internal/util.go b/dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.5.1/internal/util.go
index e34a7f0..ed81ad7 100644
--- a/dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/internal/util.go
+++ b/dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.5.1/internal/util.go
@@ -2,9 +2,10 @@ package internal
import (
"context"
+ "strings"
"time"
- "github.com/go-redis/redis/v8/internal/util"
+ "github.com/redis/go-redis/v9/internal/util"
)
func Sleep(ctx context.Context, dur time.Duration) error {
@@ -44,3 +45,22 @@ func isLower(s string) bool {
}
return true
}
+
+func ReplaceSpaces(s string) string {
+ // Pre-allocate a builder with the same length as s to minimize allocations.
+ // This is a basic optimization; adjust the initial size based on your use case.
+ var builder strings.Builder
+ builder.Grow(len(s))
+
+ for _, char := range s {
+ if char == ' ' {
+ // Replace space with a hyphen.
+ builder.WriteRune('-')
+ } else {
+ // Copy the character as-is.
+ builder.WriteRune(char)
+ }
+ }
+
+ return builder.String()
+}
diff --git a/dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/internal/util/safe.go b/dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.5.1/internal/util/safe.go
index 2130711..8178f86 100644
--- a/dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/internal/util/safe.go
+++ b/dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.5.1/internal/util/safe.go
@@ -1,5 +1,4 @@
//go:build appengine
-// +build appengine
package util
diff --git a/dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/internal/util/strconv.go b/dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.5.1/internal/util/strconv.go
index db50338..db50338 100644
--- a/dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/internal/util/strconv.go
+++ b/dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.5.1/internal/util/strconv.go
diff --git a/dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.5.1/internal/util/type.go b/dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.5.1/internal/util/type.go
new file mode 100644
index 0000000..a7ea712
--- /dev/null
+++ b/dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.5.1/internal/util/type.go
@@ -0,0 +1,5 @@
+package util
+
+func ToPtr[T any](v T) *T {
+ return &v
+}
diff --git a/dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/internal/util/unsafe.go b/dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.5.1/internal/util/unsafe.go
index daa8d76..cbcd2cc 100644
--- a/dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/internal/util/unsafe.go
+++ b/dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.5.1/internal/util/unsafe.go
@@ -1,5 +1,4 @@
//go:build !appengine
-// +build !appengine
package util
diff --git a/dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.5.1/internal/util_test.go b/dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.5.1/internal/util_test.go
new file mode 100644
index 0000000..f090eba
--- /dev/null
+++ b/dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.5.1/internal/util_test.go
@@ -0,0 +1,53 @@
+package internal
+
+import (
+ "strings"
+ "testing"
+
+ . "github.com/bsm/ginkgo/v2"
+ . "github.com/bsm/gomega"
+)
+
+func BenchmarkToLowerStd(b *testing.B) {
+ str := "AaBbCcDdEeFfGgHhIiJjKk"
+ for i := 0; i < b.N; i++ {
+ _ = strings.ToLower(str)
+ }
+}
+
+// util.ToLower is 3x faster than strings.ToLower.
+func BenchmarkToLowerInternal(b *testing.B) {
+ str := "AaBbCcDdEeFfGgHhIiJjKk"
+ for i := 0; i < b.N; i++ {
+ _ = ToLower(str)
+ }
+}
+
+func TestToLower(t *testing.T) {
+ It("toLower", func() {
+ str := "AaBbCcDdEeFfGg"
+ Expect(ToLower(str)).To(Equal(strings.ToLower(str)))
+
+ str = "ABCDE"
+ Expect(ToLower(str)).To(Equal(strings.ToLower(str)))
+
+ str = "ABCDE"
+ Expect(ToLower(str)).To(Equal(strings.ToLower(str)))
+
+ str = "abced"
+ Expect(ToLower(str)).To(Equal(strings.ToLower(str)))
+ })
+}
+
+func TestIsLower(t *testing.T) {
+ It("isLower", func() {
+ str := "AaBbCcDdEeFfGg"
+ Expect(isLower(str)).To(BeFalse())
+
+ str = "ABCDE"
+ Expect(isLower(str)).To(BeFalse())
+
+ str = "abcdefg"
+ Expect(isLower(str)).To(BeTrue())
+ })
+}
diff --git a/dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.5.1/internal_test.go b/dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.5.1/internal_test.go
new file mode 100644
index 0000000..a631719
--- /dev/null
+++ b/dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.5.1/internal_test.go
@@ -0,0 +1,354 @@
+package redis
+
+import (
+ "context"
+ "fmt"
+ "reflect"
+ "sync"
+ "sync/atomic"
+ "testing"
+ "time"
+
+ "github.com/redis/go-redis/v9/internal/pool"
+ "github.com/redis/go-redis/v9/internal/proto"
+
+ . "github.com/bsm/ginkgo/v2"
+ . "github.com/bsm/gomega"
+)
+
+var _ = Describe("newClusterState", func() {
+ var state *clusterState
+
+ createClusterState := func(slots []ClusterSlot) *clusterState {
+ opt := &ClusterOptions{}
+ opt.init()
+ nodes := newClusterNodes(opt)
+ state, err := newClusterState(nodes, slots, "10.10.10.10:1234")
+ Expect(err).NotTo(HaveOccurred())
+ return state
+ }
+
+ Describe("sorting", func() {
+ BeforeEach(func() {
+ state = createClusterState([]ClusterSlot{{
+ Start: 1000,
+ End: 1999,
+ }, {
+ Start: 0,
+ End: 999,
+ }, {
+ Start: 2000,
+ End: 2999,
+ }})
+ })
+
+ It("sorts slots", func() {
+ Expect(state.slots).To(Equal([]*clusterSlot{
+ {start: 0, end: 999, nodes: nil},
+ {start: 1000, end: 1999, nodes: nil},
+ {start: 2000, end: 2999, nodes: nil},
+ }))
+ })
+ })
+
+ Describe("loopback", func() {
+ BeforeEach(func() {
+ state = createClusterState([]ClusterSlot{{
+ Nodes: []ClusterNode{{Addr: "127.0.0.1:7001"}},
+ }, {
+ Nodes: []ClusterNode{{Addr: "127.0.0.1:7002"}},
+ }, {
+ Nodes: []ClusterNode{{Addr: "1.2.3.4:1234"}},
+ }, {
+ Nodes: []ClusterNode{{Addr: ":1234"}},
+ }})
+ })
+
+ It("replaces loopback hosts in addresses", func() {
+ slotAddr := func(slot *clusterSlot) string {
+ return slot.nodes[0].Client.Options().Addr
+ }
+
+ Expect(slotAddr(state.slots[0])).To(Equal("10.10.10.10:7001"))
+ Expect(slotAddr(state.slots[1])).To(Equal("10.10.10.10:7002"))
+ Expect(slotAddr(state.slots[2])).To(Equal("1.2.3.4:1234"))
+ Expect(slotAddr(state.slots[3])).To(Equal(":1234"))
+ })
+ })
+})
+
+type fixedHash string
+
+func (h fixedHash) Get(string) string {
+ return string(h)
+}
+
+func TestRingSetAddrsAndRebalanceRace(t *testing.T) {
+ const (
+ ringShard1Name = "ringShardOne"
+ ringShard2Name = "ringShardTwo"
+
+ ringShard1Port = "6390"
+ ringShard2Port = "6391"
+ )
+
+ ring := NewRing(&RingOptions{
+ Addrs: map[string]string{
+ ringShard1Name: ":" + ringShard1Port,
+ },
+ // Disable heartbeat
+ HeartbeatFrequency: 1 * time.Hour,
+ NewConsistentHash: func(shards []string) ConsistentHash {
+ switch len(shards) {
+ case 1:
+ return fixedHash(ringShard1Name)
+ case 2:
+ return fixedHash(ringShard2Name)
+ default:
+ t.Fatalf("Unexpected number of shards: %v", shards)
+ return nil
+ }
+ },
+ })
+ defer ring.Close()
+
+ // Continuously update addresses by adding and removing one address
+ updatesDone := make(chan struct{})
+ defer func() { close(updatesDone) }()
+ go func() {
+ for i := 0; ; i++ {
+ select {
+ case <-updatesDone:
+ return
+ default:
+ if i%2 == 0 {
+ ring.SetAddrs(map[string]string{
+ ringShard1Name: ":" + ringShard1Port,
+ })
+ } else {
+ ring.SetAddrs(map[string]string{
+ ringShard1Name: ":" + ringShard1Port,
+ ringShard2Name: ":" + ringShard2Port,
+ })
+ }
+ }
+ }
+ }()
+
+ timer := time.NewTimer(1 * time.Second)
+ for running := true; running; {
+ select {
+ case <-timer.C:
+ running = false
+ default:
+ shard, err := ring.sharding.GetByKey("whatever")
+ if err == nil && shard == nil {
+ t.Fatal("shard is nil")
+ }
+ }
+ }
+}
+
+func BenchmarkRingShardingRebalanceLocked(b *testing.B) {
+ opts := &RingOptions{
+ Addrs: make(map[string]string),
+ // Disable heartbeat
+ HeartbeatFrequency: 1 * time.Hour,
+ }
+ for i := 0; i < 100; i++ {
+ opts.Addrs[fmt.Sprintf("shard%d", i)] = fmt.Sprintf(":63%02d", i)
+ }
+
+ ring := NewRing(opts)
+ defer ring.Close()
+
+ b.ResetTimer()
+ for i := 0; i < b.N; i++ {
+ ring.sharding.rebalanceLocked()
+ }
+}
+
+type testCounter struct {
+ mu sync.Mutex
+ t *testing.T
+ m map[string]int
+}
+
+func newTestCounter(t *testing.T) *testCounter {
+ return &testCounter{t: t, m: make(map[string]int)}
+}
+
+func (ct *testCounter) increment(key string) {
+ ct.mu.Lock()
+ defer ct.mu.Unlock()
+ ct.m[key]++
+}
+
+func (ct *testCounter) expect(values map[string]int) {
+ ct.mu.Lock()
+ defer ct.mu.Unlock()
+ ct.t.Helper()
+ if !reflect.DeepEqual(values, ct.m) {
+ ct.t.Errorf("expected %v != actual %v", values, ct.m)
+ }
+}
+
+func TestRingShardsCleanup(t *testing.T) {
+ const (
+ ringShard1Name = "ringShardOne"
+ ringShard2Name = "ringShardTwo"
+
+ ringShard1Addr = "shard1.test"
+ ringShard2Addr = "shard2.test"
+ )
+
+ t.Run("closes unused shards", func(t *testing.T) {
+ closeCounter := newTestCounter(t)
+
+ ring := NewRing(&RingOptions{
+ Addrs: map[string]string{
+ ringShard1Name: ringShard1Addr,
+ ringShard2Name: ringShard2Addr,
+ },
+ NewClient: func(opt *Options) *Client {
+ c := NewClient(opt)
+ c.baseClient.onClose = func() error {
+ closeCounter.increment(opt.Addr)
+ return nil
+ }
+ return c
+ },
+ })
+ closeCounter.expect(map[string]int{})
+
+ // no change due to the same addresses
+ ring.SetAddrs(map[string]string{
+ ringShard1Name: ringShard1Addr,
+ ringShard2Name: ringShard2Addr,
+ })
+ closeCounter.expect(map[string]int{})
+
+ ring.SetAddrs(map[string]string{
+ ringShard1Name: ringShard1Addr,
+ })
+ closeCounter.expect(map[string]int{ringShard2Addr: 1})
+
+ ring.SetAddrs(map[string]string{
+ ringShard2Name: ringShard2Addr,
+ })
+ closeCounter.expect(map[string]int{ringShard1Addr: 1, ringShard2Addr: 1})
+
+ ring.Close()
+ closeCounter.expect(map[string]int{ringShard1Addr: 1, ringShard2Addr: 2})
+ })
+
+ t.Run("closes created shards if ring was closed", func(t *testing.T) {
+ createCounter := newTestCounter(t)
+ closeCounter := newTestCounter(t)
+
+ var (
+ ring *Ring
+ shouldClose int32
+ )
+
+ ring = NewRing(&RingOptions{
+ Addrs: map[string]string{
+ ringShard1Name: ringShard1Addr,
+ },
+ NewClient: func(opt *Options) *Client {
+ if atomic.LoadInt32(&shouldClose) != 0 {
+ ring.Close()
+ }
+ createCounter.increment(opt.Addr)
+ c := NewClient(opt)
+ c.baseClient.onClose = func() error {
+ closeCounter.increment(opt.Addr)
+ return nil
+ }
+ return c
+ },
+ })
+ createCounter.expect(map[string]int{ringShard1Addr: 1})
+ closeCounter.expect(map[string]int{})
+
+ atomic.StoreInt32(&shouldClose, 1)
+
+ ring.SetAddrs(map[string]string{
+ ringShard2Name: ringShard2Addr,
+ })
+ createCounter.expect(map[string]int{ringShard1Addr: 1, ringShard2Addr: 1})
+ closeCounter.expect(map[string]int{ringShard1Addr: 1, ringShard2Addr: 1})
+ })
+}
+
+//------------------------------------------------------------------------------
+
+type timeoutErr struct {
+ error
+}
+
+func (e timeoutErr) Timeout() bool {
+ return true
+}
+
+func (e timeoutErr) Temporary() bool {
+ return true
+}
+
+func (e timeoutErr) Error() string {
+ return "i/o timeout"
+}
+
+var _ = Describe("withConn", func() {
+ var client *Client
+
+ BeforeEach(func() {
+ client = NewClient(&Options{
+ PoolSize: 1,
+ })
+ })
+
+ AfterEach(func() {
+ client.Close()
+ })
+
+ It("should replace the connection in the pool when there is no error", func() {
+ var conn *pool.Conn
+
+ client.withConn(ctx, func(ctx context.Context, c *pool.Conn) error {
+ conn = c
+ return nil
+ })
+
+ newConn, err := client.connPool.Get(ctx)
+ Expect(err).To(BeNil())
+ Expect(newConn).To(Equal(conn))
+ })
+
+ It("should replace the connection in the pool when there is an error not related to a bad connection", func() {
+ var conn *pool.Conn
+
+ client.withConn(ctx, func(ctx context.Context, c *pool.Conn) error {
+ conn = c
+ return proto.RedisError("LOADING")
+ })
+
+ newConn, err := client.connPool.Get(ctx)
+ Expect(err).To(BeNil())
+ Expect(newConn).To(Equal(conn))
+ })
+
+ It("should remove the connection from the pool when it times out", func() {
+ var conn *pool.Conn
+
+ client.withConn(ctx, func(ctx context.Context, c *pool.Conn) error {
+ conn = c
+ return timeoutErr{}
+ })
+
+ newConn, err := client.connPool.Get(ctx)
+ Expect(err).To(BeNil())
+ Expect(newConn).NotTo(Equal(conn))
+ Expect(client.connPool.Len()).To(Equal(1))
+ })
+})
diff --git a/dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/iterator.go b/dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.5.1/iterator.go
index 2f8bc2b..cd1a828 100644
--- a/dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/iterator.go
+++ b/dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.5.1/iterator.go
@@ -2,30 +2,21 @@ package redis
import (
"context"
- "sync"
)
// ScanIterator is used to incrementally iterate over a collection of elements.
-// It's safe for concurrent use by multiple goroutines.
type ScanIterator struct {
- mu sync.Mutex // protects Scanner and pos
cmd *ScanCmd
pos int
}
// Err returns the last iterator error, if any.
func (it *ScanIterator) Err() error {
- it.mu.Lock()
- err := it.cmd.Err()
- it.mu.Unlock()
- return err
+ return it.cmd.Err()
}
// Next advances the cursor and returns true if more values can be read.
func (it *ScanIterator) Next(ctx context.Context) bool {
- it.mu.Lock()
- defer it.mu.Unlock()
-
// Instantly return on errors.
if it.cmd.Err() != nil {
return false
@@ -68,10 +59,8 @@ func (it *ScanIterator) Next(ctx context.Context) bool {
// Val returns the key/field at the current cursor position.
func (it *ScanIterator) Val() string {
var v string
- it.mu.Lock()
if it.cmd.Err() == nil && it.pos > 0 && it.pos <= len(it.cmd.page) {
v = it.cmd.page[it.pos-1]
}
- it.mu.Unlock()
return v
}
diff --git a/dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/iterator_test.go b/dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.5.1/iterator_test.go
index 68c8b77..ccd9414 100644
--- a/dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/iterator_test.go
+++ b/dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.5.1/iterator_test.go
@@ -3,10 +3,10 @@ package redis_test
import (
"fmt"
- . "github.com/onsi/ginkgo"
- . "github.com/onsi/gomega"
+ . "github.com/bsm/ginkgo/v2"
+ . "github.com/bsm/gomega"
- "github.com/go-redis/redis/v8"
+ "github.com/redis/go-redis/v9"
)
var _ = Describe("ScanIterator", func() {
diff --git a/dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.5.1/json.go b/dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.5.1/json.go
new file mode 100644
index 0000000..ca731db
--- /dev/null
+++ b/dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.5.1/json.go
@@ -0,0 +1,599 @@
+package redis
+
+import (
+ "context"
+ "encoding/json"
+ "strings"
+
+ "github.com/redis/go-redis/v9/internal/proto"
+ "github.com/redis/go-redis/v9/internal/util"
+)
+
+// -------------------------------------------
+
+type JSONCmdable interface {
+ JSONArrAppend(ctx context.Context, key, path string, values ...interface{}) *IntSliceCmd
+ JSONArrIndex(ctx context.Context, key, path string, value ...interface{}) *IntSliceCmd
+ JSONArrIndexWithArgs(ctx context.Context, key, path string, options *JSONArrIndexArgs, value ...interface{}) *IntSliceCmd
+ JSONArrInsert(ctx context.Context, key, path string, index int64, values ...interface{}) *IntSliceCmd
+ JSONArrLen(ctx context.Context, key, path string) *IntSliceCmd
+ JSONArrPop(ctx context.Context, key, path string, index int) *StringSliceCmd
+ JSONArrTrim(ctx context.Context, key, path string) *IntSliceCmd
+ JSONArrTrimWithArgs(ctx context.Context, key, path string, options *JSONArrTrimArgs) *IntSliceCmd
+ JSONClear(ctx context.Context, key, path string) *IntCmd
+ JSONDebugMemory(ctx context.Context, key, path string) *IntCmd
+ JSONDel(ctx context.Context, key, path string) *IntCmd
+ JSONForget(ctx context.Context, key, path string) *IntCmd
+ JSONGet(ctx context.Context, key string, paths ...string) *JSONCmd
+ JSONGetWithArgs(ctx context.Context, key string, options *JSONGetArgs, paths ...string) *JSONCmd
+ JSONMerge(ctx context.Context, key, path string, value string) *StatusCmd
+ JSONMSetArgs(ctx context.Context, docs []JSONSetArgs) *StatusCmd
+ JSONMSet(ctx context.Context, params ...interface{}) *StatusCmd
+ JSONMGet(ctx context.Context, path string, keys ...string) *JSONSliceCmd
+ JSONNumIncrBy(ctx context.Context, key, path string, value float64) *JSONCmd
+ JSONObjKeys(ctx context.Context, key, path string) *SliceCmd
+ JSONObjLen(ctx context.Context, key, path string) *IntPointerSliceCmd
+ JSONSet(ctx context.Context, key, path string, value interface{}) *StatusCmd
+ JSONSetMode(ctx context.Context, key, path string, value interface{}, mode string) *StatusCmd
+ JSONStrAppend(ctx context.Context, key, path, value string) *IntPointerSliceCmd
+ JSONStrLen(ctx context.Context, key, path string) *IntPointerSliceCmd
+ JSONToggle(ctx context.Context, key, path string) *IntPointerSliceCmd
+ JSONType(ctx context.Context, key, path string) *JSONSliceCmd
+}
+
+type JSONSetArgs struct {
+ Key string
+ Path string
+ Value interface{}
+}
+
+type JSONArrIndexArgs struct {
+ Start int
+ Stop *int
+}
+
+type JSONArrTrimArgs struct {
+ Start int
+ Stop *int
+}
+
+type JSONCmd struct {
+ baseCmd
+ val string
+ expanded []interface{}
+}
+
+var _ Cmder = (*JSONCmd)(nil)
+
+func newJSONCmd(ctx context.Context, args ...interface{}) *JSONCmd {
+ return &JSONCmd{
+ baseCmd: baseCmd{
+ ctx: ctx,
+ args: args,
+ },
+ }
+}
+
+func (cmd *JSONCmd) String() string {
+ return cmdString(cmd, cmd.val)
+}
+
+func (cmd *JSONCmd) SetVal(val string) {
+ cmd.val = val
+}
+
+func (cmd *JSONCmd) Val() string {
+ if len(cmd.val) == 0 && cmd.expanded != nil {
+ val, err := json.Marshal(cmd.expanded)
+ if err != nil {
+ cmd.SetErr(err)
+ return ""
+ }
+ return string(val)
+
+ } else {
+ return cmd.val
+ }
+}
+
+func (cmd *JSONCmd) Result() (string, error) {
+ return cmd.Val(), cmd.Err()
+}
+
+func (cmd JSONCmd) Expanded() (interface{}, error) {
+ if len(cmd.val) != 0 && cmd.expanded == nil {
+ err := json.Unmarshal([]byte(cmd.val), &cmd.expanded)
+ if err != nil {
+ return "", err
+ }
+ }
+
+ return cmd.expanded, nil
+}
+
+func (cmd *JSONCmd) readReply(rd *proto.Reader) error {
+ // nil response from JSON.(M)GET (cmd.baseCmd.err will be "redis: nil")
+ if cmd.baseCmd.Err() == Nil {
+ cmd.val = ""
+ return Nil
+ }
+
+ if readType, err := rd.PeekReplyType(); err != nil {
+ return err
+ } else if readType == proto.RespArray {
+
+ size, err := rd.ReadArrayLen()
+ if err != nil {
+ return err
+ }
+
+ expanded := make([]interface{}, size)
+
+ for i := 0; i < size; i++ {
+ if expanded[i], err = rd.ReadReply(); err != nil {
+ return err
+ }
+ }
+ cmd.expanded = expanded
+
+ } else {
+ if str, err := rd.ReadString(); err != nil && err != Nil {
+ return err
+ } else if str == "" || err == Nil {
+ cmd.val = ""
+ } else {
+ cmd.val = str
+ }
+ }
+
+ return nil
+}
+
+// -------------------------------------------
+
+type JSONSliceCmd struct {
+ baseCmd
+ val []interface{}
+}
+
+func NewJSONSliceCmd(ctx context.Context, args ...interface{}) *JSONSliceCmd {
+ return &JSONSliceCmd{
+ baseCmd: baseCmd{
+ ctx: ctx,
+ args: args,
+ },
+ }
+}
+
+func (cmd *JSONSliceCmd) String() string {
+ return cmdString(cmd, cmd.val)
+}
+
+func (cmd *JSONSliceCmd) SetVal(val []interface{}) {
+ cmd.val = val
+}
+
+func (cmd *JSONSliceCmd) Val() []interface{} {
+ return cmd.val
+}
+
+func (cmd *JSONSliceCmd) Result() ([]interface{}, error) {
+ return cmd.val, cmd.err
+}
+
+func (cmd *JSONSliceCmd) readReply(rd *proto.Reader) error {
+ if cmd.baseCmd.Err() == Nil {
+ cmd.val = nil
+ return Nil
+ }
+
+ if readType, err := rd.PeekReplyType(); err != nil {
+ return err
+ } else if readType == proto.RespArray {
+ response, err := rd.ReadReply()
+ if err != nil {
+ return nil
+ } else {
+ cmd.val = response.([]interface{})
+ }
+
+ } else {
+ n, err := rd.ReadArrayLen()
+ if err != nil {
+ return err
+ }
+ cmd.val = make([]interface{}, n)
+ for i := 0; i < len(cmd.val); i++ {
+ switch s, err := rd.ReadString(); {
+ case err == Nil:
+ cmd.val[i] = ""
+ case err != nil:
+ return err
+ default:
+ cmd.val[i] = s
+ }
+ }
+ }
+ return nil
+}
+
+/*******************************************************************************
+*
+* IntPointerSliceCmd
+* used to represent a RedisJSON response where the result is either an integer or nil
+*
+*******************************************************************************/
+
+type IntPointerSliceCmd struct {
+ baseCmd
+ val []*int64
+}
+
+// NewIntPointerSliceCmd initialises an IntPointerSliceCmd
+func NewIntPointerSliceCmd(ctx context.Context, args ...interface{}) *IntPointerSliceCmd {
+ return &IntPointerSliceCmd{
+ baseCmd: baseCmd{
+ ctx: ctx,
+ args: args,
+ },
+ }
+}
+
+func (cmd *IntPointerSliceCmd) String() string {
+ return cmdString(cmd, cmd.val)
+}
+
+func (cmd *IntPointerSliceCmd) SetVal(val []*int64) {
+ cmd.val = val
+}
+
+func (cmd *IntPointerSliceCmd) Val() []*int64 {
+ return cmd.val
+}
+
+func (cmd *IntPointerSliceCmd) Result() ([]*int64, error) {
+ return cmd.val, cmd.err
+}
+
+func (cmd *IntPointerSliceCmd) readReply(rd *proto.Reader) error {
+ n, err := rd.ReadArrayLen()
+ if err != nil {
+ return err
+ }
+ cmd.val = make([]*int64, n)
+
+ for i := 0; i < len(cmd.val); i++ {
+ val, err := rd.ReadInt()
+ if err != nil && err != Nil {
+ return err
+ } else if err != Nil {
+ cmd.val[i] = &val
+ }
+ }
+
+ return nil
+}
+
+//------------------------------------------------------------------------------
+
+// JSONArrAppend adds the provided JSON values to the end of the array at the given path.
+// For more information, see https://redis.io/commands/json.arrappend
+func (c cmdable) JSONArrAppend(ctx context.Context, key, path string, values ...interface{}) *IntSliceCmd {
+ args := []interface{}{"JSON.ARRAPPEND", key, path}
+ args = append(args, values...)
+ cmd := NewIntSliceCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+// JSONArrIndex searches for the first occurrence of the provided JSON value in the array at the given path.
+// For more information, see https://redis.io/commands/json.arrindex
+func (c cmdable) JSONArrIndex(ctx context.Context, key, path string, value ...interface{}) *IntSliceCmd {
+ args := []interface{}{"JSON.ARRINDEX", key, path}
+ args = append(args, value...)
+ cmd := NewIntSliceCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+// JSONArrIndexWithArgs searches for the first occurrence of a JSON value in an array while allowing the start and
+// stop options to be provided.
+// For more information, see https://redis.io/commands/json.arrindex
+func (c cmdable) JSONArrIndexWithArgs(ctx context.Context, key, path string, options *JSONArrIndexArgs, value ...interface{}) *IntSliceCmd {
+ args := []interface{}{"JSON.ARRINDEX", key, path}
+ args = append(args, value...)
+
+ if options != nil {
+ args = append(args, options.Start)
+ if options.Stop != nil {
+ args = append(args, *options.Stop)
+ }
+ }
+ cmd := NewIntSliceCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+// JSONArrInsert inserts the JSON values into the array at the specified path before the index (shifts to the right).
+// For more information, see https://redis.io/commands/json.arrinsert
+func (c cmdable) JSONArrInsert(ctx context.Context, key, path string, index int64, values ...interface{}) *IntSliceCmd {
+ args := []interface{}{"JSON.ARRINSERT", key, path, index}
+ args = append(args, values...)
+ cmd := NewIntSliceCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+// JSONArrLen reports the length of the JSON array at the specified path in the given key.
+// For more information, see https://redis.io/commands/json.arrlen
+func (c cmdable) JSONArrLen(ctx context.Context, key, path string) *IntSliceCmd {
+ args := []interface{}{"JSON.ARRLEN", key, path}
+ cmd := NewIntSliceCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+// JSONArrPop removes and returns an element from the specified index in the array.
+// For more information, see https://redis.io/commands/json.arrpop
+func (c cmdable) JSONArrPop(ctx context.Context, key, path string, index int) *StringSliceCmd {
+ args := []interface{}{"JSON.ARRPOP", key, path, index}
+ cmd := NewStringSliceCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+// JSONArrTrim trims an array to contain only the specified inclusive range of elements.
+// For more information, see https://redis.io/commands/json.arrtrim
+func (c cmdable) JSONArrTrim(ctx context.Context, key, path string) *IntSliceCmd {
+ args := []interface{}{"JSON.ARRTRIM", key, path}
+ cmd := NewIntSliceCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+// JSONArrTrimWithArgs trims an array to contain only the specified inclusive range of elements.
+// For more information, see https://redis.io/commands/json.arrtrim
+func (c cmdable) JSONArrTrimWithArgs(ctx context.Context, key, path string, options *JSONArrTrimArgs) *IntSliceCmd {
+ args := []interface{}{"JSON.ARRTRIM", key, path}
+
+ if options != nil {
+ args = append(args, options.Start)
+
+ if options.Stop != nil {
+ args = append(args, *options.Stop)
+ }
+ }
+ cmd := NewIntSliceCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+// JSONClear clears container values (arrays/objects) and sets numeric values to 0.
+// For more information, see https://redis.io/commands/json.clear
+func (c cmdable) JSONClear(ctx context.Context, key, path string) *IntCmd {
+ args := []interface{}{"JSON.CLEAR", key, path}
+ cmd := NewIntCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+// JSONDebugMemory reports a value's memory usage in bytes (unimplemented)
+// For more information, see https://redis.io/commands/json.debug-memory
+func (c cmdable) JSONDebugMemory(ctx context.Context, key, path string) *IntCmd {
+ panic("not implemented")
+}
+
+// JSONDel deletes a value.
+// For more information, see https://redis.io/commands/json.del
+func (c cmdable) JSONDel(ctx context.Context, key, path string) *IntCmd {
+ args := []interface{}{"JSON.DEL", key, path}
+ cmd := NewIntCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+// JSONForget deletes a value.
+// For more information, see https://redis.io/commands/json.forget
+func (c cmdable) JSONForget(ctx context.Context, key, path string) *IntCmd {
+ args := []interface{}{"JSON.FORGET", key, path}
+ cmd := NewIntCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+// JSONGet returns the value at path in JSON serialized form. JSON.GET returns an
+// array of strings. This function parses out the wrapping array but leaves the
+// internal strings unprocessed by default (see Val())
+// For more information - https://redis.io/commands/json.get/
+func (c cmdable) JSONGet(ctx context.Context, key string, paths ...string) *JSONCmd {
+ args := make([]interface{}, len(paths)+2)
+ args[0] = "JSON.GET"
+ args[1] = key
+ for n, path := range paths {
+ args[n+2] = path
+ }
+ cmd := newJSONCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+type JSONGetArgs struct {
+ Indent string
+ Newline string
+ Space string
+}
+
+// JSONGetWithArgs - Retrieves the value of a key from a JSON document.
+// This function also allows for specifying additional options such as:
+// Indention, NewLine and Space
+// For more information - https://redis.io/commands/json.get/
+func (c cmdable) JSONGetWithArgs(ctx context.Context, key string, options *JSONGetArgs, paths ...string) *JSONCmd {
+ args := []interface{}{"JSON.GET", key}
+ if options != nil {
+ if options.Indent != "" {
+ args = append(args, "INDENT", options.Indent)
+ }
+ if options.Newline != "" {
+ args = append(args, "NEWLINE", options.Newline)
+ }
+ if options.Space != "" {
+ args = append(args, "SPACE", options.Space)
+ }
+ for _, path := range paths {
+ args = append(args, path)
+ }
+ }
+ cmd := newJSONCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+// JSONMerge merges a given JSON value into matching paths.
+// For more information, see https://redis.io/commands/json.merge
+func (c cmdable) JSONMerge(ctx context.Context, key, path string, value string) *StatusCmd {
+ args := []interface{}{"JSON.MERGE", key, path, value}
+ cmd := NewStatusCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+// JSONMGet returns the values at the specified path from multiple key arguments.
+// Note - the arguments are reversed when compared with `JSON.MGET` as we want
+// to follow the pattern of having the last argument be variable.
+// For more information, see https://redis.io/commands/json.mget
+func (c cmdable) JSONMGet(ctx context.Context, path string, keys ...string) *JSONSliceCmd {
+ args := make([]interface{}, len(keys)+1)
+ args[0] = "JSON.MGET"
+ for n, key := range keys {
+ args[n+1] = key
+ }
+ args = append(args, path)
+ cmd := NewJSONSliceCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+// JSONMSetArgs sets or updates one or more JSON values according to the specified key-path-value triplets.
+// For more information, see https://redis.io/commands/json.mset
+func (c cmdable) JSONMSetArgs(ctx context.Context, docs []JSONSetArgs) *StatusCmd {
+ args := []interface{}{"JSON.MSET"}
+ for _, doc := range docs {
+ args = append(args, doc.Key, doc.Path, doc.Value)
+ }
+ cmd := NewStatusCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) JSONMSet(ctx context.Context, params ...interface{}) *StatusCmd {
+ args := []interface{}{"JSON.MSET"}
+ args = append(args, params...)
+ cmd := NewStatusCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+// JSONNumIncrBy increments the number value stored at the specified path by the provided number.
+// For more information, see https://redis.io/commands/json.numincreby
+func (c cmdable) JSONNumIncrBy(ctx context.Context, key, path string, value float64) *JSONCmd {
+ args := []interface{}{"JSON.NUMINCRBY", key, path, value}
+ cmd := newJSONCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+// JSONObjKeys returns the keys in the object that's referenced by the specified path.
+// For more information, see https://redis.io/commands/json.objkeys
+func (c cmdable) JSONObjKeys(ctx context.Context, key, path string) *SliceCmd {
+ args := []interface{}{"JSON.OBJKEYS", key, path}
+ cmd := NewSliceCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+// JSONObjLen reports the number of keys in the JSON object at the specified path in the given key.
+// For more information, see https://redis.io/commands/json.objlen
+func (c cmdable) JSONObjLen(ctx context.Context, key, path string) *IntPointerSliceCmd {
+ args := []interface{}{"JSON.OBJLEN", key, path}
+ cmd := NewIntPointerSliceCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+// JSONSet sets the JSON value at the given path in the given key. The value must be something that
+// can be marshaled to JSON (using encoding/JSON) unless the argument is a string or a []byte when we assume that
+// it can be passed directly as JSON.
+// For more information, see https://redis.io/commands/json.set
+func (c cmdable) JSONSet(ctx context.Context, key, path string, value interface{}) *StatusCmd {
+ return c.JSONSetMode(ctx, key, path, value, "")
+}
+
+// JSONSetMode sets the JSON value at the given path in the given key and allows the mode to be set
+// (the mode value must be "XX" or "NX"). The value must be something that can be marshaled to JSON (using encoding/JSON) unless
+// the argument is a string or []byte when we assume that it can be passed directly as JSON.
+// For more information, see https://redis.io/commands/json.set
+func (c cmdable) JSONSetMode(ctx context.Context, key, path string, value interface{}, mode string) *StatusCmd {
+ var bytes []byte
+ var err error
+ switch v := value.(type) {
+ case string:
+ bytes = []byte(v)
+ case []byte:
+ bytes = v
+ default:
+ bytes, err = json.Marshal(v)
+ }
+ args := []interface{}{"JSON.SET", key, path, util.BytesToString(bytes)}
+ if mode != "" {
+ switch strings.ToUpper(mode) {
+ case "XX", "NX":
+ args = append(args, strings.ToUpper(mode))
+
+ default:
+ panic("redis: JSON.SET mode must be NX or XX")
+ }
+ }
+ cmd := NewStatusCmd(ctx, args...)
+ if err != nil {
+ cmd.SetErr(err)
+ } else {
+ _ = c(ctx, cmd)
+ }
+ return cmd
+}
+
+// JSONStrAppend appends the JSON-string values to the string at the specified path.
+// For more information, see https://redis.io/commands/json.strappend
+func (c cmdable) JSONStrAppend(ctx context.Context, key, path, value string) *IntPointerSliceCmd {
+ args := []interface{}{"JSON.STRAPPEND", key, path, value}
+ cmd := NewIntPointerSliceCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+// JSONStrLen reports the length of the JSON String at the specified path in the given key.
+// For more information, see https://redis.io/commands/json.strlen
+func (c cmdable) JSONStrLen(ctx context.Context, key, path string) *IntPointerSliceCmd {
+ args := []interface{}{"JSON.STRLEN", key, path}
+ cmd := NewIntPointerSliceCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+// JSONToggle toggles a Boolean value stored at the specified path.
+// For more information, see https://redis.io/commands/json.toggle
+func (c cmdable) JSONToggle(ctx context.Context, key, path string) *IntPointerSliceCmd {
+ args := []interface{}{"JSON.TOGGLE", key, path}
+ cmd := NewIntPointerSliceCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+// JSONType reports the type of JSON value at the specified path.
+// For more information, see https://redis.io/commands/json.type
+func (c cmdable) JSONType(ctx context.Context, key, path string) *JSONSliceCmd {
+ args := []interface{}{"JSON.TYPE", key, path}
+ cmd := NewJSONSliceCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
diff --git a/dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.5.1/json_test.go b/dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.5.1/json_test.go
new file mode 100644
index 0000000..4e9718a
--- /dev/null
+++ b/dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.5.1/json_test.go
@@ -0,0 +1,660 @@
+package redis_test
+
+import (
+ "context"
+
+ . "github.com/bsm/ginkgo/v2"
+ . "github.com/bsm/gomega"
+
+ "github.com/redis/go-redis/v9"
+)
+
+type JSONGetTestStruct struct {
+ Hello string `json:"hello"`
+}
+
+var _ = Describe("JSON Commands", Label("json"), func() {
+ ctx := context.TODO()
+ var client *redis.Client
+
+ BeforeEach(func() {
+ client = redis.NewClient(&redis.Options{Addr: ":6379"})
+ Expect(client.FlushAll(ctx).Err()).NotTo(HaveOccurred())
+ })
+
+ AfterEach(func() {
+ Expect(client.Close()).NotTo(HaveOccurred())
+ })
+
+ Describe("arrays", Label("arrays"), func() {
+ It("should JSONArrAppend", Label("json.arrappend", "json"), func() {
+ cmd1 := client.JSONSet(ctx, "append2", "$", `{"a": [10], "b": {"a": [12, 13]}}`)
+ Expect(cmd1.Err()).NotTo(HaveOccurred())
+ Expect(cmd1.Val()).To(Equal("OK"))
+
+ cmd2 := client.JSONArrAppend(ctx, "append2", "$..a", 10)
+ Expect(cmd2.Err()).NotTo(HaveOccurred())
+ Expect(cmd2.Val()).To(Equal([]int64{2, 3}))
+ })
+
+ It("should JSONArrIndex and JSONArrIndexWithArgs", Label("json.arrindex", "json"), func() {
+ cmd1, err := client.JSONSet(ctx, "index1", "$", `{"a": [10], "b": {"a": [12, 10]}}`).Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(cmd1).To(Equal("OK"))
+
+ cmd2, err := client.JSONArrIndex(ctx, "index1", "$.b.a", 10).Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(cmd2).To(Equal([]int64{1}))
+
+ cmd3, err := client.JSONSet(ctx, "index2", "$", `[0,1,2,3,4]`).Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(cmd3).To(Equal("OK"))
+
+ res, err := client.JSONArrIndex(ctx, "index2", "$", 1).Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(res[0]).To(Equal(int64(1)))
+
+ res, err = client.JSONArrIndex(ctx, "index2", "$", 1, 2).Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(res[0]).To(Equal(int64(-1)))
+
+ res, err = client.JSONArrIndex(ctx, "index2", "$", 4).Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(res[0]).To(Equal(int64(4)))
+
+ res, err = client.JSONArrIndexWithArgs(ctx, "index2", "$", &redis.JSONArrIndexArgs{}, 4).Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(res[0]).To(Equal(int64(4)))
+
+ stop := 5000
+ res, err = client.JSONArrIndexWithArgs(ctx, "index2", "$", &redis.JSONArrIndexArgs{Stop: &stop}, 4).Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(res[0]).To(Equal(int64(4)))
+
+ stop = -1
+ res, err = client.JSONArrIndexWithArgs(ctx, "index2", "$", &redis.JSONArrIndexArgs{Stop: &stop}, 4).Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(res[0]).To(Equal(int64(-1)))
+ })
+
+ It("should JSONArrIndex and JSONArrIndexWithArgs with $", Label("json.arrindex", "json"), func() {
+ doc := `{
+ "store": {
+ "book": [
+ {
+ "category": "reference",
+ "author": "Nigel Rees",
+ "title": "Sayings of the Century",
+ "price": 8.95,
+ "size": [10, 20, 30, 40]
+ },
+ {
+ "category": "fiction",
+ "author": "Evelyn Waugh",
+ "title": "Sword of Honour",
+ "price": 12.99,
+ "size": [50, 60, 70, 80]
+ },
+ {
+ "category": "fiction",
+ "author": "Herman Melville",
+ "title": "Moby Dick",
+ "isbn": "0-553-21311-3",
+ "price": 8.99,
+ "size": [5, 10, 20, 30]
+ },
+ {
+ "category": "fiction",
+ "author": "J. R. R. Tolkien",
+ "title": "The Lord of the Rings",
+ "isbn": "0-395-19395-8",
+ "price": 22.99,
+ "size": [5, 6, 7, 8]
+ }
+ ],
+ "bicycle": {"color": "red", "price": 19.95}
+ }
+ }`
+ res, err := client.JSONSet(ctx, "doc1", "$", doc).Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(res).To(Equal("OK"))
+
+ resGet, err := client.JSONGet(ctx, "doc1", "$.store.book[?(@.price<10)].size").Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(resGet).To(Equal("[[10,20,30,40],[5,10,20,30]]"))
+
+ resArr, err := client.JSONArrIndex(ctx, "doc1", "$.store.book[?(@.price<10)].size", 20).Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(resArr).To(Equal([]int64{1, 2}))
+ })
+
+ It("should JSONArrInsert", Label("json.arrinsert", "json"), func() {
+ cmd1 := client.JSONSet(ctx, "insert2", "$", `[100, 200, 300, 200]`)
+ Expect(cmd1.Err()).NotTo(HaveOccurred())
+ Expect(cmd1.Val()).To(Equal("OK"))
+
+ cmd2 := client.JSONArrInsert(ctx, "insert2", "$", -1, 1, 2)
+ Expect(cmd2.Err()).NotTo(HaveOccurred())
+ Expect(cmd2.Val()).To(Equal([]int64{6}))
+
+ cmd3 := client.JSONGet(ctx, "insert2")
+ Expect(cmd3.Err()).NotTo(HaveOccurred())
+ // RESP2 vs RESP3
+ Expect(cmd3.Val()).To(Or(
+ Equal(`[100,200,300,1,2,200]`),
+ Equal(`[[100,200,300,1,2,200]]`)))
+ })
+
+ It("should JSONArrLen", Label("json.arrlen", "json"), func() {
+ cmd1 := client.JSONSet(ctx, "length2", "$", `{"a": [10], "b": {"a": [12, 10, 20, 12, 90, 10]}}`)
+ Expect(cmd1.Err()).NotTo(HaveOccurred())
+ Expect(cmd1.Val()).To(Equal("OK"))
+
+ cmd2 := client.JSONArrLen(ctx, "length2", "$..a")
+ Expect(cmd2.Err()).NotTo(HaveOccurred())
+ Expect(cmd2.Val()).To(Equal([]int64{1, 6}))
+ })
+
+ It("should JSONArrPop", Label("json.arrpop"), func() {
+ cmd1 := client.JSONSet(ctx, "pop4", "$", `[100, 200, 300, 200]`)
+ Expect(cmd1.Err()).NotTo(HaveOccurred())
+ Expect(cmd1.Val()).To(Equal("OK"))
+
+ cmd2 := client.JSONArrPop(ctx, "pop4", "$", 2)
+ Expect(cmd2.Err()).NotTo(HaveOccurred())
+ Expect(cmd2.Val()).To(Equal([]string{"300"}))
+
+ cmd3 := client.JSONGet(ctx, "pop4", "$")
+ Expect(cmd3.Err()).NotTo(HaveOccurred())
+ Expect(cmd3.Val()).To(Equal("[[100,200,200]]"))
+ })
+
+ It("should JSONArrTrim", Label("json.arrtrim", "json"), func() {
+ cmd1, err := client.JSONSet(ctx, "trim1", "$", `[0,1,2,3,4]`).Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(cmd1).To(Equal("OK"))
+
+ stop := 3
+ cmd2, err := client.JSONArrTrimWithArgs(ctx, "trim1", "$", &redis.JSONArrTrimArgs{Start: 1, Stop: &stop}).Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(cmd2).To(Equal([]int64{3}))
+
+ res, err := client.JSONGet(ctx, "trim1", "$").Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(res).To(Equal(`[[1,2,3]]`))
+
+ cmd3, err := client.JSONSet(ctx, "trim2", "$", `[0,1,2,3,4]`).Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(cmd3).To(Equal("OK"))
+
+ stop = 3
+ cmd4, err := client.JSONArrTrimWithArgs(ctx, "trim2", "$", &redis.JSONArrTrimArgs{Start: -1, Stop: &stop}).Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(cmd4).To(Equal([]int64{0}))
+
+ cmd5, err := client.JSONSet(ctx, "trim3", "$", `[0,1,2,3,4]`).Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(cmd5).To(Equal("OK"))
+
+ stop = 99
+ cmd6, err := client.JSONArrTrimWithArgs(ctx, "trim3", "$", &redis.JSONArrTrimArgs{Start: 3, Stop: &stop}).Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(cmd6).To(Equal([]int64{2}))
+
+ cmd7, err := client.JSONSet(ctx, "trim4", "$", `[0,1,2,3,4]`).Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(cmd7).To(Equal("OK"))
+
+ stop = 1
+ cmd8, err := client.JSONArrTrimWithArgs(ctx, "trim4", "$", &redis.JSONArrTrimArgs{Start: 9, Stop: &stop}).Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(cmd8).To(Equal([]int64{0}))
+
+ cmd9, err := client.JSONSet(ctx, "trim5", "$", `[0,1,2,3,4]`).Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(cmd9).To(Equal("OK"))
+
+ stop = 11
+ cmd10, err := client.JSONArrTrimWithArgs(ctx, "trim5", "$", &redis.JSONArrTrimArgs{Start: 9, Stop: &stop}).Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(cmd10).To(Equal([]int64{0}))
+ })
+
+ It("should JSONArrPop", Label("json.arrpop", "json"), func() {
+ cmd1 := client.JSONSet(ctx, "pop4", "$", `[100, 200, 300, 200]`)
+ Expect(cmd1.Err()).NotTo(HaveOccurred())
+ Expect(cmd1.Val()).To(Equal("OK"))
+
+ cmd2 := client.JSONArrPop(ctx, "pop4", "$", 2)
+ Expect(cmd2.Err()).NotTo(HaveOccurred())
+ Expect(cmd2.Val()).To(Equal([]string{"300"}))
+
+ cmd3 := client.JSONGet(ctx, "pop4", "$")
+ Expect(cmd3.Err()).NotTo(HaveOccurred())
+ Expect(cmd3.Val()).To(Equal("[[100,200,200]]"))
+ })
+ })
+
+ Describe("get/set", Label("getset"), func() {
+ It("should JSONSet", Label("json.set", "json"), func() {
+ cmd := client.JSONSet(ctx, "set1", "$", `{"a": 1, "b": 2, "hello": "world"}`)
+ Expect(cmd.Err()).NotTo(HaveOccurred())
+ Expect(cmd.Val()).To(Equal("OK"))
+ })
+
+ It("should JSONGet", Label("json.get", "json"), func() {
+ res, err := client.JSONSet(ctx, "get3", "$", `{"a": 1, "b": 2}`).Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(res).To(Equal("OK"))
+
+ res, err = client.JSONGetWithArgs(ctx, "get3", &redis.JSONGetArgs{Indent: "-"}).Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(res).To(Equal(`[-{--"a":1,--"b":2-}]`))
+
+ res, err = client.JSONGetWithArgs(ctx, "get3", &redis.JSONGetArgs{Indent: "-", Newline: `~`, Space: `!`}).Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(res).To(Equal(`[~-{~--"a":!1,~--"b":!2~-}~]`))
+ })
+
+ It("should JSONMerge", Label("json.merge", "json"), func() {
+ res, err := client.JSONSet(ctx, "merge1", "$", `{"a": 1, "b": 2}`).Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(res).To(Equal("OK"))
+
+ res, err = client.JSONMerge(ctx, "merge1", "$", `{"b": 3, "c": 4}`).Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(res).To(Equal("OK"))
+
+ res, err = client.JSONGet(ctx, "merge1", "$").Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(res).To(Equal(`[{"a":1,"b":3,"c":4}]`))
+ })
+
+ It("should JSONMSet", Label("json.mset", "json", "NonRedisEnterprise"), func() {
+ doc1 := redis.JSONSetArgs{Key: "mset1", Path: "$", Value: `{"a": 1}`}
+ doc2 := redis.JSONSetArgs{Key: "mset2", Path: "$", Value: 2}
+ docs := []redis.JSONSetArgs{doc1, doc2}
+
+ mSetResult, err := client.JSONMSetArgs(ctx, docs).Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(mSetResult).To(Equal("OK"))
+
+ res, err := client.JSONMGet(ctx, "$", "mset1").Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(res).To(Equal([]interface{}{`[{"a":1}]`}))
+
+ res, err = client.JSONMGet(ctx, "$", "mset1", "mset2").Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(res).To(Equal([]interface{}{`[{"a":1}]`, "[2]"}))
+
+ _, err = client.JSONMSet(ctx, "mset1", "$.a", 2, "mset3", "$", `[1]`).Result()
+ Expect(err).NotTo(HaveOccurred())
+ })
+
+ It("should JSONMGet", Label("json.mget", "json", "NonRedisEnterprise"), func() {
+ cmd1 := client.JSONSet(ctx, "mget2a", "$", `{"a": ["aa", "ab", "ac", "ad"], "b": {"a": ["ba", "bb", "bc", "bd"]}}`)
+ Expect(cmd1.Err()).NotTo(HaveOccurred())
+ Expect(cmd1.Val()).To(Equal("OK"))
+ cmd2 := client.JSONSet(ctx, "mget2b", "$", `{"a": [100, 200, 300, 200], "b": {"a": [100, 200, 300, 200]}}`)
+ Expect(cmd2.Err()).NotTo(HaveOccurred())
+ Expect(cmd2.Val()).To(Equal("OK"))
+
+ cmd3 := client.JSONMGet(ctx, "$..a", "mget2a", "mget2b")
+ Expect(cmd3.Err()).NotTo(HaveOccurred())
+ Expect(cmd3.Val()).To(HaveLen(2))
+ Expect(cmd3.Val()[0]).To(Equal(`[["aa","ab","ac","ad"],["ba","bb","bc","bd"]]`))
+ Expect(cmd3.Val()[1]).To(Equal(`[[100,200,300,200],[100,200,300,200]]`))
+ })
+
+ It("should JSONMget with $", Label("json.mget", "json", "NonRedisEnterprise"), func() {
+ res, err := client.JSONSet(ctx, "doc1", "$", `{"a": 1, "b": 2, "nested": {"a": 3}, "c": "", "nested2": {"a": ""}}`).Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(res).To(Equal("OK"))
+
+ res, err = client.JSONSet(ctx, "doc2", "$", `{"a": 4, "b": 5, "nested": {"a": 6}, "c": "", "nested2": {"a": [""]}}`).Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(res).To(Equal("OK"))
+
+ iRes, err := client.JSONMGet(ctx, "$..a", "doc1").Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(iRes).To(Equal([]interface{}{`[1,3,""]`}))
+
+ iRes, err = client.JSONMGet(ctx, "$..a", "doc1", "doc2").Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(iRes).To(Equal([]interface{}{`[1,3,""]`, `[4,6,[""]]`}))
+
+ iRes, err = client.JSONMGet(ctx, "$..a", "non_existing_doc", "non_existing_doc1").Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(iRes).To(Equal([]interface{}{nil, nil}))
+ })
+ })
+
+ Describe("Misc", Label("misc"), func() {
+ It("should JSONClear", Label("json.clear", "json"), func() {
+ cmd1 := client.JSONSet(ctx, "clear1", "$", `[1]`)
+ Expect(cmd1.Err()).NotTo(HaveOccurred())
+ Expect(cmd1.Val()).To(Equal("OK"))
+
+ cmd2 := client.JSONClear(ctx, "clear1", "$")
+ Expect(cmd2.Err()).NotTo(HaveOccurred())
+ Expect(cmd2.Val()).To(Equal(int64(1)))
+
+ cmd3 := client.JSONGet(ctx, "clear1", "$")
+ Expect(cmd3.Err()).NotTo(HaveOccurred())
+ Expect(cmd3.Val()).To(Equal(`[[]]`))
+ })
+
+ It("should JSONClear with $", Label("json.clear", "json"), func() {
+ doc := `{
+ "nested1": {"a": {"foo": 10, "bar": 20}},
+ "a": ["foo"],
+ "nested2": {"a": "claro"},
+ "nested3": {"a": {"baz": 50}}
+ }`
+ res, err := client.JSONSet(ctx, "doc1", "$", doc).Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(res).To(Equal("OK"))
+
+ iRes, err := client.JSONClear(ctx, "doc1", "$..a").Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(iRes).To(Equal(int64(3)))
+
+ resGet, err := client.JSONGet(ctx, "doc1", `$`).Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(resGet).To(Equal(`[{"nested1":{"a":{}},"a":[],"nested2":{"a":"claro"},"nested3":{"a":{}}}]`))
+
+ res, err = client.JSONSet(ctx, "doc1", "$", doc).Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(res).To(Equal("OK"))
+
+ iRes, err = client.JSONClear(ctx, "doc1", "$.nested1.a").Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(iRes).To(Equal(int64(1)))
+
+ resGet, err = client.JSONGet(ctx, "doc1", `$`).Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(resGet).To(Equal(`[{"nested1":{"a":{}},"a":["foo"],"nested2":{"a":"claro"},"nested3":{"a":{"baz":50}}}]`))
+ })
+
+ It("should JSONDel", Label("json.del", "json"), func() {
+ cmd1 := client.JSONSet(ctx, "del1", "$", `[1]`)
+ Expect(cmd1.Err()).NotTo(HaveOccurred())
+ Expect(cmd1.Val()).To(Equal("OK"))
+
+ cmd2 := client.JSONDel(ctx, "del1", "$")
+ Expect(cmd2.Err()).NotTo(HaveOccurred())
+ Expect(cmd2.Val()).To(Equal(int64(1)))
+
+ cmd3 := client.JSONGet(ctx, "del1", "$")
+ Expect(cmd3.Err()).NotTo(HaveOccurred())
+ Expect(cmd3.Val()).To(HaveLen(0))
+ })
+
+ It("should JSONDel with $", Label("json.del", "json"), func() {
+ res, err := client.JSONSet(ctx, "del1", "$", `{"a": 1, "nested": {"a": 2, "b": 3}}`).Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(res).To(Equal("OK"))
+
+ iRes, err := client.JSONDel(ctx, "del1", "$..a").Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(iRes).To(Equal(int64(2)))
+
+ resGet, err := client.JSONGet(ctx, "del1", "$").Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(resGet).To(Equal(`[{"nested":{"b":3}}]`))
+
+ res, err = client.JSONSet(ctx, "del2", "$", `{"a": {"a": 2, "b": 3}, "b": ["a", "b"], "nested": {"b": [true, "a", "b"]}}`).Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(res).To(Equal("OK"))
+
+ iRes, err = client.JSONDel(ctx, "del2", "$..a").Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(iRes).To(Equal(int64(1)))
+
+ resGet, err = client.JSONGet(ctx, "del2", "$").Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(resGet).To(Equal(`[{"nested":{"b":[true,"a","b"]},"b":["a","b"]}]`))
+
+ doc := `[
+ {
+ "ciao": ["non ancora"],
+ "nested": [
+ {"ciao": [1, "a"]},
+ {"ciao": [2, "a"]},
+ {"ciaoc": [3, "non", "ciao"]},
+ {"ciao": [4, "a"]},
+ {"e": [5, "non", "ciao"]}
+ ]
+ }
+ ]`
+ res, err = client.JSONSet(ctx, "del3", "$", doc).Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(res).To(Equal("OK"))
+
+ iRes, err = client.JSONDel(ctx, "del3", `$.[0]["nested"]..ciao`).Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(iRes).To(Equal(int64(3)))
+
+ resVal := `[[{"ciao":["non ancora"],"nested":[{},{},{"ciaoc":[3,"non","ciao"]},{},{"e":[5,"non","ciao"]}]}]]`
+ resGet, err = client.JSONGet(ctx, "del3", "$").Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(resGet).To(Equal(resVal))
+ })
+
+ It("should JSONForget", Label("json.forget", "json"), func() {
+ cmd1 := client.JSONSet(ctx, "forget3", "$", `{"a": [1,2,3], "b": {"a": [1,2,3], "b": "annie"}}`)
+ Expect(cmd1.Err()).NotTo(HaveOccurred())
+ Expect(cmd1.Val()).To(Equal("OK"))
+
+ cmd2 := client.JSONForget(ctx, "forget3", "$..a")
+ Expect(cmd2.Err()).NotTo(HaveOccurred())
+ Expect(cmd2.Val()).To(Equal(int64(2)))
+
+ cmd3 := client.JSONGet(ctx, "forget3", "$")
+ Expect(cmd3.Err()).NotTo(HaveOccurred())
+ Expect(cmd3.Val()).To(Equal(`[{"b":{"b":"annie"}}]`))
+ })
+
+ It("should JSONForget with $", Label("json.forget", "json"), func() {
+ res, err := client.JSONSet(ctx, "doc1", "$", `{"a": 1, "nested": {"a": 2, "b": 3}}`).Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(res).To(Equal("OK"))
+
+ iRes, err := client.JSONForget(ctx, "doc1", "$..a").Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(iRes).To(Equal(int64(2)))
+
+ resGet, err := client.JSONGet(ctx, "doc1", "$").Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(resGet).To(Equal(`[{"nested":{"b":3}}]`))
+
+ res, err = client.JSONSet(ctx, "doc2", "$", `{"a": {"a": 2, "b": 3}, "b": ["a", "b"], "nested": {"b": [true, "a", "b"]}}`).Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(res).To(Equal("OK"))
+
+ iRes, err = client.JSONForget(ctx, "doc2", "$..a").Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(iRes).To(Equal(int64(1)))
+
+ resGet, err = client.JSONGet(ctx, "doc2", "$").Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(resGet).To(Equal(`[{"nested":{"b":[true,"a","b"]},"b":["a","b"]}]`))
+
+ doc := `[
+ {
+ "ciao": ["non ancora"],
+ "nested": [
+ {"ciao": [1, "a"]},
+ {"ciao": [2, "a"]},
+ {"ciaoc": [3, "non", "ciao"]},
+ {"ciao": [4, "a"]},
+ {"e": [5, "non", "ciao"]}
+ ]
+ }
+ ]`
+ res, err = client.JSONSet(ctx, "doc3", "$", doc).Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(res).To(Equal("OK"))
+
+ iRes, err = client.JSONForget(ctx, "doc3", `$.[0]["nested"]..ciao`).Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(iRes).To(Equal(int64(3)))
+
+ resVal := `[[{"ciao":["non ancora"],"nested":[{},{},{"ciaoc":[3,"non","ciao"]},{},{"e":[5,"non","ciao"]}]}]]`
+ resGet, err = client.JSONGet(ctx, "doc3", "$").Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(resGet).To(Equal(resVal))
+ })
+
+ It("should JSONNumIncrBy", Label("json.numincrby", "json"), func() {
+ cmd1 := client.JSONSet(ctx, "incr3", "$", `{"a": [1, 2], "b": {"a": [0, -1]}}`)
+ Expect(cmd1.Err()).NotTo(HaveOccurred())
+ Expect(cmd1.Val()).To(Equal("OK"))
+
+ cmd2 := client.JSONNumIncrBy(ctx, "incr3", "$..a[1]", float64(1))
+ Expect(cmd2.Err()).NotTo(HaveOccurred())
+ Expect(cmd2.Val()).To(Equal(`[3,0]`))
+ })
+
+ It("should JSONNumIncrBy with $", Label("json.numincrby", "json"), func() {
+ res, err := client.JSONSet(ctx, "doc1", "$", `{"a": "b", "b": [{"a": 2}, {"a": 5.0}, {"a": "c"}]}`).Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(res).To(Equal("OK"))
+
+ res, err = client.JSONNumIncrBy(ctx, "doc1", "$.b[1].a", 2).Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(res).To(Equal(`[7]`))
+
+ res, err = client.JSONNumIncrBy(ctx, "doc1", "$.b[1].a", 3.5).Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(res).To(Equal(`[10.5]`))
+
+ res, err = client.JSONSet(ctx, "doc2", "$", `{"a": "b", "b": [{"a": 2}, {"a": 5.0}, {"a": "c"}]}`).Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(res).To(Equal("OK"))
+
+ res, err = client.JSONNumIncrBy(ctx, "doc2", "$.b[0].a", 3).Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(res).To(Equal(`[5]`))
+ })
+
+ It("should JSONObjKeys", Label("json.objkeys", "json"), func() {
+ cmd1 := client.JSONSet(ctx, "objkeys1", "$", `{"a": [1, 2], "b": {"a": [0, -1]}}`)
+ Expect(cmd1.Err()).NotTo(HaveOccurred())
+ Expect(cmd1.Val()).To(Equal("OK"))
+
+ cmd2 := client.JSONObjKeys(ctx, "objkeys1", "$..*")
+ Expect(cmd2.Err()).NotTo(HaveOccurred())
+ Expect(cmd2.Val()).To(HaveLen(7))
+ Expect(cmd2.Val()).To(Equal([]interface{}{nil, []interface{}{"a"}, nil, nil, nil, nil, nil}))
+ })
+
+ It("should JSONObjKeys with $", Label("json.objkeys", "json"), func() {
+ doc := `{
+ "nested1": {"a": {"foo": 10, "bar": 20}},
+ "a": ["foo"],
+ "nested2": {"a": {"baz": 50}}
+ }`
+ cmd1, err := client.JSONSet(ctx, "objkeys1", "$", doc).Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(cmd1).To(Equal("OK"))
+
+ cmd2, err := client.JSONObjKeys(ctx, "objkeys1", "$.nested1.a").Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(cmd2).To(Equal([]interface{}{[]interface{}{"foo", "bar"}}))
+
+ cmd2, err = client.JSONObjKeys(ctx, "objkeys1", ".*.a").Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(cmd2).To(Equal([]interface{}{"foo", "bar"}))
+
+ cmd2, err = client.JSONObjKeys(ctx, "objkeys1", ".nested2.a").Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(cmd2).To(Equal([]interface{}{"baz"}))
+
+ _, err = client.JSONObjKeys(ctx, "non_existing_doc", "..a").Result()
+ Expect(err).To(HaveOccurred())
+ })
+
+ It("should JSONObjLen", Label("json.objlen", "json"), func() {
+ cmd1 := client.JSONSet(ctx, "objlen2", "$", `{"a": [1, 2], "b": {"a": [0, -1]}}`)
+ Expect(cmd1.Err()).NotTo(HaveOccurred())
+ Expect(cmd1.Val()).To(Equal("OK"))
+
+ cmd2 := client.JSONObjLen(ctx, "objlen2", "$..*")
+ Expect(cmd2.Err()).NotTo(HaveOccurred())
+ Expect(cmd2.Val()).To(HaveLen(7))
+ Expect(cmd2.Val()[0]).To(BeNil())
+ Expect(*cmd2.Val()[1]).To(Equal(int64(1)))
+ })
+
+ It("should JSONStrLen", Label("json.strlen", "json"), func() {
+ cmd1 := client.JSONSet(ctx, "strlen2", "$", `{"a": "alice", "b": "bob", "c": {"a": "alice", "b": "bob"}}`)
+ Expect(cmd1.Err()).NotTo(HaveOccurred())
+ Expect(cmd1.Val()).To(Equal("OK"))
+
+ cmd2 := client.JSONStrLen(ctx, "strlen2", "$..*")
+ Expect(cmd2.Err()).NotTo(HaveOccurred())
+ Expect(cmd2.Val()).To(HaveLen(5))
+ var tmp int64 = 20
+ Expect(cmd2.Val()[0]).To(BeAssignableToTypeOf(&tmp))
+ Expect(*cmd2.Val()[0]).To(Equal(int64(5)))
+ Expect(*cmd2.Val()[1]).To(Equal(int64(3)))
+ Expect(cmd2.Val()[2]).To(BeNil())
+ Expect(*cmd2.Val()[3]).To(Equal(int64(5)))
+ Expect(*cmd2.Val()[4]).To(Equal(int64(3)))
+ })
+
+ It("should JSONStrAppend", Label("json.strappend", "json"), func() {
+ cmd1, err := client.JSONSet(ctx, "strapp1", "$", `"foo"`).Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(cmd1).To(Equal("OK"))
+ cmd2, err := client.JSONStrAppend(ctx, "strapp1", "$", `"bar"`).Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(*cmd2[0]).To(Equal(int64(6)))
+ cmd3, err := client.JSONGet(ctx, "strapp1", "$").Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(cmd3).To(Equal(`["foobar"]`))
+ })
+
+ It("should JSONStrAppend and JSONStrLen with $", Label("json.strappend", "json.strlen", "json"), func() {
+ res, err := client.JSONSet(ctx, "doc1", "$", `{"a": "foo", "nested1": {"a": "hello"}, "nested2": {"a": 31}}`).Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(res).To(Equal("OK"))
+
+ intArrayResult, err := client.JSONStrAppend(ctx, "doc1", "$.nested1.a", `"baz"`).Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(*intArrayResult[0]).To(Equal(int64(8)))
+
+ res, err = client.JSONSet(ctx, "doc2", "$", `{"a": "foo", "nested1": {"a": "hello"}, "nested2": {"a": 31}}`).Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(res).To(Equal("OK"))
+
+ intResult, err := client.JSONStrLen(ctx, "doc2", "$.nested1.a").Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(*intResult[0]).To(Equal(int64(5)))
+ })
+
+ It("should JSONToggle", Label("json.toggle", "json"), func() {
+ cmd1 := client.JSONSet(ctx, "toggle1", "$", `[true]`)
+ Expect(cmd1.Err()).NotTo(HaveOccurred())
+ Expect(cmd1.Val()).To(Equal("OK"))
+
+ cmd2 := client.JSONToggle(ctx, "toggle1", "$[0]")
+ Expect(cmd2.Err()).NotTo(HaveOccurred())
+ Expect(cmd2.Val()).To(HaveLen(1))
+ Expect(*cmd2.Val()[0]).To(Equal(int64(0)))
+ })
+
+ It("should JSONType", Label("json.type", "json"), func() {
+ cmd1 := client.JSONSet(ctx, "type1", "$", `[true]`)
+ Expect(cmd1.Err()).NotTo(HaveOccurred())
+ Expect(cmd1.Val()).To(Equal("OK"))
+
+ cmd2 := client.JSONType(ctx, "type1", "$[0]")
+ Expect(cmd2.Err()).NotTo(HaveOccurred())
+ Expect(cmd2.Val()).To(HaveLen(1))
+ // RESP2 v RESP3
+ Expect(cmd2.Val()[0]).To(Or(Equal([]interface{}{"boolean"}), Equal("boolean")))
+ })
+ })
+})
diff --git a/dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.5.1/list_commands.go b/dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.5.1/list_commands.go
new file mode 100644
index 0000000..24a0de0
--- /dev/null
+++ b/dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.5.1/list_commands.go
@@ -0,0 +1,289 @@
+package redis
+
+import (
+ "context"
+ "strings"
+ "time"
+)
+
+type ListCmdable interface {
+ BLPop(ctx context.Context, timeout time.Duration, keys ...string) *StringSliceCmd
+ BLMPop(ctx context.Context, timeout time.Duration, direction string, count int64, keys ...string) *KeyValuesCmd
+ BRPop(ctx context.Context, timeout time.Duration, keys ...string) *StringSliceCmd
+ BRPopLPush(ctx context.Context, source, destination string, timeout time.Duration) *StringCmd
+ LIndex(ctx context.Context, key string, index int64) *StringCmd
+ LInsert(ctx context.Context, key, op string, pivot, value interface{}) *IntCmd
+ LInsertBefore(ctx context.Context, key string, pivot, value interface{}) *IntCmd
+ LInsertAfter(ctx context.Context, key string, pivot, value interface{}) *IntCmd
+ LLen(ctx context.Context, key string) *IntCmd
+ LMPop(ctx context.Context, direction string, count int64, keys ...string) *KeyValuesCmd
+ LPop(ctx context.Context, key string) *StringCmd
+ LPopCount(ctx context.Context, key string, count int) *StringSliceCmd
+ LPos(ctx context.Context, key string, value string, args LPosArgs) *IntCmd
+ LPosCount(ctx context.Context, key string, value string, count int64, args LPosArgs) *IntSliceCmd
+ LPush(ctx context.Context, key string, values ...interface{}) *IntCmd
+ LPushX(ctx context.Context, key string, values ...interface{}) *IntCmd
+ LRange(ctx context.Context, key string, start, stop int64) *StringSliceCmd
+ LRem(ctx context.Context, key string, count int64, value interface{}) *IntCmd
+ LSet(ctx context.Context, key string, index int64, value interface{}) *StatusCmd
+ LTrim(ctx context.Context, key string, start, stop int64) *StatusCmd
+ RPop(ctx context.Context, key string) *StringCmd
+ RPopCount(ctx context.Context, key string, count int) *StringSliceCmd
+ RPopLPush(ctx context.Context, source, destination string) *StringCmd
+ RPush(ctx context.Context, key string, values ...interface{}) *IntCmd
+ RPushX(ctx context.Context, key string, values ...interface{}) *IntCmd
+ LMove(ctx context.Context, source, destination, srcpos, destpos string) *StringCmd
+ BLMove(ctx context.Context, source, destination, srcpos, destpos string, timeout time.Duration) *StringCmd
+}
+
+func (c cmdable) BLPop(ctx context.Context, timeout time.Duration, keys ...string) *StringSliceCmd {
+ args := make([]interface{}, 1+len(keys)+1)
+ args[0] = "blpop"
+ for i, key := range keys {
+ args[1+i] = key
+ }
+ args[len(args)-1] = formatSec(ctx, timeout)
+ cmd := NewStringSliceCmd(ctx, args...)
+ cmd.setReadTimeout(timeout)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) BLMPop(ctx context.Context, timeout time.Duration, direction string, count int64, keys ...string) *KeyValuesCmd {
+ args := make([]interface{}, 3+len(keys), 6+len(keys))
+ args[0] = "blmpop"
+ args[1] = formatSec(ctx, timeout)
+ args[2] = len(keys)
+ for i, key := range keys {
+ args[3+i] = key
+ }
+ args = append(args, strings.ToLower(direction), "count", count)
+ cmd := NewKeyValuesCmd(ctx, args...)
+ cmd.setReadTimeout(timeout)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) BRPop(ctx context.Context, timeout time.Duration, keys ...string) *StringSliceCmd {
+ args := make([]interface{}, 1+len(keys)+1)
+ args[0] = "brpop"
+ for i, key := range keys {
+ args[1+i] = key
+ }
+ args[len(keys)+1] = formatSec(ctx, timeout)
+ cmd := NewStringSliceCmd(ctx, args...)
+ cmd.setReadTimeout(timeout)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) BRPopLPush(ctx context.Context, source, destination string, timeout time.Duration) *StringCmd {
+ cmd := NewStringCmd(
+ ctx,
+ "brpoplpush",
+ source,
+ destination,
+ formatSec(ctx, timeout),
+ )
+ cmd.setReadTimeout(timeout)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) LIndex(ctx context.Context, key string, index int64) *StringCmd {
+ cmd := NewStringCmd(ctx, "lindex", key, index)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+// LMPop Pops one or more elements from the first non-empty list key from the list of provided key names.
+// direction: left or right, count: > 0
+// example: client.LMPop(ctx, "left", 3, "key1", "key2")
+func (c cmdable) LMPop(ctx context.Context, direction string, count int64, keys ...string) *KeyValuesCmd {
+ args := make([]interface{}, 2+len(keys), 5+len(keys))
+ args[0] = "lmpop"
+ args[1] = len(keys)
+ for i, key := range keys {
+ args[2+i] = key
+ }
+ args = append(args, strings.ToLower(direction), "count", count)
+ cmd := NewKeyValuesCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) LInsert(ctx context.Context, key, op string, pivot, value interface{}) *IntCmd {
+ cmd := NewIntCmd(ctx, "linsert", key, op, pivot, value)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) LInsertBefore(ctx context.Context, key string, pivot, value interface{}) *IntCmd {
+ cmd := NewIntCmd(ctx, "linsert", key, "before", pivot, value)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) LInsertAfter(ctx context.Context, key string, pivot, value interface{}) *IntCmd {
+ cmd := NewIntCmd(ctx, "linsert", key, "after", pivot, value)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) LLen(ctx context.Context, key string) *IntCmd {
+ cmd := NewIntCmd(ctx, "llen", key)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) LPop(ctx context.Context, key string) *StringCmd {
+ cmd := NewStringCmd(ctx, "lpop", key)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) LPopCount(ctx context.Context, key string, count int) *StringSliceCmd {
+ cmd := NewStringSliceCmd(ctx, "lpop", key, count)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+type LPosArgs struct {
+ Rank, MaxLen int64
+}
+
+func (c cmdable) LPos(ctx context.Context, key string, value string, a LPosArgs) *IntCmd {
+ args := []interface{}{"lpos", key, value}
+ if a.Rank != 0 {
+ args = append(args, "rank", a.Rank)
+ }
+ if a.MaxLen != 0 {
+ args = append(args, "maxlen", a.MaxLen)
+ }
+
+ cmd := NewIntCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) LPosCount(ctx context.Context, key string, value string, count int64, a LPosArgs) *IntSliceCmd {
+ args := []interface{}{"lpos", key, value, "count", count}
+ if a.Rank != 0 {
+ args = append(args, "rank", a.Rank)
+ }
+ if a.MaxLen != 0 {
+ args = append(args, "maxlen", a.MaxLen)
+ }
+ cmd := NewIntSliceCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) LPush(ctx context.Context, key string, values ...interface{}) *IntCmd {
+ args := make([]interface{}, 2, 2+len(values))
+ args[0] = "lpush"
+ args[1] = key
+ args = appendArgs(args, values)
+ cmd := NewIntCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) LPushX(ctx context.Context, key string, values ...interface{}) *IntCmd {
+ args := make([]interface{}, 2, 2+len(values))
+ args[0] = "lpushx"
+ args[1] = key
+ args = appendArgs(args, values)
+ cmd := NewIntCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) LRange(ctx context.Context, key string, start, stop int64) *StringSliceCmd {
+ cmd := NewStringSliceCmd(
+ ctx,
+ "lrange",
+ key,
+ start,
+ stop,
+ )
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) LRem(ctx context.Context, key string, count int64, value interface{}) *IntCmd {
+ cmd := NewIntCmd(ctx, "lrem", key, count, value)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) LSet(ctx context.Context, key string, index int64, value interface{}) *StatusCmd {
+ cmd := NewStatusCmd(ctx, "lset", key, index, value)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) LTrim(ctx context.Context, key string, start, stop int64) *StatusCmd {
+ cmd := NewStatusCmd(
+ ctx,
+ "ltrim",
+ key,
+ start,
+ stop,
+ )
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) RPop(ctx context.Context, key string) *StringCmd {
+ cmd := NewStringCmd(ctx, "rpop", key)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) RPopCount(ctx context.Context, key string, count int) *StringSliceCmd {
+ cmd := NewStringSliceCmd(ctx, "rpop", key, count)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) RPopLPush(ctx context.Context, source, destination string) *StringCmd {
+ cmd := NewStringCmd(ctx, "rpoplpush", source, destination)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) RPush(ctx context.Context, key string, values ...interface{}) *IntCmd {
+ args := make([]interface{}, 2, 2+len(values))
+ args[0] = "rpush"
+ args[1] = key
+ args = appendArgs(args, values)
+ cmd := NewIntCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) RPushX(ctx context.Context, key string, values ...interface{}) *IntCmd {
+ args := make([]interface{}, 2, 2+len(values))
+ args[0] = "rpushx"
+ args[1] = key
+ args = appendArgs(args, values)
+ cmd := NewIntCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) LMove(ctx context.Context, source, destination, srcpos, destpos string) *StringCmd {
+ cmd := NewStringCmd(ctx, "lmove", source, destination, srcpos, destpos)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) BLMove(
+ ctx context.Context, source, destination, srcpos, destpos string, timeout time.Duration,
+) *StringCmd {
+ cmd := NewStringCmd(ctx, "blmove", source, destination, srcpos, destpos, formatSec(ctx, timeout))
+ cmd.setReadTimeout(timeout)
+ _ = c(ctx, cmd)
+ return cmd
+}
diff --git a/dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/main_test.go b/dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.5.1/main_test.go
index 5414310..19e9444 100644
--- a/dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/main_test.go
+++ b/dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.5.1/main_test.go
@@ -1,26 +1,23 @@
package redis_test
import (
- "context"
- "errors"
"fmt"
"net"
"os"
"os/exec"
"path/filepath"
+ "strconv"
"sync"
"testing"
"time"
- . "github.com/onsi/ginkgo"
- . "github.com/onsi/gomega"
+ . "github.com/bsm/ginkgo/v2"
+ . "github.com/bsm/gomega"
- "github.com/go-redis/redis/v8"
+ "github.com/redis/go-redis/v9"
)
const (
- redisPort = "6380"
- redisAddr = ":" + redisPort
redisSecondaryPort = "6381"
)
@@ -41,6 +38,16 @@ const (
)
var (
+ redisPort = "6380"
+ redisAddr = ":" + redisPort
+)
+
+var (
+ rediStackPort = "6379"
+ rediStackAddr = ":" + rediStackPort
+)
+
+var (
sentinelAddrs = []string{":" + sentinelPort1, ":" + sentinelPort2, ":" + sentinelPort3}
processes map[string]*redisProcess
@@ -58,6 +65,8 @@ var cluster = &clusterScenario{
clients: make(map[string]*redis.Client, 6),
}
+var RECluster = false
+
func registerProcess(port string, p *redisProcess) {
if processes == nil {
processes = make(map[string]*redisProcess)
@@ -66,48 +75,62 @@ func registerProcess(port string, p *redisProcess) {
}
var _ = BeforeSuite(func() {
+ addr := os.Getenv("REDIS_PORT")
+ if addr != "" {
+ redisPort = addr
+ redisAddr = ":" + redisPort
+ }
var err error
+ RECluster, _ = strconv.ParseBool(os.Getenv("RE_CLUSTER"))
- redisMain, err = startRedis(redisPort)
- Expect(err).NotTo(HaveOccurred())
+ if !RECluster {
- ringShard1, err = startRedis(ringShard1Port)
- Expect(err).NotTo(HaveOccurred())
+ redisMain, err = startRedis(redisPort)
+ Expect(err).NotTo(HaveOccurred())
- ringShard2, err = startRedis(ringShard2Port)
- Expect(err).NotTo(HaveOccurred())
+ ringShard1, err = startRedis(ringShard1Port)
+ Expect(err).NotTo(HaveOccurred())
- ringShard3, err = startRedis(ringShard3Port)
- Expect(err).NotTo(HaveOccurred())
+ ringShard2, err = startRedis(ringShard2Port)
+ Expect(err).NotTo(HaveOccurred())
- sentinelMaster, err = startRedis(sentinelMasterPort)
- Expect(err).NotTo(HaveOccurred())
+ ringShard3, err = startRedis(ringShard3Port)
+ Expect(err).NotTo(HaveOccurred())
- sentinel1, err = startSentinel(sentinelPort1, sentinelName, sentinelMasterPort)
- Expect(err).NotTo(HaveOccurred())
+ sentinelMaster, err = startRedis(sentinelMasterPort)
+ Expect(err).NotTo(HaveOccurred())
- sentinel2, err = startSentinel(sentinelPort2, sentinelName, sentinelMasterPort)
- Expect(err).NotTo(HaveOccurred())
+ sentinel1, err = startSentinel(sentinelPort1, sentinelName, sentinelMasterPort)
+ Expect(err).NotTo(HaveOccurred())
- sentinel3, err = startSentinel(sentinelPort3, sentinelName, sentinelMasterPort)
- Expect(err).NotTo(HaveOccurred())
+ sentinel2, err = startSentinel(sentinelPort2, sentinelName, sentinelMasterPort)
+ Expect(err).NotTo(HaveOccurred())
- sentinelSlave1, err = startRedis(
- sentinelSlave1Port, "--slaveof", "127.0.0.1", sentinelMasterPort)
- Expect(err).NotTo(HaveOccurred())
+ sentinel3, err = startSentinel(sentinelPort3, sentinelName, sentinelMasterPort)
+ Expect(err).NotTo(HaveOccurred())
- sentinelSlave2, err = startRedis(
- sentinelSlave2Port, "--slaveof", "127.0.0.1", sentinelMasterPort)
- Expect(err).NotTo(HaveOccurred())
+ sentinelSlave1, err = startRedis(
+ sentinelSlave1Port, "--slaveof", "127.0.0.1", sentinelMasterPort)
+ Expect(err).NotTo(HaveOccurred())
- Expect(startCluster(ctx, cluster)).NotTo(HaveOccurred())
+ sentinelSlave2, err = startRedis(
+ sentinelSlave2Port, "--slaveof", "127.0.0.1", sentinelMasterPort)
+ Expect(err).NotTo(HaveOccurred())
+
+ Expect(startCluster(ctx, cluster)).NotTo(HaveOccurred())
+ } else {
+ redisPort = rediStackPort
+ redisAddr = rediStackAddr
+ }
})
var _ = AfterSuite(func() {
- Expect(cluster.Close()).NotTo(HaveOccurred())
+ if !RECluster {
+ Expect(cluster.Close()).NotTo(HaveOccurred())
- for _, p := range processes {
- Expect(p.Close()).NotTo(HaveOccurred())
+ for _, p := range processes {
+ Expect(p.Close()).NotTo(HaveOccurred())
+ }
}
processes = nil
})
@@ -120,20 +143,37 @@ func TestGinkgoSuite(t *testing.T) {
//------------------------------------------------------------------------------
func redisOptions() *redis.Options {
+ if RECluster {
+ return &redis.Options{
+ Addr: redisAddr,
+ DB: 0,
+
+ DialTimeout: 10 * time.Second,
+ ReadTimeout: 30 * time.Second,
+ WriteTimeout: 30 * time.Second,
+ ContextTimeoutEnabled: true,
+
+ MaxRetries: -1,
+ PoolSize: 10,
+
+ PoolTimeout: 30 * time.Second,
+ ConnMaxIdleTime: time.Minute,
+ }
+ }
return &redis.Options{
Addr: redisAddr,
DB: 15,
- DialTimeout: 10 * time.Second,
- ReadTimeout: 30 * time.Second,
- WriteTimeout: 30 * time.Second,
+ DialTimeout: 10 * time.Second,
+ ReadTimeout: 30 * time.Second,
+ WriteTimeout: 30 * time.Second,
+ ContextTimeoutEnabled: true,
MaxRetries: -1,
- PoolSize: 10,
- PoolTimeout: 30 * time.Second,
- IdleTimeout: time.Minute,
- IdleCheckFrequency: 100 * time.Millisecond,
+ PoolSize: 10,
+ PoolTimeout: 30 * time.Second,
+ ConnMaxIdleTime: time.Minute,
}
}
@@ -145,10 +185,9 @@ func redisClusterOptions() *redis.ClusterOptions {
MaxRedirects: 8,
- PoolSize: 10,
- PoolTimeout: 30 * time.Second,
- IdleTimeout: time.Minute,
- IdleCheckFrequency: 100 * time.Millisecond,
+ PoolSize: 10,
+ PoolTimeout: 30 * time.Second,
+ ConnMaxIdleTime: time.Minute,
}
}
@@ -165,10 +204,9 @@ func redisRingOptions() *redis.RingOptions {
MaxRetries: -1,
- PoolSize: 10,
- PoolTimeout: 30 * time.Second,
- IdleTimeout: time.Minute,
- IdleCheckFrequency: 100 * time.Millisecond,
+ PoolSize: 10,
+ PoolTimeout: 30 * time.Second,
+ ConnMaxIdleTime: time.Minute,
}
}
@@ -272,7 +310,7 @@ func (p *redisProcess) Close() error {
if err := p.Client.Ping(ctx).Err(); err != nil {
return nil
}
- return errors.New("client is not shutdown")
+ return fmt.Errorf("client %s is not shutdown", p.Options().Addr)
}, 10*time.Second)
if err != nil {
return err
@@ -283,8 +321,9 @@ func (p *redisProcess) Close() error {
}
var (
- redisServerBin, _ = filepath.Abs(filepath.Join("testdata", "redis", "src", "redis-server"))
- redisServerConf, _ = filepath.Abs(filepath.Join("testdata", "redis", "redis.conf"))
+ redisServerBin, _ = filepath.Abs(filepath.Join("testdata", "redis", "src", "redis-server"))
+ redisServerConf, _ = filepath.Abs(filepath.Join("testdata", "redis", "redis.conf"))
+ redisSentinelConf, _ = filepath.Abs(filepath.Join("testdata", "redis", "sentinel.conf"))
)
func redisDir(port string) (string, error) {
@@ -306,11 +345,12 @@ func startRedis(port string, args ...string) (*redisProcess, error) {
if err != nil {
return nil, err
}
- if err = exec.Command("cp", "-f", redisServerConf, dir).Run(); err != nil {
+
+ if err := exec.Command("cp", "-f", redisServerConf, dir).Run(); err != nil {
return nil, err
}
- baseArgs := []string{filepath.Join(dir, "redis.conf"), "--port", port, "--dir", dir}
+ baseArgs := []string{filepath.Join(dir, "redis.conf"), "--port", port, "--dir", dir, "--enable-module-command", "yes"}
process, err := execCmd(redisServerBin, append(baseArgs, args...)...)
if err != nil {
return nil, err
@@ -324,7 +364,7 @@ func startRedis(port string, args ...string) (*redisProcess, error) {
p := &redisProcess{process, client}
registerProcess(port, p)
- return p, err
+ return p, nil
}
func startSentinel(port, masterName, masterPort string) (*redisProcess, error) {
@@ -333,7 +373,12 @@ func startSentinel(port, masterName, masterPort string) (*redisProcess, error) {
return nil, err
}
- process, err := execCmd(redisServerBin, os.DevNull, "--sentinel", "--port", port, "--dir", dir)
+ sentinelConf := filepath.Join(dir, "sentinel.conf")
+ if err := os.WriteFile(sentinelConf, nil, 0o644); err != nil {
+ return nil, err
+ }
+
+ process, err := execCmd(redisServerBin, sentinelConf, "--sentinel", "--port", port, "--dir", dir)
if err != nil {
return nil, err
}
@@ -355,7 +400,7 @@ func startSentinel(port, masterName, masterPort string) (*redisProcess, error) {
client.Process(ctx, cmd)
if err := cmd.Err(); err != nil {
process.Kill()
- return nil, err
+ return nil, fmt.Errorf("%s failed: %w", cmd, err)
}
}
@@ -412,37 +457,28 @@ func (cn *badConn) Write([]byte) (int, error) {
//------------------------------------------------------------------------------
type hook struct {
- beforeProcess func(ctx context.Context, cmd redis.Cmder) (context.Context, error)
- afterProcess func(ctx context.Context, cmd redis.Cmder) error
-
- beforeProcessPipeline func(ctx context.Context, cmds []redis.Cmder) (context.Context, error)
- afterProcessPipeline func(ctx context.Context, cmds []redis.Cmder) error
+ dialHook func(hook redis.DialHook) redis.DialHook
+ processHook func(hook redis.ProcessHook) redis.ProcessHook
+ processPipelineHook func(hook redis.ProcessPipelineHook) redis.ProcessPipelineHook
}
-func (h *hook) BeforeProcess(ctx context.Context, cmd redis.Cmder) (context.Context, error) {
- if h.beforeProcess != nil {
- return h.beforeProcess(ctx, cmd)
+func (h *hook) DialHook(hook redis.DialHook) redis.DialHook {
+ if h.dialHook != nil {
+ return h.dialHook(hook)
}
- return ctx, nil
+ return hook
}
-func (h *hook) AfterProcess(ctx context.Context, cmd redis.Cmder) error {
- if h.afterProcess != nil {
- return h.afterProcess(ctx, cmd)
+func (h *hook) ProcessHook(hook redis.ProcessHook) redis.ProcessHook {
+ if h.processHook != nil {
+ return h.processHook(hook)
}
- return nil
+ return hook
}
-func (h *hook) BeforeProcessPipeline(ctx context.Context, cmds []redis.Cmder) (context.Context, error) {
- if h.beforeProcessPipeline != nil {
- return h.beforeProcessPipeline(ctx, cmds)
+func (h *hook) ProcessPipelineHook(hook redis.ProcessPipelineHook) redis.ProcessPipelineHook {
+ if h.processPipelineHook != nil {
+ return h.processPipelineHook(hook)
}
- return ctx, nil
-}
-
-func (h *hook) AfterProcessPipeline(ctx context.Context, cmds []redis.Cmder) error {
- if h.afterProcessPipeline != nil {
- return h.afterProcessPipeline(ctx, cmds)
- }
- return nil
+ return hook
}
diff --git a/dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.5.1/monitor_test.go b/dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.5.1/monitor_test.go
new file mode 100644
index 0000000..1bc82ec
--- /dev/null
+++ b/dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.5.1/monitor_test.go
@@ -0,0 +1,48 @@
+package redis_test
+
+import (
+ "context"
+ "time"
+
+ . "github.com/bsm/ginkgo/v2"
+ . "github.com/bsm/gomega"
+
+ "github.com/redis/go-redis/v9"
+)
+
+var _ = Describe("Monitor command", Label("monitor"), func() {
+ ctx := context.TODO()
+ var client *redis.Client
+
+ BeforeEach(func() {
+ client = redis.NewClient(&redis.Options{Addr: ":6379"})
+ Expect(client.FlushDB(ctx).Err()).NotTo(HaveOccurred())
+ })
+
+ AfterEach(func() {
+ Expect(client.Close()).NotTo(HaveOccurred())
+ })
+
+ It("should monitor", Label("monitor"), func() {
+ ress := make(chan string)
+ client1 := redis.NewClient(&redis.Options{Addr: rediStackAddr})
+ mn := client1.Monitor(ctx, ress)
+ mn.Start()
+ // Wait for the Redis server to be in monitoring mode.
+ time.Sleep(100 * time.Millisecond)
+ client.Set(ctx, "foo", "bar", 0)
+ client.Set(ctx, "bar", "baz", 0)
+ client.Set(ctx, "bap", 8, 0)
+ client.Get(ctx, "bap")
+ lst := []string{}
+ for i := 0; i < 5; i++ {
+ s := <-ress
+ lst = append(lst, s)
+ }
+ mn.Stop()
+ Expect(lst[0]).To(ContainSubstring("OK"))
+ Expect(lst[1]).To(ContainSubstring(`"set" "foo" "bar"`))
+ Expect(lst[2]).To(ContainSubstring(`"set" "bar" "baz"`))
+ Expect(lst[3]).To(ContainSubstring(`"set" "bap" "8"`))
+ })
+})
diff --git a/dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/options.go b/dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.5.1/options.go
index a4abe32..dff52ae 100644
--- a/dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/options.go
+++ b/dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.5.1/options.go
@@ -13,7 +13,7 @@ import (
"strings"
"time"
- "github.com/go-redis/redis/v8/internal/pool"
+ "github.com/redis/go-redis/v9/internal/pool"
)
// Limiter is the interface of a rate limiter or a circuit breaker.
@@ -27,7 +27,7 @@ type Limiter interface {
ReportResult(result error)
}
-// Options keeps the settings to setup redis connection.
+// Options keeps the settings to set up redis connection.
type Options struct {
// The network type, either tcp or unix.
// Default is tcp.
@@ -35,6 +35,9 @@ type Options struct {
// host:port address.
Addr string
+ // ClientName will execute the `CLIENT SETNAME ClientName` command for each conn.
+ ClientName string
+
// Dialer creates new network connection and has priority over
// Network and Addr options.
Dialer func(ctx context.Context, network, addr string) (net.Conn, error)
@@ -42,6 +45,9 @@ type Options struct {
// Hook that is called when new connection is established.
OnConnect func(ctx context.Context, cn *Conn) error
+ // Protocol 2 or 3. Use the version to negotiate RESP version with redis-server.
+ // Default is 3.
+ Protocol int
// Use the specified Username to authenticate the current connection
// with one of the connections defined in the ACL list when connecting
// to a Redis 6.0 instance, or greater, that is using the Redis ACL system.
@@ -51,6 +57,9 @@ type Options struct {
// or the User Password when connecting to a Redis 6.0 instance, or greater,
// that is using the Redis ACL system.
Password string
+ // CredentialsProvider allows the username and password to be updated
+ // before reconnecting. It should return the current username and password.
+ CredentialsProvider func() (username string, password string)
// Database to be selected after connecting to the server.
DB int
@@ -69,49 +78,75 @@ type Options struct {
// Default is 5 seconds.
DialTimeout time.Duration
// Timeout for socket reads. If reached, commands will fail
- // with a timeout instead of blocking. Use value -1 for no timeout and 0 for default.
- // Default is 3 seconds.
+ // with a timeout instead of blocking. Supported values:
+ // - `0` - default timeout (3 seconds).
+ // - `-1` - no timeout (block indefinitely).
+ // - `-2` - disables SetReadDeadline calls completely.
ReadTimeout time.Duration
// Timeout for socket writes. If reached, commands will fail
- // with a timeout instead of blocking.
- // Default is ReadTimeout.
+ // with a timeout instead of blocking. Supported values:
+ // - `0` - default timeout (3 seconds).
+ // - `-1` - no timeout (block indefinitely).
+ // - `-2` - disables SetWriteDeadline calls completely.
WriteTimeout time.Duration
+ // ContextTimeoutEnabled controls whether the client respects context timeouts and deadlines.
+ // See https://redis.uptrace.dev/guide/go-redis-debugging.html#timeouts
+ ContextTimeoutEnabled bool
// Type of connection pool.
// true for FIFO pool, false for LIFO pool.
- // Note that fifo has higher overhead compared to lifo.
+ // Note that FIFO has slightly higher overhead compared to LIFO,
+ // but it helps closing idle connections faster reducing the pool size.
PoolFIFO bool
- // Maximum number of socket connections.
+ // Base number of socket connections.
// Default is 10 connections per every available CPU as reported by runtime.GOMAXPROCS.
+ // If there is not enough connections in the pool, new connections will be allocated in excess of PoolSize,
+ // you can limit it through MaxActiveConns
PoolSize int
- // Minimum number of idle connections which is useful when establishing
- // new connection is slow.
- MinIdleConns int
- // Connection age at which client retires (closes) the connection.
- // Default is to not close aged connections.
- MaxConnAge time.Duration
// Amount of time client waits for connection if all connections
// are busy before returning an error.
// Default is ReadTimeout + 1 second.
PoolTimeout time.Duration
- // Amount of time after which client closes idle connections.
+ // Minimum number of idle connections which is useful when establishing
+ // new connection is slow.
+ // Default is 0. the idle connections are not closed by default.
+ MinIdleConns int
+ // Maximum number of idle connections.
+ // Default is 0. the idle connections are not closed by default.
+ MaxIdleConns int
+ // Maximum number of connections allocated by the pool at a given time.
+ // When zero, there is no limit on the number of connections in the pool.
+ MaxActiveConns int
+ // ConnMaxIdleTime is the maximum amount of time a connection may be idle.
// Should be less than server's timeout.
- // Default is 5 minutes. -1 disables idle timeout check.
- IdleTimeout time.Duration
- // Frequency of idle checks made by idle connections reaper.
- // Default is 1 minute. -1 disables idle connections reaper,
- // but idle connections are still discarded by the client
- // if IdleTimeout is set.
- IdleCheckFrequency time.Duration
-
- // Enables read only queries on slave nodes.
- readOnly bool
-
- // TLS Config to use. When set TLS will be negotiated.
+ //
+ // Expired connections may be closed lazily before reuse.
+ // If d <= 0, connections are not closed due to a connection's idle time.
+ //
+ // Default is 30 minutes. -1 disables idle timeout check.
+ ConnMaxIdleTime time.Duration
+ // ConnMaxLifetime is the maximum amount of time a connection may be reused.
+ //
+ // Expired connections may be closed lazily before reuse.
+ // If <= 0, connections are not closed due to a connection's age.
+ //
+ // Default is to not close idle connections.
+ ConnMaxLifetime time.Duration
+
+ // TLS Config to use. When set, TLS will be negotiated.
TLSConfig *tls.Config
- // Limiter interface used to implemented circuit breaker or rate limiter.
+ // Limiter interface used to implement circuit breaker or rate limiter.
Limiter Limiter
+
+ // Enables read only queries on slave/follower nodes.
+ readOnly bool
+
+ // Disable set-lib on connect. Default is false.
+ DisableIndentity bool
+
+ // Add suffix to client name. Default is empty.
+ IdentitySuffix string
}
func (opt *Options) init() {
@@ -129,40 +164,36 @@ func (opt *Options) init() {
opt.DialTimeout = 5 * time.Second
}
if opt.Dialer == nil {
- opt.Dialer = func(ctx context.Context, network, addr string) (net.Conn, error) {
- netDialer := &net.Dialer{
- Timeout: opt.DialTimeout,
- KeepAlive: 5 * time.Minute,
- }
- if opt.TLSConfig == nil {
- return netDialer.DialContext(ctx, network, addr)
- }
- return tls.DialWithDialer(netDialer, network, addr, opt.TLSConfig)
- }
+ opt.Dialer = NewDialer(opt)
}
if opt.PoolSize == 0 {
opt.PoolSize = 10 * runtime.GOMAXPROCS(0)
}
switch opt.ReadTimeout {
+ case -2:
+ opt.ReadTimeout = -1
case -1:
opt.ReadTimeout = 0
case 0:
opt.ReadTimeout = 3 * time.Second
}
switch opt.WriteTimeout {
+ case -2:
+ opt.WriteTimeout = -1
case -1:
opt.WriteTimeout = 0
case 0:
opt.WriteTimeout = opt.ReadTimeout
}
if opt.PoolTimeout == 0 {
- opt.PoolTimeout = opt.ReadTimeout + time.Second
- }
- if opt.IdleTimeout == 0 {
- opt.IdleTimeout = 5 * time.Minute
+ if opt.ReadTimeout > 0 {
+ opt.PoolTimeout = opt.ReadTimeout + time.Second
+ } else {
+ opt.PoolTimeout = 30 * time.Second
+ }
}
- if opt.IdleCheckFrequency == 0 {
- opt.IdleCheckFrequency = time.Minute
+ if opt.ConnMaxIdleTime == 0 {
+ opt.ConnMaxIdleTime = 30 * time.Minute
}
if opt.MaxRetries == -1 {
@@ -189,36 +220,57 @@ func (opt *Options) clone() *Options {
return &clone
}
+// NewDialer returns a function that will be used as the default dialer
+// when none is specified in Options.Dialer.
+func NewDialer(opt *Options) func(context.Context, string, string) (net.Conn, error) {
+ return func(ctx context.Context, network, addr string) (net.Conn, error) {
+ netDialer := &net.Dialer{
+ Timeout: opt.DialTimeout,
+ KeepAlive: 5 * time.Minute,
+ }
+ if opt.TLSConfig == nil {
+ return netDialer.DialContext(ctx, network, addr)
+ }
+ return tls.DialWithDialer(netDialer, network, addr, opt.TLSConfig)
+ }
+}
+
// ParseURL parses an URL into Options that can be used to connect to Redis.
// Scheme is required.
// There are two connection types: by tcp socket and by unix socket.
// Tcp connection:
-// redis://<user>:<password>@<host>:<port>/<db_number>
+//
+// redis://<user>:<password>@<host>:<port>/<db_number>
+//
// Unix connection:
-// unix://<user>:<password>@</path/to/redis.sock>?db=<db_number>
+//
+// unix://<user>:<password>@</path/to/redis.sock>?db=<db_number>
+//
// Most Option fields can be set using query parameters, with the following restrictions:
-// - field names are mapped using snake-case conversion: to set MaxRetries, use max_retries
-// - only scalar type fields are supported (bool, int, time.Duration)
-// - for time.Duration fields, values must be a valid input for time.ParseDuration();
-// additionally a plain integer as value (i.e. without unit) is intepreted as seconds
-// - to disable a duration field, use value less than or equal to 0; to use the default
-// value, leave the value blank or remove the parameter
-// - only the last value is interpreted if a parameter is given multiple times
-// - fields "network", "addr", "username" and "password" can only be set using other
-// URL attributes (scheme, host, userinfo, resp.), query paremeters using these
-// names will be treated as unknown parameters
-// - unknown parameter names will result in an error
+// - field names are mapped using snake-case conversion: to set MaxRetries, use max_retries
+// - only scalar type fields are supported (bool, int, time.Duration)
+// - for time.Duration fields, values must be a valid input for time.ParseDuration();
+// additionally a plain integer as value (i.e. without unit) is intepreted as seconds
+// - to disable a duration field, use value less than or equal to 0; to use the default
+// value, leave the value blank or remove the parameter
+// - only the last value is interpreted if a parameter is given multiple times
+// - fields "network", "addr", "username" and "password" can only be set using other
+// URL attributes (scheme, host, userinfo, resp.), query paremeters using these
+// names will be treated as unknown parameters
+// - unknown parameter names will result in an error
+//
// Examples:
-// redis://user:password@localhost:6789/3?dial_timeout=3&db=1&read_timeout=6s&max_retries=2
-// is equivalent to:
-// &Options{
-// Network: "tcp",
-// Addr: "localhost:6789",
-// DB: 1, // path "/3" was overridden by "&db=1"
-// DialTimeout: 3 * time.Second, // no time unit = seconds
-// ReadTimeout: 6 * time.Second,
-// MaxRetries: 2,
-// }
+//
+// redis://user:password@localhost:6789/3?dial_timeout=3&db=1&read_timeout=6s&max_retries=2
+// is equivalent to:
+// &Options{
+// Network: "tcp",
+// Addr: "localhost:6789",
+// DB: 1, // path "/3" was overridden by "&db=1"
+// DialTimeout: 3 * time.Second, // no time unit = seconds
+// ReadTimeout: 6 * time.Second,
+// MaxRetries: 2,
+// }
func ParseURL(redisURL string) (*Options, error) {
u, err := url.Parse(redisURL)
if err != nil {
@@ -240,16 +292,7 @@ func setupTCPConn(u *url.URL) (*Options, error) {
o.Username, o.Password = getUserPassword(u)
- h, p, err := net.SplitHostPort(u.Host)
- if err != nil {
- h = u.Host
- }
- if h == "" {
- h = "localhost"
- }
- if p == "" {
- p = "6379"
- }
+ h, p := getHostPortWithDefaults(u)
o.Addr = net.JoinHostPort(h, p)
f := strings.FieldsFunc(u.Path, func(r rune) bool {
@@ -259,6 +302,7 @@ func setupTCPConn(u *url.URL) (*Options, error) {
case 0:
o.DB = 0
case 1:
+ var err error
if o.DB, err = strconv.Atoi(f[0]); err != nil {
return nil, fmt.Errorf("redis: invalid database number: %q", f[0])
}
@@ -267,12 +311,32 @@ func setupTCPConn(u *url.URL) (*Options, error) {
}
if u.Scheme == "rediss" {
- o.TLSConfig = &tls.Config{ServerName: h}
+ o.TLSConfig = &tls.Config{
+ ServerName: h,
+ MinVersion: tls.VersionTLS12,
+ }
}
return setupConnParams(u, o)
}
+// getHostPortWithDefaults is a helper function that splits the url into
+// a host and a port. If the host is missing, it defaults to localhost
+// and if the port is missing, it defaults to 6379.
+func getHostPortWithDefaults(u *url.URL) (string, string) {
+ host, port, err := net.SplitHostPort(u.Host)
+ if err != nil {
+ host = u.Host
+ }
+ if host == "" {
+ host = "localhost"
+ }
+ if port == "" {
+ port = "6379"
+ }
+ return host, port
+}
+
func setupUnixConn(u *url.URL) (*Options, error) {
o := &Options{
Network: "unix",
@@ -291,6 +355,10 @@ type queryOptions struct {
err error
}
+func (o *queryOptions) has(name string) bool {
+ return len(o.q[name]) > 0
+}
+
func (o *queryOptions) string(name string) string {
vs := o.q[name]
if len(vs) == 0 {
@@ -300,6 +368,12 @@ func (o *queryOptions) string(name string) string {
return vs[len(vs)-1]
}
+func (o *queryOptions) strings(name string) []string {
+ vs := o.q[name]
+ delete(o.q, name)
+ return vs
+}
+
func (o *queryOptions) int(name string) int {
s := o.string(name)
if s == "" {
@@ -377,6 +451,8 @@ func setupConnParams(u *url.URL, o *Options) (*Options, error) {
o.DB = db
}
+ o.Protocol = q.int("protocol")
+ o.ClientName = q.string("client_name")
o.MaxRetries = q.int("max_retries")
o.MinRetryBackoff = q.duration("min_retry_backoff")
o.MaxRetryBackoff = q.duration("max_retry_backoff")
@@ -385,11 +461,20 @@ func setupConnParams(u *url.URL, o *Options) (*Options, error) {
o.WriteTimeout = q.duration("write_timeout")
o.PoolFIFO = q.bool("pool_fifo")
o.PoolSize = q.int("pool_size")
- o.MinIdleConns = q.int("min_idle_conns")
- o.MaxConnAge = q.duration("max_conn_age")
o.PoolTimeout = q.duration("pool_timeout")
- o.IdleTimeout = q.duration("idle_timeout")
- o.IdleCheckFrequency = q.duration("idle_check_frequency")
+ o.MinIdleConns = q.int("min_idle_conns")
+ o.MaxIdleConns = q.int("max_idle_conns")
+ o.MaxActiveConns = q.int("max_active_conns")
+ if q.has("conn_max_idle_time") {
+ o.ConnMaxIdleTime = q.duration("conn_max_idle_time")
+ } else {
+ o.ConnMaxIdleTime = q.duration("idle_timeout")
+ }
+ if q.has("conn_max_lifetime") {
+ o.ConnMaxLifetime = q.duration("conn_max_lifetime")
+ } else {
+ o.ConnMaxLifetime = q.duration("max_conn_age")
+ }
if q.err != nil {
return nil, q.err
}
@@ -413,17 +498,21 @@ func getUserPassword(u *url.URL) (string, string) {
return user, password
}
-func newConnPool(opt *Options) *pool.ConnPool {
+func newConnPool(
+ opt *Options,
+ dialer func(ctx context.Context, network, addr string) (net.Conn, error),
+) *pool.ConnPool {
return pool.NewConnPool(&pool.Options{
Dialer: func(ctx context.Context) (net.Conn, error) {
- return opt.Dialer(ctx, opt.Network, opt.Addr)
+ return dialer(ctx, opt.Network, opt.Addr)
},
- PoolFIFO: opt.PoolFIFO,
- PoolSize: opt.PoolSize,
- MinIdleConns: opt.MinIdleConns,
- MaxConnAge: opt.MaxConnAge,
- PoolTimeout: opt.PoolTimeout,
- IdleTimeout: opt.IdleTimeout,
- IdleCheckFrequency: opt.IdleCheckFrequency,
+ PoolFIFO: opt.PoolFIFO,
+ PoolSize: opt.PoolSize,
+ PoolTimeout: opt.PoolTimeout,
+ MinIdleConns: opt.MinIdleConns,
+ MaxIdleConns: opt.MaxIdleConns,
+ MaxActiveConns: opt.MaxActiveConns,
+ ConnMaxIdleTime: opt.ConnMaxIdleTime,
+ ConnMaxLifetime: opt.ConnMaxLifetime,
})
}
diff --git a/dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/options_test.go b/dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.5.1/options_test.go
index 1450523..1db36fd 100644
--- a/dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/options_test.go
+++ b/dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.5.1/options_test.go
@@ -1,5 +1,4 @@
//go:build go1.7
-// +build go1.7
package redis
@@ -46,19 +45,25 @@ func TestParseURL(t *testing.T) {
o: &Options{Addr: "localhost:123", DB: 2, ReadTimeout: 2 * time.Second, PoolFIFO: true},
}, {
// special case handling for disabled timeouts
- url: "redis://localhost:123/?db=2&idle_timeout=0",
- o: &Options{Addr: "localhost:123", DB: 2, IdleTimeout: -1},
+ url: "redis://localhost:123/?db=2&conn_max_idle_time=0",
+ o: &Options{Addr: "localhost:123", DB: 2, ConnMaxIdleTime: -1},
}, {
// negative values disable timeouts as well
- url: "redis://localhost:123/?db=2&idle_timeout=-1",
- o: &Options{Addr: "localhost:123", DB: 2, IdleTimeout: -1},
+ url: "redis://localhost:123/?db=2&conn_max_idle_time=-1",
+ o: &Options{Addr: "localhost:123", DB: 2, ConnMaxIdleTime: -1},
}, {
// absent timeout values will use defaults
- url: "redis://localhost:123/?db=2&idle_timeout=",
- o: &Options{Addr: "localhost:123", DB: 2, IdleTimeout: 0},
+ url: "redis://localhost:123/?db=2&conn_max_idle_time=",
+ o: &Options{Addr: "localhost:123", DB: 2, ConnMaxIdleTime: 0},
}, {
- url: "redis://localhost:123/?db=2&idle_timeout", // missing "=" at the end
- o: &Options{Addr: "localhost:123", DB: 2, IdleTimeout: 0},
+ url: "redis://localhost:123/?db=2&conn_max_idle_time", // missing "=" at the end
+ o: &Options{Addr: "localhost:123", DB: 2, ConnMaxIdleTime: 0},
+ }, {
+ url: "redis://localhost:123/?db=2&client_name=hi", // client name
+ o: &Options{Addr: "localhost:123", DB: 2, ClientName: "hi"},
+ }, {
+ url: "redis://localhost:123/?db=2&protocol=2", // RESP Protocol
+ o: &Options{Addr: "localhost:123", DB: 2, Protocol: 2},
}, {
url: "unix:///tmp/redis.sock",
o: &Options{Addr: "/tmp/redis.sock"},
@@ -174,20 +179,20 @@ func comprareOptions(t *testing.T, actual, expected *Options) {
if actual.PoolSize != expected.PoolSize {
t.Errorf("PoolSize: got %v, expected %v", actual.PoolSize, expected.PoolSize)
}
+ if actual.PoolTimeout != expected.PoolTimeout {
+ t.Errorf("PoolTimeout: got %v, expected %v", actual.PoolTimeout, expected.PoolTimeout)
+ }
if actual.MinIdleConns != expected.MinIdleConns {
t.Errorf("MinIdleConns: got %v, expected %v", actual.MinIdleConns, expected.MinIdleConns)
}
- if actual.MaxConnAge != expected.MaxConnAge {
- t.Errorf("MaxConnAge: got %v, expected %v", actual.MaxConnAge, expected.MaxConnAge)
- }
- if actual.PoolTimeout != expected.PoolTimeout {
- t.Errorf("PoolTimeout: got %v, expected %v", actual.PoolTimeout, expected.PoolTimeout)
+ if actual.MaxIdleConns != expected.MaxIdleConns {
+ t.Errorf("MaxIdleConns: got %v, expected %v", actual.MaxIdleConns, expected.MaxIdleConns)
}
- if actual.IdleTimeout != expected.IdleTimeout {
- t.Errorf("IdleTimeout: got %v, expected %v", actual.IdleTimeout, expected.IdleTimeout)
+ if actual.ConnMaxIdleTime != expected.ConnMaxIdleTime {
+ t.Errorf("ConnMaxIdleTime: got %v, expected %v", actual.ConnMaxIdleTime, expected.ConnMaxIdleTime)
}
- if actual.IdleCheckFrequency != expected.IdleCheckFrequency {
- t.Errorf("IdleCheckFrequency: got %v, expected %v", actual.IdleCheckFrequency, expected.IdleCheckFrequency)
+ if actual.ConnMaxLifetime != expected.ConnMaxLifetime {
+ t.Errorf("ConnMaxLifetime: got %v, expected %v", actual.ConnMaxLifetime, expected.ConnMaxLifetime)
}
}
diff --git a/dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/cluster.go b/dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.5.1/osscluster.go
index a54f2f3..17f98d9 100644
--- a/dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/cluster.go
+++ b/dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.5.1/osscluster.go
@@ -6,17 +6,19 @@ import (
"fmt"
"math"
"net"
+ "net/url"
"runtime"
"sort"
+ "strings"
"sync"
"sync/atomic"
"time"
- "github.com/go-redis/redis/v8/internal"
- "github.com/go-redis/redis/v8/internal/hashtag"
- "github.com/go-redis/redis/v8/internal/pool"
- "github.com/go-redis/redis/v8/internal/proto"
- "github.com/go-redis/redis/v8/internal/rand"
+ "github.com/redis/go-redis/v9/internal"
+ "github.com/redis/go-redis/v9/internal/hashtag"
+ "github.com/redis/go-redis/v9/internal/pool"
+ "github.com/redis/go-redis/v9/internal/proto"
+ "github.com/redis/go-redis/v9/internal/rand"
)
var errClusterNoNodes = fmt.Errorf("redis: cluster has no nodes")
@@ -27,6 +29,9 @@ type ClusterOptions struct {
// A seed list of host:port addresses of cluster nodes.
Addrs []string
+ // ClientName will execute the `CLIENT SETNAME ClientName` command for each conn.
+ ClientName string
+
// NewClient creates a cluster node client with provided name and options.
NewClient func(opt *Options) *Client
@@ -57,29 +62,33 @@ type ClusterOptions struct {
OnConnect func(ctx context.Context, cn *Conn) error
- Username string
- Password string
+ Protocol int
+ Username string
+ Password string
+ CredentialsProvider func() (username string, password string)
MaxRetries int
MinRetryBackoff time.Duration
MaxRetryBackoff time.Duration
- DialTimeout time.Duration
- ReadTimeout time.Duration
- WriteTimeout time.Duration
+ DialTimeout time.Duration
+ ReadTimeout time.Duration
+ WriteTimeout time.Duration
+ ContextTimeoutEnabled bool
- // PoolFIFO uses FIFO mode for each node connection pool GET/PUT (default LIFO).
- PoolFIFO bool
+ PoolFIFO bool
+ PoolSize int // applies per cluster node and not for the whole cluster
+ PoolTimeout time.Duration
+ MinIdleConns int
+ MaxIdleConns int
+ MaxActiveConns int // applies per cluster node and not for the whole cluster
+ ConnMaxIdleTime time.Duration
+ ConnMaxLifetime time.Duration
- // PoolSize applies per cluster node and not for the whole cluster.
- PoolSize int
- MinIdleConns int
- MaxConnAge time.Duration
- PoolTimeout time.Duration
- IdleTimeout time.Duration
- IdleCheckFrequency time.Duration
+ TLSConfig *tls.Config
+ DisableIndentity bool // Disable set-lib on connect. Default is false.
- TLSConfig *tls.Config
+ IdentitySuffix string // Add suffix to client name. Default is empty.
}
func (opt *ClusterOptions) init() {
@@ -131,33 +140,163 @@ func (opt *ClusterOptions) init() {
}
}
-func (opt *ClusterOptions) clientOptions() *Options {
- const disableIdleCheck = -1
+// ParseClusterURL parses a URL into ClusterOptions that can be used to connect to Redis.
+// The URL must be in the form:
+//
+// redis://<user>:<password>@<host>:<port>
+// or
+// rediss://<user>:<password>@<host>:<port>
+//
+// To add additional addresses, specify the query parameter, "addr" one or more times. e.g:
+//
+// redis://<user>:<password>@<host>:<port>?addr=<host2>:<port2>&addr=<host3>:<port3>
+// or
+// rediss://<user>:<password>@<host>:<port>?addr=<host2>:<port2>&addr=<host3>:<port3>
+//
+// Most Option fields can be set using query parameters, with the following restrictions:
+// - field names are mapped using snake-case conversion: to set MaxRetries, use max_retries
+// - only scalar type fields are supported (bool, int, time.Duration)
+// - for time.Duration fields, values must be a valid input for time.ParseDuration();
+// additionally a plain integer as value (i.e. without unit) is intepreted as seconds
+// - to disable a duration field, use value less than or equal to 0; to use the default
+// value, leave the value blank or remove the parameter
+// - only the last value is interpreted if a parameter is given multiple times
+// - fields "network", "addr", "username" and "password" can only be set using other
+// URL attributes (scheme, host, userinfo, resp.), query paremeters using these
+// names will be treated as unknown parameters
+// - unknown parameter names will result in an error
+//
+// Example:
+//
+// redis://user:password@localhost:6789?dial_timeout=3&read_timeout=6s&addr=localhost:6790&addr=localhost:6791
+// is equivalent to:
+// &ClusterOptions{
+// Addr: ["localhost:6789", "localhost:6790", "localhost:6791"]
+// DialTimeout: 3 * time.Second, // no time unit = seconds
+// ReadTimeout: 6 * time.Second,
+// }
+func ParseClusterURL(redisURL string) (*ClusterOptions, error) {
+ o := &ClusterOptions{}
+
+ u, err := url.Parse(redisURL)
+ if err != nil {
+ return nil, err
+ }
+
+ // add base URL to the array of addresses
+ // more addresses may be added through the URL params
+ h, p := getHostPortWithDefaults(u)
+ o.Addrs = append(o.Addrs, net.JoinHostPort(h, p))
+
+ // setup username, password, and other configurations
+ o, err = setupClusterConn(u, h, o)
+ if err != nil {
+ return nil, err
+ }
+
+ return o, nil
+}
+// setupClusterConn gets the username and password from the URL and the query parameters.
+func setupClusterConn(u *url.URL, host string, o *ClusterOptions) (*ClusterOptions, error) {
+ switch u.Scheme {
+ case "rediss":
+ o.TLSConfig = &tls.Config{ServerName: host}
+ fallthrough
+ case "redis":
+ o.Username, o.Password = getUserPassword(u)
+ default:
+ return nil, fmt.Errorf("redis: invalid URL scheme: %s", u.Scheme)
+ }
+
+ // retrieve the configuration from the query parameters
+ o, err := setupClusterQueryParams(u, o)
+ if err != nil {
+ return nil, err
+ }
+
+ return o, nil
+}
+
+// setupClusterQueryParams converts query parameters in u to option value in o.
+func setupClusterQueryParams(u *url.URL, o *ClusterOptions) (*ClusterOptions, error) {
+ q := queryOptions{q: u.Query()}
+
+ o.Protocol = q.int("protocol")
+ o.ClientName = q.string("client_name")
+ o.MaxRedirects = q.int("max_redirects")
+ o.ReadOnly = q.bool("read_only")
+ o.RouteByLatency = q.bool("route_by_latency")
+ o.RouteRandomly = q.bool("route_randomly")
+ o.MaxRetries = q.int("max_retries")
+ o.MinRetryBackoff = q.duration("min_retry_backoff")
+ o.MaxRetryBackoff = q.duration("max_retry_backoff")
+ o.DialTimeout = q.duration("dial_timeout")
+ o.ReadTimeout = q.duration("read_timeout")
+ o.WriteTimeout = q.duration("write_timeout")
+ o.PoolFIFO = q.bool("pool_fifo")
+ o.PoolSize = q.int("pool_size")
+ o.MinIdleConns = q.int("min_idle_conns")
+ o.MaxIdleConns = q.int("max_idle_conns")
+ o.MaxActiveConns = q.int("max_active_conns")
+ o.PoolTimeout = q.duration("pool_timeout")
+ o.ConnMaxLifetime = q.duration("conn_max_lifetime")
+ o.ConnMaxIdleTime = q.duration("conn_max_idle_time")
+
+ if q.err != nil {
+ return nil, q.err
+ }
+
+ // addr can be specified as many times as needed
+ addrs := q.strings("addr")
+ for _, addr := range addrs {
+ h, p, err := net.SplitHostPort(addr)
+ if err != nil || h == "" || p == "" {
+ return nil, fmt.Errorf("redis: unable to parse addr param: %s", addr)
+ }
+
+ o.Addrs = append(o.Addrs, net.JoinHostPort(h, p))
+ }
+
+ // any parameters left?
+ if r := q.remaining(); len(r) > 0 {
+ return nil, fmt.Errorf("redis: unexpected option: %s", strings.Join(r, ", "))
+ }
+
+ return o, nil
+}
+
+func (opt *ClusterOptions) clientOptions() *Options {
return &Options{
- Dialer: opt.Dialer,
- OnConnect: opt.OnConnect,
+ ClientName: opt.ClientName,
+ Dialer: opt.Dialer,
+ OnConnect: opt.OnConnect,
- Username: opt.Username,
- Password: opt.Password,
+ Protocol: opt.Protocol,
+ Username: opt.Username,
+ Password: opt.Password,
+ CredentialsProvider: opt.CredentialsProvider,
MaxRetries: opt.MaxRetries,
MinRetryBackoff: opt.MinRetryBackoff,
MaxRetryBackoff: opt.MaxRetryBackoff,
- DialTimeout: opt.DialTimeout,
- ReadTimeout: opt.ReadTimeout,
- WriteTimeout: opt.WriteTimeout,
-
- PoolFIFO: opt.PoolFIFO,
- PoolSize: opt.PoolSize,
- MinIdleConns: opt.MinIdleConns,
- MaxConnAge: opt.MaxConnAge,
- PoolTimeout: opt.PoolTimeout,
- IdleTimeout: opt.IdleTimeout,
- IdleCheckFrequency: disableIdleCheck,
-
- TLSConfig: opt.TLSConfig,
+ DialTimeout: opt.DialTimeout,
+ ReadTimeout: opt.ReadTimeout,
+ WriteTimeout: opt.WriteTimeout,
+ ContextTimeoutEnabled: opt.ContextTimeoutEnabled,
+
+ PoolFIFO: opt.PoolFIFO,
+ PoolSize: opt.PoolSize,
+ PoolTimeout: opt.PoolTimeout,
+ MinIdleConns: opt.MinIdleConns,
+ MaxIdleConns: opt.MaxIdleConns,
+ MaxActiveConns: opt.MaxActiveConns,
+ ConnMaxIdleTime: opt.ConnMaxIdleTime,
+ ConnMaxLifetime: opt.ConnMaxLifetime,
+ DisableIndentity: opt.DisableIndentity,
+ IdentitySuffix: opt.IdentitySuffix,
+ TLSConfig: opt.TLSConfig,
// If ClusterSlots is populated, then we probably have an artificial
// cluster whose nodes are not in clustering mode (otherwise there isn't
// much use for ClusterSlots config). This means we cannot execute the
@@ -204,15 +343,26 @@ func (n *clusterNode) updateLatency() {
const numProbe = 10
var dur uint64
+ successes := 0
for i := 0; i < numProbe; i++ {
time.Sleep(time.Duration(10+rand.Intn(10)) * time.Millisecond)
start := time.Now()
- n.Client.Ping(context.TODO())
- dur += uint64(time.Since(start) / time.Microsecond)
+ err := n.Client.Ping(context.TODO()).Err()
+ if err == nil {
+ dur += uint64(time.Since(start) / time.Microsecond)
+ successes++
+ }
}
- latency := float64(dur) / float64(numProbe)
+ var latency float64
+ if successes == 0 {
+ // If none of the pings worked, set latency to some arbitrarily high value so this node gets
+ // least priority.
+ latency = float64((1 * time.Minute) / time.Microsecond)
+ } else {
+ latency = float64(dur) / float64(successes)
+ }
atomic.StoreUint32(&n.latency, uint32(latency+0.5))
}
@@ -262,6 +412,7 @@ type clusterNodes struct {
nodes map[string]*clusterNode
activeAddrs []string
closed bool
+ onNewNode []func(rdb *Client)
_generation uint32 // atomic
}
@@ -297,6 +448,12 @@ func (c *clusterNodes) Close() error {
return firstErr
}
+func (c *clusterNodes) OnNewNode(fn func(rdb *Client)) {
+ c.mu.Lock()
+ c.onNewNode = append(c.onNewNode, fn)
+ c.mu.Unlock()
+}
+
func (c *clusterNodes) Addrs() ([]string, error) {
var addrs []string
@@ -374,6 +531,9 @@ func (c *clusterNodes) GetOrCreate(addr string) (*clusterNode, error) {
}
node = newClusterNode(c.opt, addr)
+ for _, fn := range c.onNewNode {
+ fn(node.Client)
+ }
c.addrs = appendIfNotExists(c.addrs, addr)
c.nodes[addr] = node
@@ -683,21 +843,16 @@ func (c *clusterStateHolder) ReloadOrGet(ctx context.Context) (*clusterState, er
//------------------------------------------------------------------------------
-type clusterClient struct {
- opt *ClusterOptions
- nodes *clusterNodes
- state *clusterStateHolder //nolint:structcheck
- cmdsInfoCache *cmdsInfoCache //nolint:structcheck
-}
-
// ClusterClient is a Redis Cluster client representing a pool of zero
// or more underlying connections. It's safe for concurrent use by
// multiple goroutines.
type ClusterClient struct {
- *clusterClient
+ opt *ClusterOptions
+ nodes *clusterNodes
+ state *clusterStateHolder
+ cmdsInfoCache *cmdsInfoCache
cmdable
- hooks
- ctx context.Context
+ hooksMixin
}
// NewClusterClient returns a Redis Cluster client as described in
@@ -706,38 +861,24 @@ func NewClusterClient(opt *ClusterOptions) *ClusterClient {
opt.init()
c := &ClusterClient{
- clusterClient: &clusterClient{
- opt: opt,
- nodes: newClusterNodes(opt),
- },
- ctx: context.Background(),
+ opt: opt,
+ nodes: newClusterNodes(opt),
}
+
c.state = newClusterStateHolder(c.loadState)
c.cmdsInfoCache = newCmdsInfoCache(c.cmdsInfo)
c.cmdable = c.Process
- if opt.IdleCheckFrequency > 0 {
- go c.reaper(opt.IdleCheckFrequency)
- }
+ c.initHooks(hooks{
+ dial: nil,
+ process: c.process,
+ pipeline: c.processPipeline,
+ txPipeline: c.processTxPipeline,
+ })
return c
}
-func (c *ClusterClient) Context() context.Context {
- return c.ctx
-}
-
-func (c *ClusterClient) WithContext(ctx context.Context) *ClusterClient {
- if ctx == nil {
- panic("nil context")
- }
- clone := *c
- clone.cmdable = clone.Process
- clone.hooks.lock()
- clone.ctx = ctx
- return &clone
-}
-
// Options returns read-only Options that were used to create the client.
func (c *ClusterClient) Options() *ClusterOptions {
return c.opt
@@ -757,7 +898,7 @@ func (c *ClusterClient) Close() error {
return c.nodes.Close()
}
-// Do creates a Cmd from the args and processes the cmd.
+// Do create a Cmd from the args and processes the cmd.
func (c *ClusterClient) Do(ctx context.Context, args ...interface{}) *Cmd {
cmd := NewCmd(ctx, args...)
_ = c.Process(ctx, cmd)
@@ -765,13 +906,13 @@ func (c *ClusterClient) Do(ctx context.Context, args ...interface{}) *Cmd {
}
func (c *ClusterClient) Process(ctx context.Context, cmd Cmder) error {
- return c.hooks.process(ctx, cmd, c.process)
+ err := c.processHook(ctx, cmd)
+ cmd.SetErr(err)
+ return err
}
func (c *ClusterClient) process(ctx context.Context, cmd Cmder) error {
- cmdInfo := c.cmdInfo(cmd.Name())
- slot := c.cmdSlot(cmd)
-
+ slot := c.cmdSlot(ctx, cmd)
var node *clusterNode
var ask bool
var lastErr error
@@ -784,19 +925,19 @@ func (c *ClusterClient) process(ctx context.Context, cmd Cmder) error {
if node == nil {
var err error
- node, err = c.cmdNode(ctx, cmdInfo, slot)
+ node, err = c.cmdNode(ctx, cmd.Name(), slot)
if err != nil {
return err
}
}
if ask {
+ ask = false
+
pipe := node.Client.Pipeline()
_ = pipe.Process(ctx, NewCmd(ctx, "asking"))
_ = pipe.Process(ctx, cmd)
_, lastErr = pipe.Exec(ctx)
- _ = pipe.Close()
- ask = false
} else {
lastErr = node.Client.Process(ctx, cmd)
}
@@ -851,6 +992,10 @@ func (c *ClusterClient) process(ctx context.Context, cmd Cmder) error {
return lastErr
}
+func (c *ClusterClient) OnNewNode(fn func(rdb *Client)) {
+ c.nodes.OnNewNode(fn)
+}
+
// ForEachMaster concurrently calls the fn on each master node in the cluster.
// It returns the first error if any.
func (c *ClusterClient) ForEachMaster(
@@ -1056,30 +1201,9 @@ func (c *ClusterClient) loadState(ctx context.Context) (*clusterState, error) {
return nil, firstErr
}
-// reaper closes idle connections to the cluster.
-func (c *ClusterClient) reaper(idleCheckFrequency time.Duration) {
- ticker := time.NewTicker(idleCheckFrequency)
- defer ticker.Stop()
-
- for range ticker.C {
- nodes, err := c.nodes.All()
- if err != nil {
- break
- }
-
- for _, node := range nodes {
- _, err := node.Client.connPool.(*pool.ConnPool).ReapStaleConns()
- if err != nil {
- internal.Logger.Printf(c.Context(), "ReapStaleConns failed: %s", err)
- }
- }
- }
-}
-
func (c *ClusterClient) Pipeline() Pipeliner {
pipe := Pipeline{
- ctx: c.ctx,
- exec: c.processPipeline,
+ exec: pipelineExecer(c.processPipelineHook),
}
pipe.init()
return &pipe
@@ -1090,13 +1214,9 @@ func (c *ClusterClient) Pipelined(ctx context.Context, fn func(Pipeliner) error)
}
func (c *ClusterClient) processPipeline(ctx context.Context, cmds []Cmder) error {
- return c.hooks.processPipeline(ctx, cmds, c._processPipeline)
-}
-
-func (c *ClusterClient) _processPipeline(ctx context.Context, cmds []Cmder) error {
cmdsMap := newCmdsMap()
- err := c.mapCmdsByNode(ctx, cmdsMap, cmds)
- if err != nil {
+
+ if err := c.mapCmdsByNode(ctx, cmdsMap, cmds); err != nil {
setCmdsErr(cmds, err)
return err
}
@@ -1116,18 +1236,7 @@ func (c *ClusterClient) _processPipeline(ctx context.Context, cmds []Cmder) erro
wg.Add(1)
go func(node *clusterNode, cmds []Cmder) {
defer wg.Done()
-
- err := c._processPipelineNode(ctx, node, cmds, failedCmds)
- if err == nil {
- return
- }
- if attempt < c.opt.MaxRedirects {
- if err := c.mapCmdsByNode(ctx, failedCmds, cmds); err != nil {
- setCmdsErr(cmds, err)
- }
- } else {
- setCmdsErr(cmds, err)
- }
+ c.processPipelineNode(ctx, node, cmds, failedCmds)
}(node, cmds)
}
@@ -1147,9 +1256,9 @@ func (c *ClusterClient) mapCmdsByNode(ctx context.Context, cmdsMap *cmdsMap, cmd
return err
}
- if c.opt.ReadOnly && c.cmdsAreReadOnly(cmds) {
+ if c.opt.ReadOnly && c.cmdsAreReadOnly(ctx, cmds) {
for _, cmd := range cmds {
- slot := c.cmdSlot(cmd)
+ slot := c.cmdSlot(ctx, cmd)
node, err := c.slotReadOnlyNode(state, slot)
if err != nil {
return err
@@ -1160,7 +1269,7 @@ func (c *ClusterClient) mapCmdsByNode(ctx context.Context, cmdsMap *cmdsMap, cmd
}
for _, cmd := range cmds {
- slot := c.cmdSlot(cmd)
+ slot := c.cmdSlot(ctx, cmd)
node, err := state.slotMasterNode(slot)
if err != nil {
return err
@@ -1170,9 +1279,9 @@ func (c *ClusterClient) mapCmdsByNode(ctx context.Context, cmdsMap *cmdsMap, cmd
return nil
}
-func (c *ClusterClient) cmdsAreReadOnly(cmds []Cmder) bool {
+func (c *ClusterClient) cmdsAreReadOnly(ctx context.Context, cmds []Cmder) bool {
for _, cmd := range cmds {
- cmdInfo := c.cmdInfo(cmd.Name())
+ cmdInfo := c.cmdInfo(ctx, cmd.Name())
if cmdInfo == nil || !cmdInfo.ReadOnly {
return false
}
@@ -1180,22 +1289,42 @@ func (c *ClusterClient) cmdsAreReadOnly(cmds []Cmder) bool {
return true
}
-func (c *ClusterClient) _processPipelineNode(
+func (c *ClusterClient) processPipelineNode(
ctx context.Context, node *clusterNode, cmds []Cmder, failedCmds *cmdsMap,
+) {
+ _ = node.Client.withProcessPipelineHook(ctx, cmds, func(ctx context.Context, cmds []Cmder) error {
+ cn, err := node.Client.getConn(ctx)
+ if err != nil {
+ _ = c.mapCmdsByNode(ctx, failedCmds, cmds)
+ setCmdsErr(cmds, err)
+ return err
+ }
+
+ var processErr error
+ defer func() {
+ node.Client.releaseConn(ctx, cn, processErr)
+ }()
+ processErr = c.processPipelineNodeConn(ctx, node, cn, cmds, failedCmds)
+
+ return processErr
+ })
+}
+
+func (c *ClusterClient) processPipelineNodeConn(
+ ctx context.Context, node *clusterNode, cn *pool.Conn, cmds []Cmder, failedCmds *cmdsMap,
) error {
- return node.Client.hooks.processPipeline(ctx, cmds, func(ctx context.Context, cmds []Cmder) error {
- return node.Client.withConn(ctx, func(ctx context.Context, cn *pool.Conn) error {
- err := cn.WithWriter(ctx, c.opt.WriteTimeout, func(wr *proto.Writer) error {
- return writeCmds(wr, cmds)
- })
- if err != nil {
- return err
- }
+ if err := cn.WithWriter(c.context(ctx), c.opt.WriteTimeout, func(wr *proto.Writer) error {
+ return writeCmds(wr, cmds)
+ }); err != nil {
+ if shouldRetry(err, true) {
+ _ = c.mapCmdsByNode(ctx, failedCmds, cmds)
+ }
+ setCmdsErr(cmds, err)
+ return err
+ }
- return cn.WithReader(ctx, c.opt.ReadTimeout, func(rd *proto.Reader) error {
- return c.pipelineReadCmds(ctx, node, rd, cmds, failedCmds)
- })
- })
+ return cn.WithReader(c.context(ctx), c.opt.ReadTimeout, func(rd *proto.Reader) error {
+ return c.pipelineReadCmds(ctx, node, rd, cmds, failedCmds)
})
}
@@ -1206,7 +1335,7 @@ func (c *ClusterClient) pipelineReadCmds(
cmds []Cmder,
failedCmds *cmdsMap,
) error {
- for _, cmd := range cmds {
+ for i, cmd := range cmds {
err := cmd.readReply(rd)
cmd.SetErr(err)
@@ -1218,15 +1347,24 @@ func (c *ClusterClient) pipelineReadCmds(
continue
}
- if c.opt.ReadOnly && isLoadingError(err) {
+ if c.opt.ReadOnly {
node.MarkAsFailing()
- return err
}
- if isRedisError(err) {
- continue
+
+ if !isRedisError(err) {
+ if shouldRetry(err, true) {
+ _ = c.mapCmdsByNode(ctx, failedCmds, cmds)
+ }
+ setCmdsErr(cmds[i+1:], err)
+ return err
}
+ }
+
+ if err := cmds[0].Err(); err != nil && shouldRetry(err, true) {
+ _ = c.mapCmdsByNode(ctx, failedCmds, cmds)
return err
}
+
return nil
}
@@ -1260,8 +1398,10 @@ func (c *ClusterClient) checkMovedErr(
// TxPipeline acts like Pipeline, but wraps queued commands with MULTI/EXEC.
func (c *ClusterClient) TxPipeline() Pipeliner {
pipe := Pipeline{
- ctx: c.ctx,
- exec: c.processTxPipeline,
+ exec: func(ctx context.Context, cmds []Cmder) error {
+ cmds = wrapMultiExec(ctx, cmds)
+ return c.processTxPipelineHook(ctx, cmds)
+ },
}
pipe.init()
return &pipe
@@ -1272,10 +1412,6 @@ func (c *ClusterClient) TxPipelined(ctx context.Context, fn func(Pipeliner) erro
}
func (c *ClusterClient) processTxPipeline(ctx context.Context, cmds []Cmder) error {
- return c.hooks.processTxPipeline(ctx, cmds, c._processTxPipeline)
-}
-
-func (c *ClusterClient) _processTxPipeline(ctx context.Context, cmds []Cmder) error {
// Trim multi .. exec.
cmds = cmds[1 : len(cmds)-1]
@@ -1285,7 +1421,7 @@ func (c *ClusterClient) _processTxPipeline(ctx context.Context, cmds []Cmder) er
return err
}
- cmdsMap := c.mapCmdsBySlot(cmds)
+ cmdsMap := c.mapCmdsBySlot(ctx, cmds)
for slot, cmds := range cmdsMap {
node, err := state.slotMasterNode(slot)
if err != nil {
@@ -1309,19 +1445,7 @@ func (c *ClusterClient) _processTxPipeline(ctx context.Context, cmds []Cmder) er
wg.Add(1)
go func(node *clusterNode, cmds []Cmder) {
defer wg.Done()
-
- err := c._processTxPipelineNode(ctx, node, cmds, failedCmds)
- if err == nil {
- return
- }
-
- if attempt < c.opt.MaxRedirects {
- if err := c.mapCmdsByNode(ctx, failedCmds, cmds); err != nil {
- setCmdsErr(cmds, err)
- }
- } else {
- setCmdsErr(cmds, err)
- }
+ c.processTxPipelineNode(ctx, node, cmds, failedCmds)
}(node, cmds)
}
@@ -1336,44 +1460,69 @@ func (c *ClusterClient) _processTxPipeline(ctx context.Context, cmds []Cmder) er
return cmdsFirstErr(cmds)
}
-func (c *ClusterClient) mapCmdsBySlot(cmds []Cmder) map[int][]Cmder {
+func (c *ClusterClient) mapCmdsBySlot(ctx context.Context, cmds []Cmder) map[int][]Cmder {
cmdsMap := make(map[int][]Cmder)
for _, cmd := range cmds {
- slot := c.cmdSlot(cmd)
+ slot := c.cmdSlot(ctx, cmd)
cmdsMap[slot] = append(cmdsMap[slot], cmd)
}
return cmdsMap
}
-func (c *ClusterClient) _processTxPipelineNode(
+func (c *ClusterClient) processTxPipelineNode(
ctx context.Context, node *clusterNode, cmds []Cmder, failedCmds *cmdsMap,
+) {
+ cmds = wrapMultiExec(ctx, cmds)
+ _ = node.Client.withProcessPipelineHook(ctx, cmds, func(ctx context.Context, cmds []Cmder) error {
+ cn, err := node.Client.getConn(ctx)
+ if err != nil {
+ _ = c.mapCmdsByNode(ctx, failedCmds, cmds)
+ setCmdsErr(cmds, err)
+ return err
+ }
+
+ var processErr error
+ defer func() {
+ node.Client.releaseConn(ctx, cn, processErr)
+ }()
+ processErr = c.processTxPipelineNodeConn(ctx, node, cn, cmds, failedCmds)
+
+ return processErr
+ })
+}
+
+func (c *ClusterClient) processTxPipelineNodeConn(
+ ctx context.Context, node *clusterNode, cn *pool.Conn, cmds []Cmder, failedCmds *cmdsMap,
) error {
- return node.Client.hooks.processTxPipeline(ctx, cmds, func(ctx context.Context, cmds []Cmder) error {
- return node.Client.withConn(ctx, func(ctx context.Context, cn *pool.Conn) error {
- err := cn.WithWriter(ctx, c.opt.WriteTimeout, func(wr *proto.Writer) error {
- return writeCmds(wr, cmds)
- })
- if err != nil {
- return err
+ if err := cn.WithWriter(c.context(ctx), c.opt.WriteTimeout, func(wr *proto.Writer) error {
+ return writeCmds(wr, cmds)
+ }); err != nil {
+ if shouldRetry(err, true) {
+ _ = c.mapCmdsByNode(ctx, failedCmds, cmds)
+ }
+ setCmdsErr(cmds, err)
+ return err
+ }
+
+ return cn.WithReader(c.context(ctx), c.opt.ReadTimeout, func(rd *proto.Reader) error {
+ statusCmd := cmds[0].(*StatusCmd)
+ // Trim multi and exec.
+ trimmedCmds := cmds[1 : len(cmds)-1]
+
+ if err := c.txPipelineReadQueued(
+ ctx, rd, statusCmd, trimmedCmds, failedCmds,
+ ); err != nil {
+ setCmdsErr(cmds, err)
+
+ moved, ask, addr := isMovedError(err)
+ if moved || ask {
+ return c.cmdsMoved(ctx, trimmedCmds, moved, ask, addr, failedCmds)
}
- return cn.WithReader(ctx, c.opt.ReadTimeout, func(rd *proto.Reader) error {
- statusCmd := cmds[0].(*StatusCmd)
- // Trim multi and exec.
- cmds = cmds[1 : len(cmds)-1]
-
- err := c.txPipelineReadQueued(ctx, rd, statusCmd, cmds, failedCmds)
- if err != nil {
- moved, ask, addr := isMovedError(err)
- if moved || ask {
- return c.cmdsMoved(ctx, cmds, moved, ask, addr, failedCmds)
- }
- return err
- }
+ return err
+ }
- return pipelineReadCmds(rd, cmds)
- })
- })
+ return pipelineReadCmds(rd, trimmedCmds)
})
}
@@ -1406,12 +1555,7 @@ func (c *ClusterClient) txPipelineReadQueued(
return err
}
- switch line[0] {
- case proto.ErrorReply:
- return proto.ParseErrorReply(line)
- case proto.ArrayReply:
- // ok
- default:
+ if line[0] != proto.RespArray {
return fmt.Errorf("redis: expected '*', but got line %q", line)
}
@@ -1568,6 +1712,15 @@ func (c *ClusterClient) PSubscribe(ctx context.Context, channels ...string) *Pub
return pubsub
}
+// SSubscribe Subscribes the client to the specified shard channels.
+func (c *ClusterClient) SSubscribe(ctx context.Context, channels ...string) *PubSub {
+ pubsub := c.pubSub()
+ if len(channels) > 0 {
+ _ = pubsub.SSubscribe(ctx, channels...)
+ }
+ return pubsub
+}
+
func (c *ClusterClient) retryBackoff(attempt int) time.Duration {
return internal.RetryBackoff(attempt, c.opt.MinRetryBackoff, c.opt.MaxRetryBackoff)
}
@@ -1614,27 +1767,27 @@ func (c *ClusterClient) cmdsInfo(ctx context.Context) (map[string]*CommandInfo,
return nil, firstErr
}
-func (c *ClusterClient) cmdInfo(name string) *CommandInfo {
- cmdsInfo, err := c.cmdsInfoCache.Get(c.ctx)
+func (c *ClusterClient) cmdInfo(ctx context.Context, name string) *CommandInfo {
+ cmdsInfo, err := c.cmdsInfoCache.Get(ctx)
if err != nil {
+ internal.Logger.Printf(context.TODO(), "getting command info: %s", err)
return nil
}
info := cmdsInfo[name]
if info == nil {
- internal.Logger.Printf(c.Context(), "info for cmd=%s not found", name)
+ internal.Logger.Printf(context.TODO(), "info for cmd=%s not found", name)
}
return info
}
-func (c *ClusterClient) cmdSlot(cmd Cmder) int {
+func (c *ClusterClient) cmdSlot(ctx context.Context, cmd Cmder) int {
args := cmd.Args()
if args[0] == "cluster" && args[1] == "getkeysinslot" {
return args[2].(int)
}
- cmdInfo := c.cmdInfo(cmd.Name())
- return cmdSlot(cmd, cmdFirstKeyPos(cmd, cmdInfo))
+ return cmdSlot(cmd, cmdFirstKeyPos(cmd))
}
func cmdSlot(cmd Cmder, pos int) int {
@@ -1647,7 +1800,7 @@ func cmdSlot(cmd Cmder, pos int) int {
func (c *ClusterClient) cmdNode(
ctx context.Context,
- cmdInfo *CommandInfo,
+ cmdName string,
slot int,
) (*clusterNode, error) {
state, err := c.state.Get(ctx)
@@ -1655,13 +1808,16 @@ func (c *ClusterClient) cmdNode(
return nil, err
}
- if c.opt.ReadOnly && cmdInfo != nil && cmdInfo.ReadOnly {
- return c.slotReadOnlyNode(state, slot)
+ if c.opt.ReadOnly {
+ cmdInfo := c.cmdInfo(ctx, cmdName)
+ if cmdInfo != nil && cmdInfo.ReadOnly {
+ return c.slotReadOnlyNode(state, slot)
+ }
}
return state.slotMasterNode(slot)
}
-func (c *clusterClient) slotReadOnlyNode(state *clusterState, slot int) (*clusterNode, error) {
+func (c *ClusterClient) slotReadOnlyNode(state *clusterState, slot int) (*clusterNode, error) {
if c.opt.RouteByLatency {
return state.slotClosestNode(slot)
}
@@ -1708,6 +1864,13 @@ func (c *ClusterClient) MasterForKey(ctx context.Context, key string) (*Client,
return node.Client, err
}
+func (c *ClusterClient) context(ctx context.Context) context.Context {
+ if c.opt.ContextTimeoutEnabled {
+ return ctx
+ }
+ return context.Background()
+}
+
func appendUniqueNode(nodes []*clusterNode, node *clusterNode) []*clusterNode {
for _, n := range nodes {
if n == node {
diff --git a/dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/cluster_commands.go b/dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.5.1/osscluster_commands.go
index 085bce8..b13f8e7 100644
--- a/dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/cluster_commands.go
+++ b/dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.5.1/osscluster_commands.go
@@ -8,7 +8,7 @@ import (
func (c *ClusterClient) DBSize(ctx context.Context) *IntCmd {
cmd := NewIntCmd(ctx, "dbsize")
- _ = c.hooks.process(ctx, cmd, func(ctx context.Context, _ Cmder) error {
+ _ = c.withProcessHook(ctx, cmd, func(ctx context.Context, _ Cmder) error {
var size int64
err := c.ForEachMaster(ctx, func(ctx context.Context, master *Client) error {
n, err := master.DBSize(ctx).Result()
@@ -30,8 +30,8 @@ func (c *ClusterClient) DBSize(ctx context.Context) *IntCmd {
func (c *ClusterClient) ScriptLoad(ctx context.Context, script string) *StringCmd {
cmd := NewStringCmd(ctx, "script", "load", script)
- _ = c.hooks.process(ctx, cmd, func(ctx context.Context, _ Cmder) error {
- mu := &sync.Mutex{}
+ _ = c.withProcessHook(ctx, cmd, func(ctx context.Context, _ Cmder) error {
+ var mu sync.Mutex
err := c.ForEachShard(ctx, func(ctx context.Context, shard *Client) error {
val, err := shard.ScriptLoad(ctx, script).Result()
if err != nil {
@@ -56,7 +56,7 @@ func (c *ClusterClient) ScriptLoad(ctx context.Context, script string) *StringCm
func (c *ClusterClient) ScriptFlush(ctx context.Context) *StatusCmd {
cmd := NewStatusCmd(ctx, "script", "flush")
- _ = c.hooks.process(ctx, cmd, func(ctx context.Context, _ Cmder) error {
+ _ = c.withProcessHook(ctx, cmd, func(ctx context.Context, _ Cmder) error {
err := c.ForEachShard(ctx, func(ctx context.Context, shard *Client) error {
return shard.ScriptFlush(ctx).Err()
})
@@ -82,8 +82,8 @@ func (c *ClusterClient) ScriptExists(ctx context.Context, hashes ...string) *Boo
result[i] = true
}
- _ = c.hooks.process(ctx, cmd, func(ctx context.Context, _ Cmder) error {
- mu := &sync.Mutex{}
+ _ = c.withProcessHook(ctx, cmd, func(ctx context.Context, _ Cmder) error {
+ var mu sync.Mutex
err := c.ForEachShard(ctx, func(ctx context.Context, shard *Client) error {
val, err := shard.ScriptExists(ctx, hashes...).Result()
if err != nil {
diff --git a/dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/cluster_test.go b/dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.5.1/osscluster_test.go
index 6ee7364..3d2f807 100644
--- a/dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/cluster_test.go
+++ b/dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.5.1/osscluster_test.go
@@ -2,6 +2,8 @@ package redis_test
import (
"context"
+ "crypto/tls"
+ "errors"
"fmt"
"net"
"strconv"
@@ -9,11 +11,11 @@ import (
"sync"
"time"
- . "github.com/onsi/ginkgo"
- . "github.com/onsi/gomega"
+ . "github.com/bsm/ginkgo/v2"
+ . "github.com/bsm/gomega"
- "github.com/go-redis/redis/v8"
- "github.com/go-redis/redis/v8/internal/hashtag"
+ "github.com/redis/go-redis/v9"
+ "github.com/redis/go-redis/v9/internal/hashtag"
)
type clusterScenario struct {
@@ -82,8 +84,10 @@ func (s *clusterScenario) newClusterClient(
func (s *clusterScenario) Close() error {
for _, port := range s.ports {
- processes[port].Close()
- delete(processes, port)
+ if process, ok := processes[port]; ok {
+ process.Close()
+ delete(processes, port)
+ }
}
return nil
}
@@ -237,14 +241,6 @@ var _ = Describe("ClusterClient", func() {
var client *redis.ClusterClient
assertClusterClient := func() {
- It("supports WithContext", func() {
- ctx, cancel := context.WithCancel(ctx)
- cancel()
-
- err := client.Ping(ctx).Err()
- Expect(err).To(MatchError("context canceled"))
- })
-
It("should GET/SET/DEL", func() {
err := client.Get(ctx, "A").Err()
Expect(err).To(Equal(redis.Nil))
@@ -515,9 +511,7 @@ var _ = Describe("ClusterClient", func() {
pipe = client.Pipeline().(*redis.Pipeline)
})
- AfterEach(func() {
- Expect(pipe.Close()).NotTo(HaveOccurred())
- })
+ AfterEach(func() {})
assertPipeline()
})
@@ -527,9 +521,7 @@ var _ = Describe("ClusterClient", func() {
pipe = client.TxPipeline().(*redis.Pipeline)
})
- AfterEach(func() {
- Expect(pipe.Close()).NotTo(HaveOccurred())
- })
+ AfterEach(func() {})
assertPipeline()
})
@@ -559,6 +551,30 @@ var _ = Describe("ClusterClient", func() {
}, 30*time.Second).ShouldNot(HaveOccurred())
})
+ It("supports sharded PubSub", func() {
+ pubsub := client.SSubscribe(ctx, "mychannel")
+ defer pubsub.Close()
+
+ Eventually(func() error {
+ _, err := client.SPublish(ctx, "mychannel", "hello").Result()
+ if err != nil {
+ return err
+ }
+
+ msg, err := pubsub.ReceiveTimeout(ctx, time.Second)
+ if err != nil {
+ return err
+ }
+
+ _, ok := msg.(*redis.Message)
+ if !ok {
+ return fmt.Errorf("got %T, wanted *redis.Message", msg)
+ }
+
+ return nil
+ }, 30*time.Second).ShouldNot(HaveOccurred())
+ })
+
It("supports PubSub.Ping without channels", func() {
pubsub := client.Subscribe(ctx)
defer pubsub.Close()
@@ -568,9 +584,39 @@ var _ = Describe("ClusterClient", func() {
})
}
+ Describe("ClusterClient PROTO 2", func() {
+ BeforeEach(func() {
+ opt = redisClusterOptions()
+ opt.Protocol = 2
+ client = cluster.newClusterClient(ctx, opt)
+
+ err := client.ForEachMaster(ctx, func(ctx context.Context, master *redis.Client) error {
+ return master.FlushDB(ctx).Err()
+ })
+ Expect(err).NotTo(HaveOccurred())
+ })
+
+ AfterEach(func() {
+ _ = client.ForEachMaster(ctx, func(ctx context.Context, master *redis.Client) error {
+ return master.FlushDB(ctx).Err()
+ })
+ Expect(client.Close()).NotTo(HaveOccurred())
+ })
+
+ It("should CLUSTER PROTO 2", func() {
+ _ = client.ForEachShard(ctx, func(ctx context.Context, c *redis.Client) error {
+ val, err := c.Do(ctx, "HELLO").Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(val).Should(ContainElements("proto", int64(2)))
+ return nil
+ })
+ })
+ })
+
Describe("ClusterClient", func() {
BeforeEach(func() {
opt = redisClusterOptions()
+ opt.ClientName = "cluster_hi"
client = cluster.newClusterClient(ctx, opt)
err := client.ForEachMaster(ctx, func(ctx context.Context, master *redis.Client) error {
@@ -661,6 +707,90 @@ var _ = Describe("ClusterClient", func() {
Expect(assertSlotsEqual(res, wanted)).NotTo(HaveOccurred())
})
+ It("should CLUSTER SHARDS", func() {
+ res, err := client.ClusterShards(ctx).Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(res).NotTo(BeEmpty())
+
+ // Iterate over the ClusterShard results and validate the fields.
+ for _, shard := range res {
+ Expect(shard.Slots).NotTo(BeEmpty())
+ for _, slotRange := range shard.Slots {
+ Expect(slotRange.Start).To(BeNumerically(">=", 0))
+ Expect(slotRange.End).To(BeNumerically(">=", slotRange.Start))
+ }
+
+ Expect(shard.Nodes).NotTo(BeEmpty())
+ for _, node := range shard.Nodes {
+ Expect(node.ID).NotTo(BeEmpty())
+ Expect(node.Endpoint).NotTo(BeEmpty())
+ Expect(node.IP).NotTo(BeEmpty())
+ Expect(node.Port).To(BeNumerically(">", 0))
+
+ validRoles := []string{"master", "slave", "replica"}
+ Expect(validRoles).To(ContainElement(node.Role))
+
+ Expect(node.ReplicationOffset).To(BeNumerically(">=", 0))
+
+ validHealthStatuses := []string{"online", "failed", "loading"}
+ Expect(validHealthStatuses).To(ContainElement(node.Health))
+ }
+ }
+ })
+
+ It("should CLUSTER LINKS", func() {
+ res, err := client.ClusterLinks(ctx).Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(res).NotTo(BeEmpty())
+
+ // Iterate over the ClusterLink results and validate the map keys.
+ for _, link := range res {
+
+ Expect(link.Direction).NotTo(BeEmpty())
+ Expect([]string{"from", "to"}).To(ContainElement(link.Direction))
+ Expect(link.Node).NotTo(BeEmpty())
+ Expect(link.CreateTime).To(BeNumerically(">", 0))
+
+ Expect(link.Events).NotTo(BeEmpty())
+ validEventChars := []rune{'r', 'w'}
+ for _, eventChar := range link.Events {
+ Expect(validEventChars).To(ContainElement(eventChar))
+ }
+
+ Expect(link.SendBufferAllocated).To(BeNumerically(">=", 0))
+ Expect(link.SendBufferUsed).To(BeNumerically(">=", 0))
+ }
+ })
+
+ It("should cluster client setname", func() {
+ err := client.ForEachShard(ctx, func(ctx context.Context, c *redis.Client) error {
+ return c.Ping(ctx).Err()
+ })
+ Expect(err).NotTo(HaveOccurred())
+
+ _ = client.ForEachShard(ctx, func(ctx context.Context, c *redis.Client) error {
+ val, err := c.ClientList(ctx).Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(val).Should(ContainSubstring("name=cluster_hi"))
+ return nil
+ })
+ })
+
+ It("should CLUSTER PROTO 3", func() {
+ _ = client.ForEachShard(ctx, func(ctx context.Context, c *redis.Client) error {
+ val, err := c.Do(ctx, "HELLO").Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(val).Should(HaveKeyWithValue("proto", int64(3)))
+ return nil
+ })
+ })
+
+ It("should CLUSTER MYSHARDID", func() {
+ shardID, err := client.ClusterMyShardID(ctx).Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(shardID).ToNot(BeEmpty())
+ })
+
It("should CLUSTER NODES", func() {
res, err := client.ClusterNodes(ctx).Result()
Expect(err).NotTo(HaveOccurred())
@@ -737,6 +867,9 @@ var _ = Describe("ClusterClient", func() {
})
It("supports Process hook", func() {
+ testCtx, cancel := context.WithCancel(ctx)
+ defer cancel()
+
err := client.Ping(ctx).Err()
Expect(err).NotTo(HaveOccurred())
@@ -748,29 +881,47 @@ var _ = Describe("ClusterClient", func() {
var stack []string
clusterHook := &hook{
- beforeProcess: func(ctx context.Context, cmd redis.Cmder) (context.Context, error) {
- Expect(cmd.String()).To(Equal("ping: "))
- stack = append(stack, "cluster.BeforeProcess")
- return ctx, nil
- },
- afterProcess: func(ctx context.Context, cmd redis.Cmder) error {
- Expect(cmd.String()).To(Equal("ping: PONG"))
- stack = append(stack, "cluster.AfterProcess")
- return nil
+ processHook: func(hook redis.ProcessHook) redis.ProcessHook {
+ return func(ctx context.Context, cmd redis.Cmder) error {
+ select {
+ case <-testCtx.Done():
+ return hook(ctx, cmd)
+ default:
+ }
+
+ Expect(cmd.String()).To(Equal("ping: "))
+ stack = append(stack, "cluster.BeforeProcess")
+
+ err := hook(ctx, cmd)
+
+ Expect(cmd.String()).To(Equal("ping: PONG"))
+ stack = append(stack, "cluster.AfterProcess")
+
+ return err
+ }
},
}
client.AddHook(clusterHook)
nodeHook := &hook{
- beforeProcess: func(ctx context.Context, cmd redis.Cmder) (context.Context, error) {
- Expect(cmd.String()).To(Equal("ping: "))
- stack = append(stack, "shard.BeforeProcess")
- return ctx, nil
- },
- afterProcess: func(ctx context.Context, cmd redis.Cmder) error {
- Expect(cmd.String()).To(Equal("ping: PONG"))
- stack = append(stack, "shard.AfterProcess")
- return nil
+ processHook: func(hook redis.ProcessHook) redis.ProcessHook {
+ return func(ctx context.Context, cmd redis.Cmder) error {
+ select {
+ case <-testCtx.Done():
+ return hook(ctx, cmd)
+ default:
+ }
+
+ Expect(cmd.String()).To(Equal("ping: "))
+ stack = append(stack, "shard.BeforeProcess")
+
+ err := hook(ctx, cmd)
+
+ Expect(cmd.String()).To(Equal("ping: PONG"))
+ stack = append(stack, "shard.AfterProcess")
+
+ return err
+ }
},
}
@@ -787,11 +938,6 @@ var _ = Describe("ClusterClient", func() {
"shard.AfterProcess",
"cluster.AfterProcess",
}))
-
- clusterHook.beforeProcess = nil
- clusterHook.afterProcess = nil
- nodeHook.beforeProcess = nil
- nodeHook.afterProcess = nil
})
It("supports Pipeline hook", func() {
@@ -806,33 +952,39 @@ var _ = Describe("ClusterClient", func() {
var stack []string
client.AddHook(&hook{
- beforeProcessPipeline: func(ctx context.Context, cmds []redis.Cmder) (context.Context, error) {
- Expect(cmds).To(HaveLen(1))
- Expect(cmds[0].String()).To(Equal("ping: "))
- stack = append(stack, "cluster.BeforeProcessPipeline")
- return ctx, nil
- },
- afterProcessPipeline: func(ctx context.Context, cmds []redis.Cmder) error {
- Expect(cmds).To(HaveLen(1))
- Expect(cmds[0].String()).To(Equal("ping: PONG"))
- stack = append(stack, "cluster.AfterProcessPipeline")
- return nil
+ processPipelineHook: func(hook redis.ProcessPipelineHook) redis.ProcessPipelineHook {
+ return func(ctx context.Context, cmds []redis.Cmder) error {
+ Expect(cmds).To(HaveLen(1))
+ Expect(cmds[0].String()).To(Equal("ping: "))
+ stack = append(stack, "cluster.BeforeProcessPipeline")
+
+ err := hook(ctx, cmds)
+
+ Expect(cmds).To(HaveLen(1))
+ Expect(cmds[0].String()).To(Equal("ping: PONG"))
+ stack = append(stack, "cluster.AfterProcessPipeline")
+
+ return err
+ }
},
})
_ = client.ForEachShard(ctx, func(ctx context.Context, node *redis.Client) error {
node.AddHook(&hook{
- beforeProcessPipeline: func(ctx context.Context, cmds []redis.Cmder) (context.Context, error) {
- Expect(cmds).To(HaveLen(1))
- Expect(cmds[0].String()).To(Equal("ping: "))
- stack = append(stack, "shard.BeforeProcessPipeline")
- return ctx, nil
- },
- afterProcessPipeline: func(ctx context.Context, cmds []redis.Cmder) error {
- Expect(cmds).To(HaveLen(1))
- Expect(cmds[0].String()).To(Equal("ping: PONG"))
- stack = append(stack, "shard.AfterProcessPipeline")
- return nil
+ processPipelineHook: func(hook redis.ProcessPipelineHook) redis.ProcessPipelineHook {
+ return func(ctx context.Context, cmds []redis.Cmder) error {
+ Expect(cmds).To(HaveLen(1))
+ Expect(cmds[0].String()).To(Equal("ping: "))
+ stack = append(stack, "shard.BeforeProcessPipeline")
+
+ err := hook(ctx, cmds)
+
+ Expect(cmds).To(HaveLen(1))
+ Expect(cmds[0].String()).To(Equal("ping: PONG"))
+ stack = append(stack, "shard.AfterProcessPipeline")
+
+ return err
+ }
},
})
return nil
@@ -863,33 +1015,39 @@ var _ = Describe("ClusterClient", func() {
var stack []string
client.AddHook(&hook{
- beforeProcessPipeline: func(ctx context.Context, cmds []redis.Cmder) (context.Context, error) {
- Expect(cmds).To(HaveLen(3))
- Expect(cmds[1].String()).To(Equal("ping: "))
- stack = append(stack, "cluster.BeforeProcessPipeline")
- return ctx, nil
- },
- afterProcessPipeline: func(ctx context.Context, cmds []redis.Cmder) error {
- Expect(cmds).To(HaveLen(3))
- Expect(cmds[1].String()).To(Equal("ping: PONG"))
- stack = append(stack, "cluster.AfterProcessPipeline")
- return nil
+ processPipelineHook: func(hook redis.ProcessPipelineHook) redis.ProcessPipelineHook {
+ return func(ctx context.Context, cmds []redis.Cmder) error {
+ Expect(cmds).To(HaveLen(3))
+ Expect(cmds[1].String()).To(Equal("ping: "))
+ stack = append(stack, "cluster.BeforeProcessPipeline")
+
+ err := hook(ctx, cmds)
+
+ Expect(cmds).To(HaveLen(3))
+ Expect(cmds[1].String()).To(Equal("ping: PONG"))
+ stack = append(stack, "cluster.AfterProcessPipeline")
+
+ return err
+ }
},
})
_ = client.ForEachShard(ctx, func(ctx context.Context, node *redis.Client) error {
node.AddHook(&hook{
- beforeProcessPipeline: func(ctx context.Context, cmds []redis.Cmder) (context.Context, error) {
- Expect(cmds).To(HaveLen(3))
- Expect(cmds[1].String()).To(Equal("ping: "))
- stack = append(stack, "shard.BeforeProcessPipeline")
- return ctx, nil
- },
- afterProcessPipeline: func(ctx context.Context, cmds []redis.Cmder) error {
- Expect(cmds).To(HaveLen(3))
- Expect(cmds[1].String()).To(Equal("ping: PONG"))
- stack = append(stack, "shard.AfterProcessPipeline")
- return nil
+ processPipelineHook: func(hook redis.ProcessPipelineHook) redis.ProcessPipelineHook {
+ return func(ctx context.Context, cmds []redis.Cmder) error {
+ Expect(cmds).To(HaveLen(3))
+ Expect(cmds[1].String()).To(Equal("ping: "))
+ stack = append(stack, "shard.BeforeProcessPipeline")
+
+ err := hook(ctx, cmds)
+
+ Expect(cmds).To(HaveLen(3))
+ Expect(cmds[1].String()).To(Equal("ping: PONG"))
+ stack = append(stack, "shard.AfterProcessPipeline")
+
+ return err
+ }
},
})
return nil
@@ -1182,16 +1340,17 @@ var _ = Describe("ClusterClient with unavailable Cluster", func() {
var client *redis.ClusterClient
BeforeEach(func() {
- for _, node := range cluster.clients {
- err := node.ClientPause(ctx, 5*time.Second).Err()
- Expect(err).NotTo(HaveOccurred())
- }
-
opt := redisClusterOptions()
opt.ReadTimeout = 250 * time.Millisecond
opt.WriteTimeout = 250 * time.Millisecond
opt.MaxRedirects = 1
client = cluster.newClusterClientUnstable(opt)
+ Expect(client.Ping(ctx).Err()).NotTo(HaveOccurred())
+
+ for _, node := range cluster.clients {
+ err := node.ClientPause(ctx, 5*time.Second).Err()
+ Expect(err).NotTo(HaveOccurred())
+ }
})
AfterEach(func() {
@@ -1257,27 +1416,175 @@ var _ = Describe("ClusterClient timeout", func() {
Context("read/write timeout", func() {
BeforeEach(func() {
opt := redisClusterOptions()
- opt.ReadTimeout = 250 * time.Millisecond
- opt.WriteTimeout = 250 * time.Millisecond
- opt.MaxRedirects = 1
client = cluster.newClusterClient(ctx, opt)
err := client.ForEachShard(ctx, func(ctx context.Context, client *redis.Client) error {
- return client.ClientPause(ctx, pause).Err()
+ err := client.ClientPause(ctx, pause).Err()
+
+ opt := client.Options()
+ opt.ReadTimeout = time.Nanosecond
+ opt.WriteTimeout = time.Nanosecond
+
+ return err
})
Expect(err).NotTo(HaveOccurred())
+
+ // Overwrite timeouts after the client is initialized.
+ opt.ReadTimeout = time.Nanosecond
+ opt.WriteTimeout = time.Nanosecond
+ opt.MaxRedirects = 0
})
AfterEach(func() {
_ = client.ForEachShard(ctx, func(ctx context.Context, client *redis.Client) error {
defer GinkgoRecover()
+
+ opt := client.Options()
+ opt.ReadTimeout = time.Second
+ opt.WriteTimeout = time.Second
+
Eventually(func() error {
return client.Ping(ctx).Err()
}, 2*pause).ShouldNot(HaveOccurred())
return nil
})
+
+ err := client.Close()
+ Expect(err).NotTo(HaveOccurred())
})
testTimeout()
})
})
+
+var _ = Describe("ClusterClient ParseURL", func() {
+ cases := []struct {
+ test string
+ url string
+ o *redis.ClusterOptions // expected value
+ err error
+ }{
+ {
+ test: "ParseRedisURL",
+ url: "redis://localhost:123",
+ o: &redis.ClusterOptions{Addrs: []string{"localhost:123"}},
+ }, {
+ test: "ParseRedissURL",
+ url: "rediss://localhost:123",
+ o: &redis.ClusterOptions{Addrs: []string{"localhost:123"}, TLSConfig: &tls.Config{ServerName: "localhost"}},
+ }, {
+ test: "MissingRedisPort",
+ url: "redis://localhost",
+ o: &redis.ClusterOptions{Addrs: []string{"localhost:6379"}},
+ }, {
+ test: "MissingRedissPort",
+ url: "rediss://localhost",
+ o: &redis.ClusterOptions{Addrs: []string{"localhost:6379"}, TLSConfig: &tls.Config{ServerName: "localhost"}},
+ }, {
+ test: "MultipleRedisURLs",
+ url: "redis://localhost:123?addr=localhost:1234&addr=localhost:12345",
+ o: &redis.ClusterOptions{Addrs: []string{"localhost:123", "localhost:1234", "localhost:12345"}},
+ }, {
+ test: "MultipleRedissURLs",
+ url: "rediss://localhost:123?addr=localhost:1234&addr=localhost:12345",
+ o: &redis.ClusterOptions{Addrs: []string{"localhost:123", "localhost:1234", "localhost:12345"}, TLSConfig: &tls.Config{ServerName: "localhost"}},
+ }, {
+ test: "OnlyPassword",
+ url: "redis://:bar@localhost:123",
+ o: &redis.ClusterOptions{Addrs: []string{"localhost:123"}, Password: "bar"},
+ }, {
+ test: "OnlyUser",
+ url: "redis://foo@localhost:123",
+ o: &redis.ClusterOptions{Addrs: []string{"localhost:123"}, Username: "foo"},
+ }, {
+ test: "RedisUsernamePassword",
+ url: "redis://foo:bar@localhost:123",
+ o: &redis.ClusterOptions{Addrs: []string{"localhost:123"}, Username: "foo", Password: "bar"},
+ }, {
+ test: "RedissUsernamePassword",
+ url: "rediss://foo:bar@localhost:123?addr=localhost:1234",
+ o: &redis.ClusterOptions{Addrs: []string{"localhost:123", "localhost:1234"}, Username: "foo", Password: "bar", TLSConfig: &tls.Config{ServerName: "localhost"}},
+ }, {
+ test: "QueryParameters",
+ url: "redis://localhost:123?read_timeout=2&pool_fifo=true&addr=localhost:1234",
+ o: &redis.ClusterOptions{Addrs: []string{"localhost:123", "localhost:1234"}, ReadTimeout: 2 * time.Second, PoolFIFO: true},
+ }, {
+ test: "DisabledTimeout",
+ url: "redis://localhost:123?conn_max_idle_time=0",
+ o: &redis.ClusterOptions{Addrs: []string{"localhost:123"}, ConnMaxIdleTime: -1},
+ }, {
+ test: "DisabledTimeoutNeg",
+ url: "redis://localhost:123?conn_max_idle_time=-1",
+ o: &redis.ClusterOptions{Addrs: []string{"localhost:123"}, ConnMaxIdleTime: -1},
+ }, {
+ test: "UseDefault",
+ url: "redis://localhost:123?conn_max_idle_time=",
+ o: &redis.ClusterOptions{Addrs: []string{"localhost:123"}, ConnMaxIdleTime: 0},
+ }, {
+ test: "Protocol",
+ url: "redis://localhost:123?protocol=2",
+ o: &redis.ClusterOptions{Addrs: []string{"localhost:123"}, Protocol: 2},
+ }, {
+ test: "ClientName",
+ url: "redis://localhost:123?client_name=cluster_hi",
+ o: &redis.ClusterOptions{Addrs: []string{"localhost:123"}, ClientName: "cluster_hi"},
+ }, {
+ test: "UseDefaultMissing=",
+ url: "redis://localhost:123?conn_max_idle_time",
+ o: &redis.ClusterOptions{Addrs: []string{"localhost:123"}, ConnMaxIdleTime: 0},
+ }, {
+ test: "InvalidQueryAddr",
+ url: "rediss://foo:bar@localhost:123?addr=rediss://foo:barr@localhost:1234",
+ err: errors.New(`redis: unable to parse addr param: rediss://foo:barr@localhost:1234`),
+ }, {
+ test: "InvalidInt",
+ url: "redis://localhost?pool_size=five",
+ err: errors.New(`redis: invalid pool_size number: strconv.Atoi: parsing "five": invalid syntax`),
+ }, {
+ test: "InvalidBool",
+ url: "redis://localhost?pool_fifo=yes",
+ err: errors.New(`redis: invalid pool_fifo boolean: expected true/false/1/0 or an empty string, got "yes"`),
+ }, {
+ test: "UnknownParam",
+ url: "redis://localhost?abc=123",
+ err: errors.New("redis: unexpected option: abc"),
+ }, {
+ test: "InvalidScheme",
+ url: "https://google.com",
+ err: errors.New("redis: invalid URL scheme: https"),
+ },
+ }
+
+ It("match ParseClusterURL", func() {
+ for i := range cases {
+ tc := cases[i]
+ actual, err := redis.ParseClusterURL(tc.url)
+ if tc.err != nil {
+ Expect(err).Should(MatchError(tc.err))
+ } else {
+ Expect(err).NotTo(HaveOccurred())
+ }
+
+ if err == nil {
+ Expect(tc.o).NotTo(BeNil())
+
+ Expect(tc.o.Addrs).To(Equal(actual.Addrs))
+ Expect(tc.o.TLSConfig).To(Equal(actual.TLSConfig))
+ Expect(tc.o.Username).To(Equal(actual.Username))
+ Expect(tc.o.Password).To(Equal(actual.Password))
+ Expect(tc.o.MaxRetries).To(Equal(actual.MaxRetries))
+ Expect(tc.o.MinRetryBackoff).To(Equal(actual.MinRetryBackoff))
+ Expect(tc.o.MaxRetryBackoff).To(Equal(actual.MaxRetryBackoff))
+ Expect(tc.o.DialTimeout).To(Equal(actual.DialTimeout))
+ Expect(tc.o.ReadTimeout).To(Equal(actual.ReadTimeout))
+ Expect(tc.o.WriteTimeout).To(Equal(actual.WriteTimeout))
+ Expect(tc.o.PoolFIFO).To(Equal(actual.PoolFIFO))
+ Expect(tc.o.PoolSize).To(Equal(actual.PoolSize))
+ Expect(tc.o.MinIdleConns).To(Equal(actual.MinIdleConns))
+ Expect(tc.o.ConnMaxLifetime).To(Equal(actual.ConnMaxLifetime))
+ Expect(tc.o.ConnMaxIdleTime).To(Equal(actual.ConnMaxIdleTime))
+ Expect(tc.o.PoolTimeout).To(Equal(actual.PoolTimeout))
+ }
+ }
+ })
+})
diff --git a/dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/pipeline.go b/dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.5.1/pipeline.go
index 31bab97..1c11420 100644
--- a/dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/pipeline.go
+++ b/dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.5.1/pipeline.go
@@ -2,9 +2,7 @@ package redis
import (
"context"
- "sync"
-
- "github.com/go-redis/redis/v8/internal/pool"
+ "errors"
)
type pipelineExecer func(context.Context, []Cmder) error
@@ -13,7 +11,7 @@ type pipelineExecer func(context.Context, []Cmder) error
//
// Pipelining is a technique to extremely speed up processing by packing
// operations to batches, send them at once to Redis and read a replies in a
-// singe step.
+// single step.
// See https://redis.io/topics/pipelining
//
// Pay attention, that Pipeline is not a transaction, so you can get unexpected
@@ -24,29 +22,35 @@ type pipelineExecer func(context.Context, []Cmder) error
// depends of your batch size and/or use TxPipeline.
type Pipeliner interface {
StatefulCmdable
+
+ // Len is to obtain the number of commands in the pipeline that have not yet been executed.
Len() int
+
+ // Do is an API for executing any command.
+ // If a certain Redis command is not yet supported, you can use Do to execute it.
Do(ctx context.Context, args ...interface{}) *Cmd
+
+ // Process is to put the commands to be executed into the pipeline buffer.
Process(ctx context.Context, cmd Cmder) error
- Close() error
- Discard() error
+
+ // Discard is to discard all commands in the cache that have not yet been executed.
+ Discard()
+
+ // Exec is to send all the commands buffered in the pipeline to the redis-server.
Exec(ctx context.Context) ([]Cmder, error)
}
var _ Pipeliner = (*Pipeline)(nil)
// Pipeline implements pipelining as described in
-// http://redis.io/topics/pipelining. It's safe for concurrent use
-// by multiple goroutines.
+// http://redis.io/topics/pipelining.
+// Please note: it is not safe for concurrent use by multiple goroutines.
type Pipeline struct {
cmdable
statefulCmdable
- ctx context.Context
exec pipelineExecer
-
- mu sync.Mutex
- cmds []Cmder
- closed bool
+ cmds []Cmder
}
func (c *Pipeline) init() {
@@ -56,50 +60,29 @@ func (c *Pipeline) init() {
// Len returns the number of queued commands.
func (c *Pipeline) Len() int {
- c.mu.Lock()
- ln := len(c.cmds)
- c.mu.Unlock()
- return ln
+ return len(c.cmds)
}
// Do queues the custom command for later execution.
func (c *Pipeline) Do(ctx context.Context, args ...interface{}) *Cmd {
cmd := NewCmd(ctx, args...)
+ if len(args) == 0 {
+ cmd.SetErr(errors.New("redis: please enter the command to be executed"))
+ return cmd
+ }
_ = c.Process(ctx, cmd)
return cmd
}
// Process queues the cmd for later execution.
func (c *Pipeline) Process(ctx context.Context, cmd Cmder) error {
- c.mu.Lock()
c.cmds = append(c.cmds, cmd)
- c.mu.Unlock()
- return nil
-}
-
-// Close closes the pipeline, releasing any open resources.
-func (c *Pipeline) Close() error {
- c.mu.Lock()
- _ = c.discard()
- c.closed = true
- c.mu.Unlock()
return nil
}
// Discard resets the pipeline and discards queued commands.
-func (c *Pipeline) Discard() error {
- c.mu.Lock()
- err := c.discard()
- c.mu.Unlock()
- return err
-}
-
-func (c *Pipeline) discard() error {
- if c.closed {
- return pool.ErrClosed
- }
+func (c *Pipeline) Discard() {
c.cmds = c.cmds[:0]
- return nil
}
// Exec executes all previously queued commands using one
@@ -108,13 +91,6 @@ func (c *Pipeline) discard() error {
// Exec always returns list of commands and error of the first failed
// command if any.
func (c *Pipeline) Exec(ctx context.Context) ([]Cmder, error) {
- c.mu.Lock()
- defer c.mu.Unlock()
-
- if c.closed {
- return nil, pool.ErrClosed
- }
-
if len(c.cmds) == 0 {
return nil, nil
}
@@ -129,9 +105,7 @@ func (c *Pipeline) Pipelined(ctx context.Context, fn func(Pipeliner) error) ([]C
if err := fn(c); err != nil {
return nil, err
}
- cmds, err := c.Exec(ctx)
- _ = c.Close()
- return cmds, err
+ return c.Exec(ctx)
}
func (c *Pipeline) Pipeline() Pipeliner {
diff --git a/dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/pipeline_test.go b/dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.5.1/pipeline_test.go
index f24114d..7f73447 100644
--- a/dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/pipeline_test.go
+++ b/dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.5.1/pipeline_test.go
@@ -1,12 +1,13 @@
package redis_test
import (
+ "errors"
"strconv"
- . "github.com/onsi/ginkgo"
- . "github.com/onsi/gomega"
+ . "github.com/bsm/ginkgo/v2"
+ . "github.com/bsm/gomega"
- "github.com/go-redis/redis/v8"
+ "github.com/redis/go-redis/v9"
)
var _ = Describe("pipelining", func() {
@@ -70,7 +71,7 @@ var _ = Describe("pipelining", func() {
Expect(cmds).To(HaveLen(1))
})
- It("handles large pipelines", func() {
+ It("handles large pipelines", Label("NonRedisEnterprise"), func() {
for callCount := 1; callCount < 16; callCount++ {
for i := 1; i <= callCount; i++ {
pipe.SetNX(ctx, strconv.Itoa(i)+"_key", strconv.Itoa(i)+"_value", 0)
@@ -84,6 +85,11 @@ var _ = Describe("pipelining", func() {
}
}
})
+
+ It("should Exec, not Do", func() {
+ err := pipe.Do(ctx).Err()
+ Expect(err).To(Equal(errors.New("redis: please enter the command to be executed")))
+ })
}
Describe("Pipeline", func() {
diff --git a/dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/pool_test.go b/dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.5.1/pool_test.go
index dbef72e..1864e88 100644
--- a/dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/pool_test.go
+++ b/dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.5.1/pool_test.go
@@ -4,10 +4,10 @@ import (
"context"
"time"
- . "github.com/onsi/ginkgo"
- . "github.com/onsi/gomega"
+ . "github.com/bsm/ginkgo/v2"
+ . "github.com/bsm/gomega"
- "github.com/go-redis/redis/v8"
+ "github.com/redis/go-redis/v9"
)
var _ = Describe("pool", func() {
@@ -16,8 +16,8 @@ var _ = Describe("pool", func() {
BeforeEach(func() {
opt := redisOptions()
opt.MinIdleConns = 0
- opt.MaxConnAge = 0
- opt.IdleTimeout = time.Second
+ opt.ConnMaxLifetime = 0
+ opt.ConnMaxIdleTime = time.Second
client = redis.NewClient(opt)
})
@@ -72,7 +72,6 @@ var _ = Describe("pool", func() {
Expect(cmds).To(HaveLen(1))
Expect(ping.Err()).NotTo(HaveOccurred())
Expect(ping.Val()).To(Equal("PONG"))
- Expect(pipe.Close()).NotTo(HaveOccurred())
})
pool := client.Pool()
@@ -87,13 +86,14 @@ var _ = Describe("pool", func() {
cn.SetNetConn(&badConn{})
client.Pool().Put(ctx, cn)
- err = client.Ping(ctx).Err()
- Expect(err).To(MatchError("bad connection"))
-
val, err := client.Ping(ctx).Result()
Expect(err).NotTo(HaveOccurred())
Expect(val).To(Equal("PONG"))
+ val, err = client.Ping(ctx).Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(val).To(Equal("PONG"))
+
pool := client.Pool()
Expect(pool.Len()).To(Equal(1))
Expect(pool.IdleLen()).To(Equal(1))
@@ -105,11 +105,11 @@ var _ = Describe("pool", func() {
})
It("reuses connections", func() {
- // explain: https://github.com/go-redis/redis/pull/1675
+ // explain: https://github.com/redis/go-redis/pull/1675
opt := redisOptions()
opt.MinIdleConns = 0
- opt.MaxConnAge = 0
- opt.IdleTimeout = 2 * time.Second
+ opt.ConnMaxLifetime = 0
+ opt.ConnMaxIdleTime = 10 * time.Second
client = redis.NewClient(opt)
for i := 0; i < 100; i++ {
@@ -127,31 +127,4 @@ var _ = Describe("pool", func() {
Expect(stats.Misses).To(Equal(uint32(1)))
Expect(stats.Timeouts).To(Equal(uint32(0)))
})
-
- It("removes idle connections", func() {
- err := client.Ping(ctx).Err()
- Expect(err).NotTo(HaveOccurred())
-
- stats := client.PoolStats()
- Expect(stats).To(Equal(&redis.PoolStats{
- Hits: 0,
- Misses: 1,
- Timeouts: 0,
- TotalConns: 1,
- IdleConns: 1,
- StaleConns: 0,
- }))
-
- time.Sleep(2 * time.Second)
-
- stats = client.PoolStats()
- Expect(stats).To(Equal(&redis.PoolStats{
- Hits: 0,
- Misses: 1,
- Timeouts: 0,
- TotalConns: 0,
- IdleConns: 0,
- StaleConns: 1,
- }))
- })
})
diff --git a/dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.5.1/probabilistic.go b/dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.5.1/probabilistic.go
new file mode 100644
index 0000000..5d5cd1a
--- /dev/null
+++ b/dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.5.1/probabilistic.go
@@ -0,0 +1,1429 @@
+package redis
+
+import (
+ "context"
+ "fmt"
+
+ "github.com/redis/go-redis/v9/internal/proto"
+)
+
+type ProbabilisticCmdable interface {
+ BFAdd(ctx context.Context, key string, element interface{}) *BoolCmd
+ BFCard(ctx context.Context, key string) *IntCmd
+ BFExists(ctx context.Context, key string, element interface{}) *BoolCmd
+ BFInfo(ctx context.Context, key string) *BFInfoCmd
+ BFInfoArg(ctx context.Context, key, option string) *BFInfoCmd
+ BFInfoCapacity(ctx context.Context, key string) *BFInfoCmd
+ BFInfoSize(ctx context.Context, key string) *BFInfoCmd
+ BFInfoFilters(ctx context.Context, key string) *BFInfoCmd
+ BFInfoItems(ctx context.Context, key string) *BFInfoCmd
+ BFInfoExpansion(ctx context.Context, key string) *BFInfoCmd
+ BFInsert(ctx context.Context, key string, options *BFInsertOptions, elements ...interface{}) *BoolSliceCmd
+ BFMAdd(ctx context.Context, key string, elements ...interface{}) *BoolSliceCmd
+ BFMExists(ctx context.Context, key string, elements ...interface{}) *BoolSliceCmd
+ BFReserve(ctx context.Context, key string, errorRate float64, capacity int64) *StatusCmd
+ BFReserveExpansion(ctx context.Context, key string, errorRate float64, capacity, expansion int64) *StatusCmd
+ BFReserveNonScaling(ctx context.Context, key string, errorRate float64, capacity int64) *StatusCmd
+ BFReserveWithArgs(ctx context.Context, key string, options *BFReserveOptions) *StatusCmd
+ BFScanDump(ctx context.Context, key string, iterator int64) *ScanDumpCmd
+ BFLoadChunk(ctx context.Context, key string, iterator int64, data interface{}) *StatusCmd
+
+ CFAdd(ctx context.Context, key string, element interface{}) *BoolCmd
+ CFAddNX(ctx context.Context, key string, element interface{}) *BoolCmd
+ CFCount(ctx context.Context, key string, element interface{}) *IntCmd
+ CFDel(ctx context.Context, key string, element interface{}) *BoolCmd
+ CFExists(ctx context.Context, key string, element interface{}) *BoolCmd
+ CFInfo(ctx context.Context, key string) *CFInfoCmd
+ CFInsert(ctx context.Context, key string, options *CFInsertOptions, elements ...interface{}) *BoolSliceCmd
+ CFInsertNX(ctx context.Context, key string, options *CFInsertOptions, elements ...interface{}) *IntSliceCmd
+ CFMExists(ctx context.Context, key string, elements ...interface{}) *BoolSliceCmd
+ CFReserve(ctx context.Context, key string, capacity int64) *StatusCmd
+ CFReserveWithArgs(ctx context.Context, key string, options *CFReserveOptions) *StatusCmd
+ CFReserveExpansion(ctx context.Context, key string, capacity int64, expansion int64) *StatusCmd
+ CFReserveBucketSize(ctx context.Context, key string, capacity int64, bucketsize int64) *StatusCmd
+ CFReserveMaxIterations(ctx context.Context, key string, capacity int64, maxiterations int64) *StatusCmd
+ CFScanDump(ctx context.Context, key string, iterator int64) *ScanDumpCmd
+ CFLoadChunk(ctx context.Context, key string, iterator int64, data interface{}) *StatusCmd
+
+ CMSIncrBy(ctx context.Context, key string, elements ...interface{}) *IntSliceCmd
+ CMSInfo(ctx context.Context, key string) *CMSInfoCmd
+ CMSInitByDim(ctx context.Context, key string, width, height int64) *StatusCmd
+ CMSInitByProb(ctx context.Context, key string, errorRate, probability float64) *StatusCmd
+ CMSMerge(ctx context.Context, destKey string, sourceKeys ...string) *StatusCmd
+ CMSMergeWithWeight(ctx context.Context, destKey string, sourceKeys map[string]int64) *StatusCmd
+ CMSQuery(ctx context.Context, key string, elements ...interface{}) *IntSliceCmd
+
+ TopKAdd(ctx context.Context, key string, elements ...interface{}) *StringSliceCmd
+ TopKCount(ctx context.Context, key string, elements ...interface{}) *IntSliceCmd
+ TopKIncrBy(ctx context.Context, key string, elements ...interface{}) *StringSliceCmd
+ TopKInfo(ctx context.Context, key string) *TopKInfoCmd
+ TopKList(ctx context.Context, key string) *StringSliceCmd
+ TopKListWithCount(ctx context.Context, key string) *MapStringIntCmd
+ TopKQuery(ctx context.Context, key string, elements ...interface{}) *BoolSliceCmd
+ TopKReserve(ctx context.Context, key string, k int64) *StatusCmd
+ TopKReserveWithOptions(ctx context.Context, key string, k int64, width, depth int64, decay float64) *StatusCmd
+
+ TDigestAdd(ctx context.Context, key string, elements ...float64) *StatusCmd
+ TDigestByRank(ctx context.Context, key string, rank ...uint64) *FloatSliceCmd
+ TDigestByRevRank(ctx context.Context, key string, rank ...uint64) *FloatSliceCmd
+ TDigestCDF(ctx context.Context, key string, elements ...float64) *FloatSliceCmd
+ TDigestCreate(ctx context.Context, key string) *StatusCmd
+ TDigestCreateWithCompression(ctx context.Context, key string, compression int64) *StatusCmd
+ TDigestInfo(ctx context.Context, key string) *TDigestInfoCmd
+ TDigestMax(ctx context.Context, key string) *FloatCmd
+ TDigestMin(ctx context.Context, key string) *FloatCmd
+ TDigestMerge(ctx context.Context, destKey string, options *TDigestMergeOptions, sourceKeys ...string) *StatusCmd
+ TDigestQuantile(ctx context.Context, key string, elements ...float64) *FloatSliceCmd
+ TDigestRank(ctx context.Context, key string, values ...float64) *IntSliceCmd
+ TDigestReset(ctx context.Context, key string) *StatusCmd
+ TDigestRevRank(ctx context.Context, key string, values ...float64) *IntSliceCmd
+ TDigestTrimmedMean(ctx context.Context, key string, lowCutQuantile, highCutQuantile float64) *FloatCmd
+}
+
+type BFInsertOptions struct {
+ Capacity int64
+ Error float64
+ Expansion int64
+ NonScaling bool
+ NoCreate bool
+}
+
+type BFReserveOptions struct {
+ Capacity int64
+ Error float64
+ Expansion int64
+ NonScaling bool
+}
+
+type CFReserveOptions struct {
+ Capacity int64
+ BucketSize int64
+ MaxIterations int64
+ Expansion int64
+}
+
+type CFInsertOptions struct {
+ Capacity int64
+ NoCreate bool
+}
+
+// -------------------------------------------
+// Bloom filter commands
+//-------------------------------------------
+
+// BFReserve creates an empty Bloom filter with a single sub-filter
+// for the initial specified capacity and with an upper bound error_rate.
+// For more information - https://redis.io/commands/bf.reserve/
+func (c cmdable) BFReserve(ctx context.Context, key string, errorRate float64, capacity int64) *StatusCmd {
+ args := []interface{}{"BF.RESERVE", key, errorRate, capacity}
+ cmd := NewStatusCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+// BFReserveExpansion creates an empty Bloom filter with a single sub-filter
+// for the initial specified capacity and with an upper bound error_rate.
+// This function also allows for specifying an expansion rate for the filter.
+// For more information - https://redis.io/commands/bf.reserve/
+func (c cmdable) BFReserveExpansion(ctx context.Context, key string, errorRate float64, capacity, expansion int64) *StatusCmd {
+ args := []interface{}{"BF.RESERVE", key, errorRate, capacity, "EXPANSION", expansion}
+ cmd := NewStatusCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+// BFReserveNonScaling creates an empty Bloom filter with a single sub-filter
+// for the initial specified capacity and with an upper bound error_rate.
+// This function also allows for specifying that the filter should not scale.
+// For more information - https://redis.io/commands/bf.reserve/
+func (c cmdable) BFReserveNonScaling(ctx context.Context, key string, errorRate float64, capacity int64) *StatusCmd {
+ args := []interface{}{"BF.RESERVE", key, errorRate, capacity, "NONSCALING"}
+ cmd := NewStatusCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+// BFReserveWithArgs creates an empty Bloom filter with a single sub-filter
+// for the initial specified capacity and with an upper bound error_rate.
+// This function also allows for specifying additional options such as expansion rate and non-scaling behavior.
+// For more information - https://redis.io/commands/bf.reserve/
+func (c cmdable) BFReserveWithArgs(ctx context.Context, key string, options *BFReserveOptions) *StatusCmd {
+ args := []interface{}{"BF.RESERVE", key}
+ if options != nil {
+ args = append(args, options.Error, options.Capacity)
+ if options.Expansion != 0 {
+ args = append(args, "EXPANSION", options.Expansion)
+ }
+ if options.NonScaling {
+ args = append(args, "NONSCALING")
+ }
+ }
+ cmd := NewStatusCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+// BFAdd adds an item to a Bloom filter.
+// For more information - https://redis.io/commands/bf.add/
+func (c cmdable) BFAdd(ctx context.Context, key string, element interface{}) *BoolCmd {
+ args := []interface{}{"BF.ADD", key, element}
+ cmd := NewBoolCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+// BFCard returns the cardinality of a Bloom filter -
+// number of items that were added to a Bloom filter and detected as unique
+// (items that caused at least one bit to be set in at least one sub-filter).
+// For more information - https://redis.io/commands/bf.card/
+func (c cmdable) BFCard(ctx context.Context, key string) *IntCmd {
+ args := []interface{}{"BF.CARD", key}
+ cmd := NewIntCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+// BFExists determines whether a given item was added to a Bloom filter.
+// For more information - https://redis.io/commands/bf.exists/
+func (c cmdable) BFExists(ctx context.Context, key string, element interface{}) *BoolCmd {
+ args := []interface{}{"BF.EXISTS", key, element}
+ cmd := NewBoolCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+// BFLoadChunk restores a Bloom filter previously saved using BF.SCANDUMP.
+// For more information - https://redis.io/commands/bf.loadchunk/
+func (c cmdable) BFLoadChunk(ctx context.Context, key string, iterator int64, data interface{}) *StatusCmd {
+ args := []interface{}{"BF.LOADCHUNK", key, iterator, data}
+ cmd := NewStatusCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+// Begins an incremental save of the Bloom filter.
+// This command is useful for large Bloom filters that cannot fit into the DUMP and RESTORE model.
+// For more information - https://redis.io/commands/bf.scandump/
+func (c cmdable) BFScanDump(ctx context.Context, key string, iterator int64) *ScanDumpCmd {
+ args := []interface{}{"BF.SCANDUMP", key, iterator}
+ cmd := newScanDumpCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+type ScanDump struct {
+ Iter int64
+ Data string
+}
+
+type ScanDumpCmd struct {
+ baseCmd
+
+ val ScanDump
+}
+
+func newScanDumpCmd(ctx context.Context, args ...interface{}) *ScanDumpCmd {
+ return &ScanDumpCmd{
+ baseCmd: baseCmd{
+ ctx: ctx,
+ args: args,
+ },
+ }
+}
+
+func (cmd *ScanDumpCmd) String() string {
+ return cmdString(cmd, cmd.val)
+}
+
+func (cmd *ScanDumpCmd) SetVal(val ScanDump) {
+ cmd.val = val
+}
+
+func (cmd *ScanDumpCmd) Result() (ScanDump, error) {
+ return cmd.val, cmd.err
+}
+
+func (cmd *ScanDumpCmd) Val() ScanDump {
+ return cmd.val
+}
+
+func (cmd *ScanDumpCmd) readReply(rd *proto.Reader) (err error) {
+ n, err := rd.ReadMapLen()
+ if err != nil {
+ return err
+ }
+ cmd.val = ScanDump{}
+ for i := 0; i < n; i++ {
+ iter, err := rd.ReadInt()
+ if err != nil {
+ return err
+ }
+ data, err := rd.ReadString()
+ if err != nil {
+ return err
+ }
+ cmd.val.Data = data
+ cmd.val.Iter = iter
+
+ }
+
+ return nil
+}
+
+// Returns information about a Bloom filter.
+// For more information - https://redis.io/commands/bf.info/
+func (c cmdable) BFInfo(ctx context.Context, key string) *BFInfoCmd {
+ args := []interface{}{"BF.INFO", key}
+ cmd := NewBFInfoCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+type BFInfo struct {
+ Capacity int64
+ Size int64
+ Filters int64
+ ItemsInserted int64
+ ExpansionRate int64
+}
+
+type BFInfoCmd struct {
+ baseCmd
+
+ val BFInfo
+}
+
+func NewBFInfoCmd(ctx context.Context, args ...interface{}) *BFInfoCmd {
+ return &BFInfoCmd{
+ baseCmd: baseCmd{
+ ctx: ctx,
+ args: args,
+ },
+ }
+}
+
+func (cmd *BFInfoCmd) SetVal(val BFInfo) {
+ cmd.val = val
+}
+
+func (cmd *BFInfoCmd) String() string {
+ return cmdString(cmd, cmd.val)
+}
+
+func (cmd *BFInfoCmd) Val() BFInfo {
+ return cmd.val
+}
+
+func (cmd *BFInfoCmd) Result() (BFInfo, error) {
+ return cmd.val, cmd.err
+}
+
+func (cmd *BFInfoCmd) readReply(rd *proto.Reader) (err error) {
+ n, err := rd.ReadMapLen()
+ if err != nil {
+ return err
+ }
+
+ var key string
+ var result BFInfo
+ for f := 0; f < n; f++ {
+ key, err = rd.ReadString()
+ if err != nil {
+ return err
+ }
+
+ switch key {
+ case "Capacity":
+ result.Capacity, err = rd.ReadInt()
+ case "Size":
+ result.Size, err = rd.ReadInt()
+ case "Number of filters":
+ result.Filters, err = rd.ReadInt()
+ case "Number of items inserted":
+ result.ItemsInserted, err = rd.ReadInt()
+ case "Expansion rate":
+ result.ExpansionRate, err = rd.ReadInt()
+ default:
+ return fmt.Errorf("redis: BLOOM.INFO unexpected key %s", key)
+ }
+
+ if err != nil {
+ return err
+ }
+ }
+
+ cmd.val = result
+ return nil
+}
+
+// BFInfoCapacity returns information about the capacity of a Bloom filter.
+// For more information - https://redis.io/commands/bf.info/
+func (c cmdable) BFInfoCapacity(ctx context.Context, key string) *BFInfoCmd {
+ return c.BFInfoArg(ctx, key, "CAPACITY")
+}
+
+// BFInfoSize returns information about the size of a Bloom filter.
+// For more information - https://redis.io/commands/bf.info/
+func (c cmdable) BFInfoSize(ctx context.Context, key string) *BFInfoCmd {
+ return c.BFInfoArg(ctx, key, "SIZE")
+}
+
+// BFInfoFilters returns information about the filters of a Bloom filter.
+// For more information - https://redis.io/commands/bf.info/
+func (c cmdable) BFInfoFilters(ctx context.Context, key string) *BFInfoCmd {
+ return c.BFInfoArg(ctx, key, "FILTERS")
+}
+
+// BFInfoItems returns information about the items of a Bloom filter.
+// For more information - https://redis.io/commands/bf.info/
+func (c cmdable) BFInfoItems(ctx context.Context, key string) *BFInfoCmd {
+ return c.BFInfoArg(ctx, key, "ITEMS")
+}
+
+// BFInfoExpansion returns information about the expansion rate of a Bloom filter.
+// For more information - https://redis.io/commands/bf.info/
+func (c cmdable) BFInfoExpansion(ctx context.Context, key string) *BFInfoCmd {
+ return c.BFInfoArg(ctx, key, "EXPANSION")
+}
+
+// BFInfoArg returns information about a specific option of a Bloom filter.
+// For more information - https://redis.io/commands/bf.info/
+func (c cmdable) BFInfoArg(ctx context.Context, key, option string) *BFInfoCmd {
+ args := []interface{}{"BF.INFO", key, option}
+ cmd := NewBFInfoCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+// BFInsert inserts elements into a Bloom filter.
+// This function also allows for specifying additional options such as:
+// capacity, error rate, expansion rate, and non-scaling behavior.
+// For more information - https://redis.io/commands/bf.insert/
+func (c cmdable) BFInsert(ctx context.Context, key string, options *BFInsertOptions, elements ...interface{}) *BoolSliceCmd {
+ args := []interface{}{"BF.INSERT", key}
+ if options != nil {
+ if options.Capacity != 0 {
+ args = append(args, "CAPACITY", options.Capacity)
+ }
+ if options.Error != 0 {
+ args = append(args, "ERROR", options.Error)
+ }
+ if options.Expansion != 0 {
+ args = append(args, "EXPANSION", options.Expansion)
+ }
+ if options.NoCreate {
+ args = append(args, "NOCREATE")
+ }
+ if options.NonScaling {
+ args = append(args, "NONSCALING")
+ }
+ }
+ args = append(args, "ITEMS")
+ args = append(args, elements...)
+
+ cmd := NewBoolSliceCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+// BFMAdd adds multiple elements to a Bloom filter.
+// Returns an array of booleans indicating whether each element was added to the filter or not.
+// For more information - https://redis.io/commands/bf.madd/
+func (c cmdable) BFMAdd(ctx context.Context, key string, elements ...interface{}) *BoolSliceCmd {
+ args := []interface{}{"BF.MADD", key}
+ args = append(args, elements...)
+ cmd := NewBoolSliceCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+// BFMExists check if multiple elements exist in a Bloom filter.
+// Returns an array of booleans indicating whether each element exists in the filter or not.
+// For more information - https://redis.io/commands/bf.mexists/
+func (c cmdable) BFMExists(ctx context.Context, key string, elements ...interface{}) *BoolSliceCmd {
+ args := []interface{}{"BF.MEXISTS", key}
+ args = append(args, elements...)
+
+ cmd := NewBoolSliceCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+// -------------------------------------------
+// Cuckoo filter commands
+//-------------------------------------------
+
+// CFReserve creates an empty Cuckoo filter with the specified capacity.
+// For more information - https://redis.io/commands/cf.reserve/
+func (c cmdable) CFReserve(ctx context.Context, key string, capacity int64) *StatusCmd {
+ args := []interface{}{"CF.RESERVE", key, capacity}
+ cmd := NewStatusCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+// CFReserveExpansion creates an empty Cuckoo filter with the specified capacity and expansion rate.
+// For more information - https://redis.io/commands/cf.reserve/
+func (c cmdable) CFReserveExpansion(ctx context.Context, key string, capacity int64, expansion int64) *StatusCmd {
+ args := []interface{}{"CF.RESERVE", key, capacity, "EXPANSION", expansion}
+ cmd := NewStatusCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+// CFReserveBucketSize creates an empty Cuckoo filter with the specified capacity and bucket size.
+// For more information - https://redis.io/commands/cf.reserve/
+func (c cmdable) CFReserveBucketSize(ctx context.Context, key string, capacity int64, bucketsize int64) *StatusCmd {
+ args := []interface{}{"CF.RESERVE", key, capacity, "BUCKETSIZE", bucketsize}
+ cmd := NewStatusCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+// CFReserveMaxIterations creates an empty Cuckoo filter with the specified capacity and maximum number of iterations.
+// For more information - https://redis.io/commands/cf.reserve/
+func (c cmdable) CFReserveMaxIterations(ctx context.Context, key string, capacity int64, maxiterations int64) *StatusCmd {
+ args := []interface{}{"CF.RESERVE", key, capacity, "MAXITERATIONS", maxiterations}
+ cmd := NewStatusCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+// CFReserveWithArgs creates an empty Cuckoo filter with the specified options.
+// This function allows for specifying additional options such as bucket size and maximum number of iterations.
+// For more information - https://redis.io/commands/cf.reserve/
+func (c cmdable) CFReserveWithArgs(ctx context.Context, key string, options *CFReserveOptions) *StatusCmd {
+ args := []interface{}{"CF.RESERVE", key, options.Capacity}
+ if options.BucketSize != 0 {
+ args = append(args, "BUCKETSIZE", options.BucketSize)
+ }
+ if options.MaxIterations != 0 {
+ args = append(args, "MAXITERATIONS", options.MaxIterations)
+ }
+ if options.Expansion != 0 {
+ args = append(args, "EXPANSION", options.Expansion)
+ }
+ cmd := NewStatusCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+// CFAdd adds an element to a Cuckoo filter.
+// Returns true if the element was added to the filter or false if it already exists in the filter.
+// For more information - https://redis.io/commands/cf.add/
+func (c cmdable) CFAdd(ctx context.Context, key string, element interface{}) *BoolCmd {
+ args := []interface{}{"CF.ADD", key, element}
+ cmd := NewBoolCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+// CFAddNX adds an element to a Cuckoo filter only if it does not already exist in the filter.
+// Returns true if the element was added to the filter or false if it already exists in the filter.
+// For more information - https://redis.io/commands/cf.addnx/
+func (c cmdable) CFAddNX(ctx context.Context, key string, element interface{}) *BoolCmd {
+ args := []interface{}{"CF.ADDNX", key, element}
+ cmd := NewBoolCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+// CFCount returns an estimate of the number of times an element may be in a Cuckoo Filter.
+// For more information - https://redis.io/commands/cf.count/
+func (c cmdable) CFCount(ctx context.Context, key string, element interface{}) *IntCmd {
+ args := []interface{}{"CF.COUNT", key, element}
+ cmd := NewIntCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+// CFDel deletes an item once from the cuckoo filter.
+// For more information - https://redis.io/commands/cf.del/
+func (c cmdable) CFDel(ctx context.Context, key string, element interface{}) *BoolCmd {
+ args := []interface{}{"CF.DEL", key, element}
+ cmd := NewBoolCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+// CFExists determines whether an item may exist in the Cuckoo Filter or not.
+// For more information - https://redis.io/commands/cf.exists/
+func (c cmdable) CFExists(ctx context.Context, key string, element interface{}) *BoolCmd {
+ args := []interface{}{"CF.EXISTS", key, element}
+ cmd := NewBoolCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+// CFLoadChunk restores a filter previously saved using SCANDUMP.
+// For more information - https://redis.io/commands/cf.loadchunk/
+func (c cmdable) CFLoadChunk(ctx context.Context, key string, iterator int64, data interface{}) *StatusCmd {
+ args := []interface{}{"CF.LOADCHUNK", key, iterator, data}
+ cmd := NewStatusCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+// CFScanDump begins an incremental save of the cuckoo filter.
+// For more information - https://redis.io/commands/cf.scandump/
+func (c cmdable) CFScanDump(ctx context.Context, key string, iterator int64) *ScanDumpCmd {
+ args := []interface{}{"CF.SCANDUMP", key, iterator}
+ cmd := newScanDumpCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+type CFInfo struct {
+ Size int64
+ NumBuckets int64
+ NumFilters int64
+ NumItemsInserted int64
+ NumItemsDeleted int64
+ BucketSize int64
+ ExpansionRate int64
+ MaxIteration int64
+}
+
+type CFInfoCmd struct {
+ baseCmd
+
+ val CFInfo
+}
+
+func NewCFInfoCmd(ctx context.Context, args ...interface{}) *CFInfoCmd {
+ return &CFInfoCmd{
+ baseCmd: baseCmd{
+ ctx: ctx,
+ args: args,
+ },
+ }
+}
+
+func (cmd *CFInfoCmd) SetVal(val CFInfo) {
+ cmd.val = val
+}
+
+func (cmd *CFInfoCmd) String() string {
+ return cmdString(cmd, cmd.val)
+}
+
+func (cmd *CFInfoCmd) Val() CFInfo {
+ return cmd.val
+}
+
+func (cmd *CFInfoCmd) Result() (CFInfo, error) {
+ return cmd.val, cmd.err
+}
+
+func (cmd *CFInfoCmd) readReply(rd *proto.Reader) (err error) {
+ n, err := rd.ReadMapLen()
+ if err != nil {
+ return err
+ }
+
+ var key string
+ var result CFInfo
+ for f := 0; f < n; f++ {
+ key, err = rd.ReadString()
+ if err != nil {
+ return err
+ }
+
+ switch key {
+ case "Size":
+ result.Size, err = rd.ReadInt()
+ case "Number of buckets":
+ result.NumBuckets, err = rd.ReadInt()
+ case "Number of filters":
+ result.NumFilters, err = rd.ReadInt()
+ case "Number of items inserted":
+ result.NumItemsInserted, err = rd.ReadInt()
+ case "Number of items deleted":
+ result.NumItemsDeleted, err = rd.ReadInt()
+ case "Bucket size":
+ result.BucketSize, err = rd.ReadInt()
+ case "Expansion rate":
+ result.ExpansionRate, err = rd.ReadInt()
+ case "Max iterations":
+ result.MaxIteration, err = rd.ReadInt()
+
+ default:
+ return fmt.Errorf("redis: CF.INFO unexpected key %s", key)
+ }
+
+ if err != nil {
+ return err
+ }
+ }
+
+ cmd.val = result
+ return nil
+}
+
+// CFInfo returns information about a Cuckoo filter.
+// For more information - https://redis.io/commands/cf.info/
+func (c cmdable) CFInfo(ctx context.Context, key string) *CFInfoCmd {
+ args := []interface{}{"CF.INFO", key}
+ cmd := NewCFInfoCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+// CFInsert inserts elements into a Cuckoo filter.
+// This function also allows for specifying additional options such as capacity, error rate, expansion rate, and non-scaling behavior.
+// Returns an array of booleans indicating whether each element was added to the filter or not.
+// For more information - https://redis.io/commands/cf.insert/
+func (c cmdable) CFInsert(ctx context.Context, key string, options *CFInsertOptions, elements ...interface{}) *BoolSliceCmd {
+ args := []interface{}{"CF.INSERT", key}
+ args = c.getCfInsertWithArgs(args, options, elements...)
+
+ cmd := NewBoolSliceCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+// CFInsertNX inserts elements into a Cuckoo filter only if they do not already exist in the filter.
+// This function also allows for specifying additional options such as:
+// capacity, error rate, expansion rate, and non-scaling behavior.
+// Returns an array of integers indicating whether each element was added to the filter or not.
+// For more information - https://redis.io/commands/cf.insertnx/
+func (c cmdable) CFInsertNX(ctx context.Context, key string, options *CFInsertOptions, elements ...interface{}) *IntSliceCmd {
+ args := []interface{}{"CF.INSERTNX", key}
+ args = c.getCfInsertWithArgs(args, options, elements...)
+
+ cmd := NewIntSliceCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) getCfInsertWithArgs(args []interface{}, options *CFInsertOptions, elements ...interface{}) []interface{} {
+ if options != nil {
+ if options.Capacity != 0 {
+ args = append(args, "CAPACITY", options.Capacity)
+ }
+ if options.NoCreate {
+ args = append(args, "NOCREATE")
+ }
+ }
+ args = append(args, "ITEMS")
+ args = append(args, elements...)
+
+ return args
+}
+
+// CFMExists check if multiple elements exist in a Cuckoo filter.
+// Returns an array of booleans indicating whether each element exists in the filter or not.
+// For more information - https://redis.io/commands/cf.mexists/
+func (c cmdable) CFMExists(ctx context.Context, key string, elements ...interface{}) *BoolSliceCmd {
+ args := []interface{}{"CF.MEXISTS", key}
+ args = append(args, elements...)
+ cmd := NewBoolSliceCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+// -------------------------------------------
+// CMS commands
+//-------------------------------------------
+
+// CMSIncrBy increments the count of one or more items in a Count-Min Sketch filter.
+// Returns an array of integers representing the updated count of each item.
+// For more information - https://redis.io/commands/cms.incrby/
+func (c cmdable) CMSIncrBy(ctx context.Context, key string, elements ...interface{}) *IntSliceCmd {
+ args := make([]interface{}, 2, 2+len(elements))
+ args[0] = "CMS.INCRBY"
+ args[1] = key
+ args = appendArgs(args, elements)
+
+ cmd := NewIntSliceCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+type CMSInfo struct {
+ Width int64
+ Depth int64
+ Count int64
+}
+
+type CMSInfoCmd struct {
+ baseCmd
+
+ val CMSInfo
+}
+
+func NewCMSInfoCmd(ctx context.Context, args ...interface{}) *CMSInfoCmd {
+ return &CMSInfoCmd{
+ baseCmd: baseCmd{
+ ctx: ctx,
+ args: args,
+ },
+ }
+}
+
+func (cmd *CMSInfoCmd) SetVal(val CMSInfo) {
+ cmd.val = val
+}
+
+func (cmd *CMSInfoCmd) String() string {
+ return cmdString(cmd, cmd.val)
+}
+
+func (cmd *CMSInfoCmd) Val() CMSInfo {
+ return cmd.val
+}
+
+func (cmd *CMSInfoCmd) Result() (CMSInfo, error) {
+ return cmd.val, cmd.err
+}
+
+func (cmd *CMSInfoCmd) readReply(rd *proto.Reader) (err error) {
+ n, err := rd.ReadMapLen()
+ if err != nil {
+ return err
+ }
+
+ var key string
+ var result CMSInfo
+ for f := 0; f < n; f++ {
+ key, err = rd.ReadString()
+ if err != nil {
+ return err
+ }
+
+ switch key {
+ case "width":
+ result.Width, err = rd.ReadInt()
+ case "depth":
+ result.Depth, err = rd.ReadInt()
+ case "count":
+ result.Count, err = rd.ReadInt()
+ default:
+ return fmt.Errorf("redis: CMS.INFO unexpected key %s", key)
+ }
+
+ if err != nil {
+ return err
+ }
+ }
+
+ cmd.val = result
+ return nil
+}
+
+// CMSInfo returns information about a Count-Min Sketch filter.
+// For more information - https://redis.io/commands/cms.info/
+func (c cmdable) CMSInfo(ctx context.Context, key string) *CMSInfoCmd {
+ args := []interface{}{"CMS.INFO", key}
+ cmd := NewCMSInfoCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+// CMSInitByDim creates an empty Count-Min Sketch filter with the specified dimensions.
+// For more information - https://redis.io/commands/cms.initbydim/
+func (c cmdable) CMSInitByDim(ctx context.Context, key string, width, depth int64) *StatusCmd {
+ args := []interface{}{"CMS.INITBYDIM", key, width, depth}
+ cmd := NewStatusCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+// CMSInitByProb creates an empty Count-Min Sketch filter with the specified error rate and probability.
+// For more information - https://redis.io/commands/cms.initbyprob/
+func (c cmdable) CMSInitByProb(ctx context.Context, key string, errorRate, probability float64) *StatusCmd {
+ args := []interface{}{"CMS.INITBYPROB", key, errorRate, probability}
+ cmd := NewStatusCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+// CMSMerge merges multiple Count-Min Sketch filters into a single filter.
+// The destination filter must not exist and will be created with the dimensions of the first source filter.
+// The number of items in each source filter must be equal.
+// Returns OK on success or an error if the filters could not be merged.
+// For more information - https://redis.io/commands/cms.merge/
+func (c cmdable) CMSMerge(ctx context.Context, destKey string, sourceKeys ...string) *StatusCmd {
+ args := []interface{}{"CMS.MERGE", destKey, len(sourceKeys)}
+ for _, s := range sourceKeys {
+ args = append(args, s)
+ }
+ cmd := NewStatusCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+// CMSMergeWithWeight merges multiple Count-Min Sketch filters into a single filter with weights for each source filter.
+// The destination filter must not exist and will be created with the dimensions of the first source filter.
+// The number of items in each source filter must be equal.
+// Returns OK on success or an error if the filters could not be merged.
+// For more information - https://redis.io/commands/cms.merge/
+func (c cmdable) CMSMergeWithWeight(ctx context.Context, destKey string, sourceKeys map[string]int64) *StatusCmd {
+ args := make([]interface{}, 0, 4+(len(sourceKeys)*2+1))
+ args = append(args, "CMS.MERGE", destKey, len(sourceKeys))
+
+ if len(sourceKeys) > 0 {
+ sk := make([]interface{}, len(sourceKeys))
+ sw := make([]interface{}, len(sourceKeys))
+
+ i := 0
+ for k, w := range sourceKeys {
+ sk[i] = k
+ sw[i] = w
+ i++
+ }
+
+ args = append(args, sk...)
+ args = append(args, "WEIGHTS")
+ args = append(args, sw...)
+ }
+
+ cmd := NewStatusCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+// CMSQuery returns count for item(s).
+// For more information - https://redis.io/commands/cms.query/
+func (c cmdable) CMSQuery(ctx context.Context, key string, elements ...interface{}) *IntSliceCmd {
+ args := []interface{}{"CMS.QUERY", key}
+ args = append(args, elements...)
+ cmd := NewIntSliceCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+// -------------------------------------------
+// TopK commands
+//--------------------------------------------
+
+// TopKAdd adds one or more elements to a Top-K filter.
+// Returns an array of strings representing the items that were removed from the filter, if any.
+// For more information - https://redis.io/commands/topk.add/
+func (c cmdable) TopKAdd(ctx context.Context, key string, elements ...interface{}) *StringSliceCmd {
+ args := make([]interface{}, 2, 2+len(elements))
+ args[0] = "TOPK.ADD"
+ args[1] = key
+ args = appendArgs(args, elements)
+
+ cmd := NewStringSliceCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+// TopKReserve creates an empty Top-K filter with the specified number of top items to keep.
+// For more information - https://redis.io/commands/topk.reserve/
+func (c cmdable) TopKReserve(ctx context.Context, key string, k int64) *StatusCmd {
+ args := []interface{}{"TOPK.RESERVE", key, k}
+
+ cmd := NewStatusCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+// TopKReserveWithOptions creates an empty Top-K filter with the specified number of top items to keep and additional options.
+// This function allows for specifying additional options such as width, depth and decay.
+// For more information - https://redis.io/commands/topk.reserve/
+func (c cmdable) TopKReserveWithOptions(ctx context.Context, key string, k int64, width, depth int64, decay float64) *StatusCmd {
+ args := []interface{}{"TOPK.RESERVE", key, k, width, depth, decay}
+
+ cmd := NewStatusCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+type TopKInfo struct {
+ K int64
+ Width int64
+ Depth int64
+ Decay float64
+}
+
+type TopKInfoCmd struct {
+ baseCmd
+
+ val TopKInfo
+}
+
+func NewTopKInfoCmd(ctx context.Context, args ...interface{}) *TopKInfoCmd {
+ return &TopKInfoCmd{
+ baseCmd: baseCmd{
+ ctx: ctx,
+ args: args,
+ },
+ }
+}
+
+func (cmd *TopKInfoCmd) SetVal(val TopKInfo) {
+ cmd.val = val
+}
+
+func (cmd *TopKInfoCmd) String() string {
+ return cmdString(cmd, cmd.val)
+}
+
+func (cmd *TopKInfoCmd) Val() TopKInfo {
+ return cmd.val
+}
+
+func (cmd *TopKInfoCmd) Result() (TopKInfo, error) {
+ return cmd.val, cmd.err
+}
+
+func (cmd *TopKInfoCmd) readReply(rd *proto.Reader) (err error) {
+ n, err := rd.ReadMapLen()
+ if err != nil {
+ return err
+ }
+
+ var key string
+ var result TopKInfo
+ for f := 0; f < n; f++ {
+ key, err = rd.ReadString()
+ if err != nil {
+ return err
+ }
+
+ switch key {
+ case "k":
+ result.K, err = rd.ReadInt()
+ case "width":
+ result.Width, err = rd.ReadInt()
+ case "depth":
+ result.Depth, err = rd.ReadInt()
+ case "decay":
+ result.Decay, err = rd.ReadFloat()
+ default:
+ return fmt.Errorf("redis: topk.info unexpected key %s", key)
+ }
+
+ if err != nil {
+ return err
+ }
+ }
+
+ cmd.val = result
+ return nil
+}
+
+// TopKInfo returns information about a Top-K filter.
+// For more information - https://redis.io/commands/topk.info/
+func (c cmdable) TopKInfo(ctx context.Context, key string) *TopKInfoCmd {
+ args := []interface{}{"TOPK.INFO", key}
+
+ cmd := NewTopKInfoCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+// TopKQuery check if multiple elements exist in a Top-K filter.
+// Returns an array of booleans indicating whether each element exists in the filter or not.
+// For more information - https://redis.io/commands/topk.query/
+func (c cmdable) TopKQuery(ctx context.Context, key string, elements ...interface{}) *BoolSliceCmd {
+ args := make([]interface{}, 2, 2+len(elements))
+ args[0] = "TOPK.QUERY"
+ args[1] = key
+ args = appendArgs(args, elements)
+
+ cmd := NewBoolSliceCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+// TopKCount returns an estimate of the number of times an item may be in a Top-K filter.
+// For more information - https://redis.io/commands/topk.count/
+func (c cmdable) TopKCount(ctx context.Context, key string, elements ...interface{}) *IntSliceCmd {
+ args := make([]interface{}, 2, 2+len(elements))
+ args[0] = "TOPK.COUNT"
+ args[1] = key
+ args = appendArgs(args, elements)
+
+ cmd := NewIntSliceCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+// TopKIncrBy increases the count of one or more items in a Top-K filter.
+// For more information - https://redis.io/commands/topk.incrby/
+func (c cmdable) TopKIncrBy(ctx context.Context, key string, elements ...interface{}) *StringSliceCmd {
+ args := make([]interface{}, 2, 2+len(elements))
+ args[0] = "TOPK.INCRBY"
+ args[1] = key
+ args = appendArgs(args, elements)
+
+ cmd := NewStringSliceCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+// TopKList returns all items in Top-K list.
+// For more information - https://redis.io/commands/topk.list/
+func (c cmdable) TopKList(ctx context.Context, key string) *StringSliceCmd {
+ args := []interface{}{"TOPK.LIST", key}
+
+ cmd := NewStringSliceCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+// TopKListWithCount returns all items in Top-K list with their respective count.
+// For more information - https://redis.io/commands/topk.list/
+func (c cmdable) TopKListWithCount(ctx context.Context, key string) *MapStringIntCmd {
+ args := []interface{}{"TOPK.LIST", key, "WITHCOUNT"}
+
+ cmd := NewMapStringIntCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+// -------------------------------------------
+// t-digest commands
+// --------------------------------------------
+
+// TDigestAdd adds one or more elements to a t-Digest data structure.
+// Returns OK on success or an error if the operation could not be completed.
+// For more information - https://redis.io/commands/tdigest.add/
+func (c cmdable) TDigestAdd(ctx context.Context, key string, elements ...float64) *StatusCmd {
+ args := make([]interface{}, 2, 2+len(elements))
+ args[0] = "TDIGEST.ADD"
+ args[1] = key
+
+ // Convert floatSlice to []interface{}
+ interfaceSlice := make([]interface{}, len(elements))
+ for i, v := range elements {
+ interfaceSlice[i] = v
+ }
+
+ args = append(args, interfaceSlice...)
+
+ cmd := NewStatusCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+// TDigestByRank returns an array of values from a t-Digest data structure based on their rank.
+// The rank of an element is its position in the sorted list of all elements in the t-Digest.
+// Returns an array of floats representing the values at the specified ranks or an error if the operation could not be completed.
+// For more information - https://redis.io/commands/tdigest.byrank/
+func (c cmdable) TDigestByRank(ctx context.Context, key string, rank ...uint64) *FloatSliceCmd {
+ args := make([]interface{}, 2, 2+len(rank))
+ args[0] = "TDIGEST.BYRANK"
+ args[1] = key
+
+ // Convert uint slice to []interface{}
+ interfaceSlice := make([]interface{}, len(rank))
+ for i, v := range rank {
+ interfaceSlice[i] = v
+ }
+
+ args = append(args, interfaceSlice...)
+
+ cmd := NewFloatSliceCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+// TDigestByRevRank returns an array of values from a t-Digest data structure based on their reverse rank.
+// The reverse rank of an element is its position in the sorted list of all elements in the t-Digest when sorted in descending order.
+// Returns an array of floats representing the values at the specified ranks or an error if the operation could not be completed.
+// For more information - https://redis.io/commands/tdigest.byrevrank/
+func (c cmdable) TDigestByRevRank(ctx context.Context, key string, rank ...uint64) *FloatSliceCmd {
+ args := make([]interface{}, 2, 2+len(rank))
+ args[0] = "TDIGEST.BYREVRANK"
+ args[1] = key
+
+ // Convert uint slice to []interface{}
+ interfaceSlice := make([]interface{}, len(rank))
+ for i, v := range rank {
+ interfaceSlice[i] = v
+ }
+
+ args = append(args, interfaceSlice...)
+
+ cmd := NewFloatSliceCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+// TDigestCDF returns an array of cumulative distribution function (CDF) values for one or more elements in a t-Digest data structure.
+// The CDF value for an element is the fraction of all elements in the t-Digest that are less than or equal to it.
+// Returns an array of floats representing the CDF values for each element or an error if the operation could not be completed.
+// For more information - https://redis.io/commands/tdigest.cdf/
+func (c cmdable) TDigestCDF(ctx context.Context, key string, elements ...float64) *FloatSliceCmd {
+ args := make([]interface{}, 2, 2+len(elements))
+ args[0] = "TDIGEST.CDF"
+ args[1] = key
+
+ // Convert floatSlice to []interface{}
+ interfaceSlice := make([]interface{}, len(elements))
+ for i, v := range elements {
+ interfaceSlice[i] = v
+ }
+
+ args = append(args, interfaceSlice...)
+
+ cmd := NewFloatSliceCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+// TDigestCreate creates an empty t-Digest data structure with default parameters.
+// Returns OK on success or an error if the operation could not be completed.
+// For more information - https://redis.io/commands/tdigest.create/
+func (c cmdable) TDigestCreate(ctx context.Context, key string) *StatusCmd {
+ args := []interface{}{"TDIGEST.CREATE", key}
+
+ cmd := NewStatusCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+// TDigestCreateWithCompression creates an empty t-Digest data structure with a specified compression parameter.
+// The compression parameter controls the accuracy and memory usage of the t-Digest.
+// Returns OK on success or an error if the operation could not be completed.
+// For more information - https://redis.io/commands/tdigest.create/
+func (c cmdable) TDigestCreateWithCompression(ctx context.Context, key string, compression int64) *StatusCmd {
+ args := []interface{}{"TDIGEST.CREATE", key, "COMPRESSION", compression}
+
+ cmd := NewStatusCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+type TDigestInfo struct {
+ Compression int64
+ Capacity int64
+ MergedNodes int64
+ UnmergedNodes int64
+ MergedWeight int64
+ UnmergedWeight int64
+ Observations int64
+ TotalCompressions int64
+ MemoryUsage int64
+}
+
+type TDigestInfoCmd struct {
+ baseCmd
+
+ val TDigestInfo
+}
+
+func NewTDigestInfoCmd(ctx context.Context, args ...interface{}) *TDigestInfoCmd {
+ return &TDigestInfoCmd{
+ baseCmd: baseCmd{
+ ctx: ctx,
+ args: args,
+ },
+ }
+}
+
+func (cmd *TDigestInfoCmd) SetVal(val TDigestInfo) {
+ cmd.val = val
+}
+
+func (cmd *TDigestInfoCmd) String() string {
+ return cmdString(cmd, cmd.val)
+}
+
+func (cmd *TDigestInfoCmd) Val() TDigestInfo {
+ return cmd.val
+}
+
+func (cmd *TDigestInfoCmd) Result() (TDigestInfo, error) {
+ return cmd.val, cmd.err
+}
+
+func (cmd *TDigestInfoCmd) readReply(rd *proto.Reader) (err error) {
+ n, err := rd.ReadMapLen()
+ if err != nil {
+ return err
+ }
+
+ var key string
+ var result TDigestInfo
+ for f := 0; f < n; f++ {
+ key, err = rd.ReadString()
+ if err != nil {
+ return err
+ }
+
+ switch key {
+ case "Compression":
+ result.Compression, err = rd.ReadInt()
+ case "Capacity":
+ result.Capacity, err = rd.ReadInt()
+ case "Merged nodes":
+ result.MergedNodes, err = rd.ReadInt()
+ case "Unmerged nodes":
+ result.UnmergedNodes, err = rd.ReadInt()
+ case "Merged weight":
+ result.MergedWeight, err = rd.ReadInt()
+ case "Unmerged weight":
+ result.UnmergedWeight, err = rd.ReadInt()
+ case "Observations":
+ result.Observations, err = rd.ReadInt()
+ case "Total compressions":
+ result.TotalCompressions, err = rd.ReadInt()
+ case "Memory usage":
+ result.MemoryUsage, err = rd.ReadInt()
+ default:
+ return fmt.Errorf("redis: tdigest.info unexpected key %s", key)
+ }
+
+ if err != nil {
+ return err
+ }
+ }
+
+ cmd.val = result
+ return nil
+}
+
+// TDigestInfo returns information about a t-Digest data structure.
+// For more information - https://redis.io/commands/tdigest.info/
+func (c cmdable) TDigestInfo(ctx context.Context, key string) *TDigestInfoCmd {
+ args := []interface{}{"TDIGEST.INFO", key}
+
+ cmd := NewTDigestInfoCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+// TDigestMax returns the maximum value from a t-Digest data structure.
+// For more information - https://redis.io/commands/tdigest.max/
+func (c cmdable) TDigestMax(ctx context.Context, key string) *FloatCmd {
+ args := []interface{}{"TDIGEST.MAX", key}
+
+ cmd := NewFloatCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+type TDigestMergeOptions struct {
+ Compression int64
+ Override bool
+}
+
+// TDigestMerge merges multiple t-Digest data structures into a single t-Digest.
+// This function also allows for specifying additional options such as compression and override behavior.
+// Returns OK on success or an error if the operation could not be completed.
+// For more information - https://redis.io/commands/tdigest.merge/
+func (c cmdable) TDigestMerge(ctx context.Context, destKey string, options *TDigestMergeOptions, sourceKeys ...string) *StatusCmd {
+ args := []interface{}{"TDIGEST.MERGE", destKey, len(sourceKeys)}
+
+ for _, sourceKey := range sourceKeys {
+ args = append(args, sourceKey)
+ }
+
+ if options != nil {
+ if options.Compression != 0 {
+ args = append(args, "COMPRESSION", options.Compression)
+ }
+ if options.Override {
+ args = append(args, "OVERRIDE")
+ }
+ }
+
+ cmd := NewStatusCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+// TDigestMin returns the minimum value from a t-Digest data structure.
+// For more information - https://redis.io/commands/tdigest.min/
+func (c cmdable) TDigestMin(ctx context.Context, key string) *FloatCmd {
+ args := []interface{}{"TDIGEST.MIN", key}
+
+ cmd := NewFloatCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+// TDigestQuantile returns an array of quantile values for one or more elements in a t-Digest data structure.
+// The quantile value for an element is the fraction of all elements in the t-Digest that are less than or equal to it.
+// Returns an array of floats representing the quantile values for each element or an error if the operation could not be completed.
+// For more information - https://redis.io/commands/tdigest.quantile/
+func (c cmdable) TDigestQuantile(ctx context.Context, key string, elements ...float64) *FloatSliceCmd {
+ args := make([]interface{}, 2, 2+len(elements))
+ args[0] = "TDIGEST.QUANTILE"
+ args[1] = key
+
+ // Convert floatSlice to []interface{}
+ interfaceSlice := make([]interface{}, len(elements))
+ for i, v := range elements {
+ interfaceSlice[i] = v
+ }
+
+ args = append(args, interfaceSlice...)
+
+ cmd := NewFloatSliceCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+// TDigestRank returns an array of rank values for one or more elements in a t-Digest data structure.
+// The rank of an element is its position in the sorted list of all elements in the t-Digest.
+// Returns an array of integers representing the rank values for each element or an error if the operation could not be completed.
+// For more information - https://redis.io/commands/tdigest.rank/
+func (c cmdable) TDigestRank(ctx context.Context, key string, values ...float64) *IntSliceCmd {
+ args := make([]interface{}, 2, 2+len(values))
+ args[0] = "TDIGEST.RANK"
+ args[1] = key
+
+ // Convert floatSlice to []interface{}
+ interfaceSlice := make([]interface{}, len(values))
+ for i, v := range values {
+ interfaceSlice[i] = v
+ }
+
+ args = append(args, interfaceSlice...)
+
+ cmd := NewIntSliceCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+// TDigestReset resets a t-Digest data structure to its initial state.
+// Returns OK on success or an error if the operation could not be completed.
+// For more information - https://redis.io/commands/tdigest.reset/
+func (c cmdable) TDigestReset(ctx context.Context, key string) *StatusCmd {
+ args := []interface{}{"TDIGEST.RESET", key}
+
+ cmd := NewStatusCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+// TDigestRevRank returns an array of reverse rank values for one or more elements in a t-Digest data structure.
+// The reverse rank of an element is its position in the sorted list of all elements in the t-Digest when sorted in descending order.
+// Returns an array of integers representing the reverse rank values for each element or an error if the operation could not be completed.
+// For more information - https://redis.io/commands/tdigest.revrank/
+func (c cmdable) TDigestRevRank(ctx context.Context, key string, values ...float64) *IntSliceCmd {
+ args := make([]interface{}, 2, 2+len(values))
+ args[0] = "TDIGEST.REVRANK"
+ args[1] = key
+
+ // Convert floatSlice to []interface{}
+ interfaceSlice := make([]interface{}, len(values))
+ for i, v := range values {
+ interfaceSlice[i] = v
+ }
+
+ args = append(args, interfaceSlice...)
+
+ cmd := NewIntSliceCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+// TDigestTrimmedMean returns the trimmed mean value from a t-Digest data structure.
+// The trimmed mean is calculated by removing a specified fraction of the highest and lowest values from the t-Digest and then calculating the mean of the remaining values.
+// Returns a float representing the trimmed mean value or an error if the operation could not be completed.
+// For more information - https://redis.io/commands/tdigest.trimmed_mean/
+func (c cmdable) TDigestTrimmedMean(ctx context.Context, key string, lowCutQuantile, highCutQuantile float64) *FloatCmd {
+ args := []interface{}{"TDIGEST.TRIMMED_MEAN", key, lowCutQuantile, highCutQuantile}
+
+ cmd := NewFloatCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
diff --git a/dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.5.1/probabilistic_test.go b/dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.5.1/probabilistic_test.go
new file mode 100644
index 0000000..0610c51
--- /dev/null
+++ b/dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.5.1/probabilistic_test.go
@@ -0,0 +1,733 @@
+package redis_test
+
+import (
+ "context"
+ "fmt"
+ "math"
+
+ . "github.com/bsm/ginkgo/v2"
+ . "github.com/bsm/gomega"
+
+ "github.com/redis/go-redis/v9"
+)
+
+var _ = Describe("Probabilistic commands", Label("probabilistic"), func() {
+ ctx := context.TODO()
+ var client *redis.Client
+
+ BeforeEach(func() {
+ client = redis.NewClient(&redis.Options{Addr: ":6379"})
+ Expect(client.FlushDB(ctx).Err()).NotTo(HaveOccurred())
+ })
+
+ AfterEach(func() {
+ Expect(client.Close()).NotTo(HaveOccurred())
+ })
+
+ Describe("bloom", Label("bloom"), func() {
+ It("should BFAdd", Label("bloom", "bfadd"), func() {
+ resultAdd, err := client.BFAdd(ctx, "testbf1", 1).Result()
+
+ Expect(err).NotTo(HaveOccurred())
+ Expect(resultAdd).To(BeTrue())
+
+ resultInfo, err := client.BFInfo(ctx, "testbf1").Result()
+
+ Expect(err).NotTo(HaveOccurred())
+ Expect(resultInfo).To(BeAssignableToTypeOf(redis.BFInfo{}))
+ Expect(resultInfo.ItemsInserted).To(BeEquivalentTo(int64(1)))
+ })
+
+ It("should BFCard", Label("bloom", "bfcard"), func() {
+ // This is a probabilistic data structure, and it's not always guaranteed that we will get back
+ // the exact number of inserted items, during hash collisions
+ // But with such a low number of items (only 3),
+ // the probability of a collision is very low, so we can expect to get back the exact number of items
+ _, err := client.BFAdd(ctx, "testbf1", "item1").Result()
+ Expect(err).NotTo(HaveOccurred())
+ _, err = client.BFAdd(ctx, "testbf1", "item2").Result()
+ Expect(err).NotTo(HaveOccurred())
+ _, err = client.BFAdd(ctx, "testbf1", 3).Result()
+ Expect(err).NotTo(HaveOccurred())
+
+ result, err := client.BFCard(ctx, "testbf1").Result()
+
+ Expect(err).NotTo(HaveOccurred())
+ Expect(result).To(BeEquivalentTo(int64(3)))
+ })
+
+ It("should BFExists", Label("bloom", "bfexists"), func() {
+ exists, err := client.BFExists(ctx, "testbf1", "item1").Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(exists).To(BeFalse())
+
+ _, err = client.BFAdd(ctx, "testbf1", "item1").Result()
+ Expect(err).NotTo(HaveOccurred())
+
+ exists, err = client.BFExists(ctx, "testbf1", "item1").Result()
+
+ Expect(err).NotTo(HaveOccurred())
+ Expect(exists).To(BeTrue())
+ })
+
+ It("should BFInfo and BFReserve", Label("bloom", "bfinfo", "bfreserve"), func() {
+ err := client.BFReserve(ctx, "testbf1", 0.001, 2000).Err()
+ Expect(err).NotTo(HaveOccurred())
+
+ result, err := client.BFInfo(ctx, "testbf1").Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(result).To(BeAssignableToTypeOf(redis.BFInfo{}))
+ Expect(result.Capacity).To(BeEquivalentTo(int64(2000)))
+ })
+
+ It("should BFInfoCapacity, BFInfoSize, BFInfoFilters, BFInfoItems, BFInfoExpansion, ", Label("bloom", "bfinfocapacity", "bfinfosize", "bfinfofilters", "bfinfoitems", "bfinfoexpansion"), func() {
+ err := client.BFReserve(ctx, "testbf1", 0.001, 2000).Err()
+ Expect(err).NotTo(HaveOccurred())
+
+ result, err := client.BFInfoCapacity(ctx, "testbf1").Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(result.Capacity).To(BeEquivalentTo(int64(2000)))
+
+ result, err = client.BFInfoItems(ctx, "testbf1").Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(result.ItemsInserted).To(BeEquivalentTo(int64(0)))
+
+ result, err = client.BFInfoSize(ctx, "testbf1").Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(result.Size).To(BeEquivalentTo(int64(4056)))
+
+ err = client.BFReserveExpansion(ctx, "testbf2", 0.001, 2000, 3).Err()
+ Expect(err).NotTo(HaveOccurred())
+
+ result, err = client.BFInfoFilters(ctx, "testbf2").Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(result.Filters).To(BeEquivalentTo(int64(1)))
+
+ result, err = client.BFInfoExpansion(ctx, "testbf2").Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(result.ExpansionRate).To(BeEquivalentTo(int64(3)))
+ })
+
+ It("should BFInsert", Label("bloom", "bfinsert"), func() {
+ options := &redis.BFInsertOptions{
+ Capacity: 2000,
+ Error: 0.001,
+ Expansion: 3,
+ NonScaling: false,
+ NoCreate: true,
+ }
+
+ resultInsert, err := client.BFInsert(ctx, "testbf1", options, "item1").Result()
+ Expect(err).To(HaveOccurred())
+ Expect(err).To(MatchError("ERR not found"))
+
+ options = &redis.BFInsertOptions{
+ Capacity: 2000,
+ Error: 0.001,
+ Expansion: 3,
+ NonScaling: false,
+ NoCreate: false,
+ }
+
+ resultInsert, err = client.BFInsert(ctx, "testbf1", options, "item1").Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(len(resultInsert)).To(BeEquivalentTo(1))
+
+ exists, err := client.BFExists(ctx, "testbf1", "item1").Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(exists).To(BeTrue())
+
+ result, err := client.BFInfo(ctx, "testbf1").Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(result).To(BeAssignableToTypeOf(redis.BFInfo{}))
+ Expect(result.Capacity).To(BeEquivalentTo(int64(2000)))
+ Expect(result.ExpansionRate).To(BeEquivalentTo(int64(3)))
+ })
+
+ It("should BFMAdd", Label("bloom", "bfmadd"), func() {
+ resultAdd, err := client.BFMAdd(ctx, "testbf1", "item1", "item2", "item3").Result()
+
+ Expect(err).NotTo(HaveOccurred())
+ Expect(len(resultAdd)).To(Equal(3))
+
+ resultInfo, err := client.BFInfo(ctx, "testbf1").Result()
+
+ Expect(err).NotTo(HaveOccurred())
+ Expect(resultInfo).To(BeAssignableToTypeOf(redis.BFInfo{}))
+ Expect(resultInfo.ItemsInserted).To(BeEquivalentTo(int64(3)))
+ resultAdd2, err := client.BFMAdd(ctx, "testbf1", "item1", "item2", "item4").Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(resultAdd2[0]).To(BeFalse())
+ Expect(resultAdd2[1]).To(BeFalse())
+ Expect(resultAdd2[2]).To(BeTrue())
+ })
+
+ It("should BFMExists", Label("bloom", "bfmexists"), func() {
+ exist, err := client.BFMExists(ctx, "testbf1", "item1", "item2", "item3").Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(len(exist)).To(Equal(3))
+ Expect(exist[0]).To(BeFalse())
+ Expect(exist[1]).To(BeFalse())
+ Expect(exist[2]).To(BeFalse())
+
+ _, err = client.BFMAdd(ctx, "testbf1", "item1", "item2", "item3").Result()
+ Expect(err).NotTo(HaveOccurred())
+
+ exist, err = client.BFMExists(ctx, "testbf1", "item1", "item2", "item3", "item4").Result()
+
+ Expect(err).NotTo(HaveOccurred())
+ Expect(len(exist)).To(Equal(4))
+ Expect(exist[0]).To(BeTrue())
+ Expect(exist[1]).To(BeTrue())
+ Expect(exist[2]).To(BeTrue())
+ Expect(exist[3]).To(BeFalse())
+ })
+
+ It("should BFReserveExpansion", Label("bloom", "bfreserveexpansion"), func() {
+ err := client.BFReserveExpansion(ctx, "testbf1", 0.001, 2000, 3).Err()
+ Expect(err).NotTo(HaveOccurred())
+
+ result, err := client.BFInfo(ctx, "testbf1").Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(result).To(BeAssignableToTypeOf(redis.BFInfo{}))
+ Expect(result.Capacity).To(BeEquivalentTo(int64(2000)))
+ Expect(result.ExpansionRate).To(BeEquivalentTo(int64(3)))
+ })
+
+ It("should BFReserveNonScaling", Label("bloom", "bfreservenonscaling"), func() {
+ err := client.BFReserveNonScaling(ctx, "testbfns1", 0.001, 1000).Err()
+ Expect(err).NotTo(HaveOccurred())
+
+ _, err = client.BFInfo(ctx, "testbfns1").Result()
+ Expect(err).To(HaveOccurred())
+ })
+
+ It("should BFScanDump and BFLoadChunk", Label("bloom", "bfscandump", "bfloadchunk"), func() {
+ err := client.BFReserve(ctx, "testbfsd1", 0.001, 3000).Err()
+ Expect(err).NotTo(HaveOccurred())
+ for i := 0; i < 1000; i++ {
+ client.BFAdd(ctx, "testbfsd1", i)
+ }
+ infBefore := client.BFInfoSize(ctx, "testbfsd1")
+ fd := []redis.ScanDump{}
+ sd, err := client.BFScanDump(ctx, "testbfsd1", 0).Result()
+ for {
+ if sd.Iter == 0 {
+ break
+ }
+ Expect(err).NotTo(HaveOccurred())
+ fd = append(fd, sd)
+ sd, err = client.BFScanDump(ctx, "testbfsd1", sd.Iter).Result()
+ }
+ client.Del(ctx, "testbfsd1")
+ for _, e := range fd {
+ client.BFLoadChunk(ctx, "testbfsd1", e.Iter, e.Data)
+ }
+ infAfter := client.BFInfoSize(ctx, "testbfsd1")
+ Expect(infBefore).To(BeEquivalentTo(infAfter))
+ })
+
+ It("should BFReserveWithArgs", Label("bloom", "bfreserveargs"), func() {
+ options := &redis.BFReserveOptions{
+ Capacity: 2000,
+ Error: 0.001,
+ Expansion: 3,
+ NonScaling: false,
+ }
+ err := client.BFReserveWithArgs(ctx, "testbf", options).Err()
+ Expect(err).NotTo(HaveOccurred())
+
+ result, err := client.BFInfo(ctx, "testbf").Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(result).To(BeAssignableToTypeOf(redis.BFInfo{}))
+ Expect(result.Capacity).To(BeEquivalentTo(int64(2000)))
+ Expect(result.ExpansionRate).To(BeEquivalentTo(int64(3)))
+ })
+ })
+
+ Describe("cuckoo", Label("cuckoo"), func() {
+ It("should CFAdd", Label("cuckoo", "cfadd"), func() {
+ add, err := client.CFAdd(ctx, "testcf1", "item1").Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(add).To(BeTrue())
+
+ exists, err := client.CFExists(ctx, "testcf1", "item1").Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(exists).To(BeTrue())
+
+ info, err := client.CFInfo(ctx, "testcf1").Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(info).To(BeAssignableToTypeOf(redis.CFInfo{}))
+ Expect(info.NumItemsInserted).To(BeEquivalentTo(int64(1)))
+ })
+
+ It("should CFAddNX", Label("cuckoo", "cfaddnx"), func() {
+ add, err := client.CFAddNX(ctx, "testcf1", "item1").Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(add).To(BeTrue())
+
+ exists, err := client.CFExists(ctx, "testcf1", "item1").Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(exists).To(BeTrue())
+
+ result, err := client.CFAddNX(ctx, "testcf1", "item1").Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(result).To(BeFalse())
+
+ info, err := client.CFInfo(ctx, "testcf1").Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(info).To(BeAssignableToTypeOf(redis.CFInfo{}))
+ Expect(info.NumItemsInserted).To(BeEquivalentTo(int64(1)))
+ })
+
+ It("should CFCount", Label("cuckoo", "cfcount"), func() {
+ err := client.CFAdd(ctx, "testcf1", "item1").Err()
+ cnt, err := client.CFCount(ctx, "testcf1", "item1").Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(cnt).To(BeEquivalentTo(int64(1)))
+
+ err = client.CFAdd(ctx, "testcf1", "item1").Err()
+ Expect(err).NotTo(HaveOccurred())
+
+ cnt, err = client.CFCount(ctx, "testcf1", "item1").Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(cnt).To(BeEquivalentTo(int64(2)))
+ })
+
+ It("should CFDel and CFExists", Label("cuckoo", "cfdel", "cfexists"), func() {
+ err := client.CFAdd(ctx, "testcf1", "item1").Err()
+ Expect(err).NotTo(HaveOccurred())
+
+ exists, err := client.CFExists(ctx, "testcf1", "item1").Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(exists).To(BeTrue())
+
+ del, err := client.CFDel(ctx, "testcf1", "item1").Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(del).To(BeTrue())
+
+ exists, err = client.CFExists(ctx, "testcf1", "item1").Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(exists).To(BeFalse())
+ })
+
+ It("should CFInfo and CFReserve", Label("cuckoo", "cfinfo", "cfreserve"), func() {
+ err := client.CFReserve(ctx, "testcf1", 1000).Err()
+ Expect(err).NotTo(HaveOccurred())
+ err = client.CFReserveExpansion(ctx, "testcfe1", 1000, 1).Err()
+ Expect(err).NotTo(HaveOccurred())
+ err = client.CFReserveBucketSize(ctx, "testcfbs1", 1000, 4).Err()
+ Expect(err).NotTo(HaveOccurred())
+ err = client.CFReserveMaxIterations(ctx, "testcfmi1", 1000, 10).Err()
+ Expect(err).NotTo(HaveOccurred())
+
+ result, err := client.CFInfo(ctx, "testcf1").Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(result).To(BeAssignableToTypeOf(redis.CFInfo{}))
+ })
+
+ It("should CFScanDump and CFLoadChunk", Label("bloom", "cfscandump", "cfloadchunk"), func() {
+ err := client.CFReserve(ctx, "testcfsd1", 1000).Err()
+ Expect(err).NotTo(HaveOccurred())
+ for i := 0; i < 1000; i++ {
+ Item := fmt.Sprintf("item%d", i)
+ client.CFAdd(ctx, "testcfsd1", Item)
+ }
+ infBefore := client.CFInfo(ctx, "testcfsd1")
+ fd := []redis.ScanDump{}
+ sd, err := client.CFScanDump(ctx, "testcfsd1", 0).Result()
+ for {
+ if sd.Iter == 0 {
+ break
+ }
+ Expect(err).NotTo(HaveOccurred())
+ fd = append(fd, sd)
+ sd, err = client.CFScanDump(ctx, "testcfsd1", sd.Iter).Result()
+ }
+ client.Del(ctx, "testcfsd1")
+ for _, e := range fd {
+ client.CFLoadChunk(ctx, "testcfsd1", e.Iter, e.Data)
+ }
+ infAfter := client.CFInfo(ctx, "testcfsd1")
+ Expect(infBefore).To(BeEquivalentTo(infAfter))
+ })
+
+ It("should CFInfo and CFReserveWithArgs", Label("cuckoo", "cfinfo", "cfreserveargs"), func() {
+ args := &redis.CFReserveOptions{
+ Capacity: 2048,
+ BucketSize: 3,
+ MaxIterations: 15,
+ Expansion: 2,
+ }
+
+ err := client.CFReserveWithArgs(ctx, "testcf1", args).Err()
+ Expect(err).NotTo(HaveOccurred())
+
+ result, err := client.CFInfo(ctx, "testcf1").Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(result).To(BeAssignableToTypeOf(redis.CFInfo{}))
+ Expect(result.BucketSize).To(BeEquivalentTo(int64(3)))
+ Expect(result.MaxIteration).To(BeEquivalentTo(int64(15)))
+ Expect(result.ExpansionRate).To(BeEquivalentTo(int64(2)))
+ })
+
+ It("should CFInsert", Label("cuckoo", "cfinsert"), func() {
+ args := &redis.CFInsertOptions{
+ Capacity: 3000,
+ NoCreate: true,
+ }
+
+ result, err := client.CFInsert(ctx, "testcf1", args, "item1", "item2", "item3").Result()
+ Expect(err).To(HaveOccurred())
+
+ args = &redis.CFInsertOptions{
+ Capacity: 3000,
+ NoCreate: false,
+ }
+
+ result, err = client.CFInsert(ctx, "testcf1", args, "item1", "item2", "item3").Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(len(result)).To(BeEquivalentTo(3))
+ })
+
+ It("should CFInsertNX", Label("cuckoo", "cfinsertnx"), func() {
+ args := &redis.CFInsertOptions{
+ Capacity: 3000,
+ NoCreate: true,
+ }
+
+ result, err := client.CFInsertNX(ctx, "testcf1", args, "item1", "item2", "item2").Result()
+ Expect(err).To(HaveOccurred())
+
+ args = &redis.CFInsertOptions{
+ Capacity: 3000,
+ NoCreate: false,
+ }
+
+ result, err = client.CFInsertNX(ctx, "testcf2", args, "item1", "item2", "item2").Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(len(result)).To(BeEquivalentTo(3))
+ Expect(result[0]).To(BeEquivalentTo(int64(1)))
+ Expect(result[1]).To(BeEquivalentTo(int64(1)))
+ Expect(result[2]).To(BeEquivalentTo(int64(0)))
+ })
+
+ It("should CFMexists", Label("cuckoo", "cfmexists"), func() {
+ err := client.CFInsert(ctx, "testcf1", nil, "item1", "item2", "item3").Err()
+ Expect(err).NotTo(HaveOccurred())
+
+ result, err := client.CFMExists(ctx, "testcf1", "item1", "item2", "item3", "item4").Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(len(result)).To(BeEquivalentTo(4))
+ Expect(result[0]).To(BeTrue())
+ Expect(result[1]).To(BeTrue())
+ Expect(result[2]).To(BeTrue())
+ Expect(result[3]).To(BeFalse())
+ })
+ })
+
+ Describe("CMS", Label("cms"), func() {
+ It("should CMSIncrBy", Label("cms", "cmsincrby"), func() {
+ err := client.CMSInitByDim(ctx, "testcms1", 5, 10).Err()
+ Expect(err).NotTo(HaveOccurred())
+
+ result, err := client.CMSIncrBy(ctx, "testcms1", "item1", 1, "item2", 2, "item3", 3).Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(len(result)).To(BeEquivalentTo(3))
+ Expect(result[0]).To(BeEquivalentTo(int64(1)))
+ Expect(result[1]).To(BeEquivalentTo(int64(2)))
+ Expect(result[2]).To(BeEquivalentTo(int64(3)))
+ })
+
+ It("should CMSInitByDim and CMSInfo", Label("cms", "cmsinitbydim", "cmsinfo"), func() {
+ err := client.CMSInitByDim(ctx, "testcms1", 5, 10).Err()
+ Expect(err).NotTo(HaveOccurred())
+
+ info, err := client.CMSInfo(ctx, "testcms1").Result()
+ Expect(err).NotTo(HaveOccurred())
+
+ Expect(info).To(BeAssignableToTypeOf(redis.CMSInfo{}))
+ Expect(info.Width).To(BeEquivalentTo(int64(5)))
+ Expect(info.Depth).To(BeEquivalentTo(int64(10)))
+ })
+
+ It("should CMSInitByProb", Label("cms", "cmsinitbyprob"), func() {
+ err := client.CMSInitByProb(ctx, "testcms1", 0.002, 0.01).Err()
+ Expect(err).NotTo(HaveOccurred())
+
+ info, err := client.CMSInfo(ctx, "testcms1").Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(info).To(BeAssignableToTypeOf(redis.CMSInfo{}))
+ })
+
+ It("should CMSMerge, CMSMergeWithWeight and CMSQuery", Label("cms", "cmsmerge", "cmsquery", "NonRedisEnterprise"), func() {
+ err := client.CMSMerge(ctx, "destCms1", "testcms2", "testcms3").Err()
+ Expect(err).To(HaveOccurred())
+ Expect(err).To(MatchError("CMS: key does not exist"))
+
+ err = client.CMSInitByDim(ctx, "destCms1", 5, 10).Err()
+ Expect(err).NotTo(HaveOccurred())
+ err = client.CMSInitByDim(ctx, "destCms2", 5, 10).Err()
+ Expect(err).NotTo(HaveOccurred())
+ err = client.CMSInitByDim(ctx, "cms1", 2, 20).Err()
+ Expect(err).NotTo(HaveOccurred())
+ err = client.CMSInitByDim(ctx, "cms2", 3, 20).Err()
+ Expect(err).NotTo(HaveOccurred())
+
+ err = client.CMSMerge(ctx, "destCms1", "cms1", "cms2").Err()
+ Expect(err).To(MatchError("CMS: width/depth is not equal"))
+
+ client.Del(ctx, "cms1", "cms2")
+
+ err = client.CMSInitByDim(ctx, "cms1", 5, 10).Err()
+ Expect(err).NotTo(HaveOccurred())
+ err = client.CMSInitByDim(ctx, "cms2", 5, 10).Err()
+ Expect(err).NotTo(HaveOccurred())
+
+ client.CMSIncrBy(ctx, "cms1", "item1", 1, "item2", 2)
+ client.CMSIncrBy(ctx, "cms2", "item2", 2, "item3", 3)
+
+ err = client.CMSMerge(ctx, "destCms1", "cms1", "cms2").Err()
+ Expect(err).NotTo(HaveOccurred())
+
+ result, err := client.CMSQuery(ctx, "destCms1", "item1", "item2", "item3").Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(len(result)).To(BeEquivalentTo(3))
+ Expect(result[0]).To(BeEquivalentTo(int64(1)))
+ Expect(result[1]).To(BeEquivalentTo(int64(4)))
+ Expect(result[2]).To(BeEquivalentTo(int64(3)))
+
+ sourceSketches := map[string]int64{
+ "cms1": 1,
+ "cms2": 2,
+ }
+ err = client.CMSMergeWithWeight(ctx, "destCms2", sourceSketches).Err()
+ Expect(err).NotTo(HaveOccurred())
+
+ result, err = client.CMSQuery(ctx, "destCms2", "item1", "item2", "item3").Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(len(result)).To(BeEquivalentTo(3))
+ Expect(result[0]).To(BeEquivalentTo(int64(1)))
+ Expect(result[1]).To(BeEquivalentTo(int64(6)))
+ Expect(result[2]).To(BeEquivalentTo(int64(6)))
+ })
+ })
+
+ Describe("TopK", Label("topk"), func() {
+ It("should TopKReserve, TopKInfo, TopKAdd, TopKQuery, TopKCount, TopKIncrBy, TopKList, TopKListWithCount", Label("topk", "topkreserve", "topkinfo", "topkadd", "topkquery", "topkcount", "topkincrby", "topklist", "topklistwithcount"), func() {
+ err := client.TopKReserve(ctx, "topk1", 3).Err()
+ Expect(err).NotTo(HaveOccurred())
+
+ resultInfo, err := client.TopKInfo(ctx, "topk1").Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(resultInfo.K).To(BeEquivalentTo(int64(3)))
+
+ resultAdd, err := client.TopKAdd(ctx, "topk1", "item1", "item2", 3, "item1").Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(len(resultAdd)).To(BeEquivalentTo(int64(4)))
+
+ resultQuery, err := client.TopKQuery(ctx, "topk1", "item1", "item2", 4, 3).Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(len(resultQuery)).To(BeEquivalentTo(4))
+ Expect(resultQuery[0]).To(BeTrue())
+ Expect(resultQuery[1]).To(BeTrue())
+ Expect(resultQuery[2]).To(BeFalse())
+ Expect(resultQuery[3]).To(BeTrue())
+
+ resultCount, err := client.TopKCount(ctx, "topk1", "item1", "item2", "item3").Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(len(resultCount)).To(BeEquivalentTo(3))
+ Expect(resultCount[0]).To(BeEquivalentTo(int64(2)))
+ Expect(resultCount[1]).To(BeEquivalentTo(int64(1)))
+ Expect(resultCount[2]).To(BeEquivalentTo(int64(0)))
+
+ resultIncr, err := client.TopKIncrBy(ctx, "topk1", "item1", 5, "item2", 10).Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(len(resultIncr)).To(BeEquivalentTo(2))
+
+ resultCount, err = client.TopKCount(ctx, "topk1", "item1", "item2", "item3").Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(len(resultCount)).To(BeEquivalentTo(3))
+ Expect(resultCount[0]).To(BeEquivalentTo(int64(7)))
+ Expect(resultCount[1]).To(BeEquivalentTo(int64(11)))
+ Expect(resultCount[2]).To(BeEquivalentTo(int64(0)))
+
+ resultList, err := client.TopKList(ctx, "topk1").Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(len(resultList)).To(BeEquivalentTo(3))
+ Expect(resultList).To(ContainElements("item2", "item1", "3"))
+
+ resultListWithCount, err := client.TopKListWithCount(ctx, "topk1").Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(len(resultListWithCount)).To(BeEquivalentTo(3))
+ Expect(resultListWithCount["3"]).To(BeEquivalentTo(int64(1)))
+ Expect(resultListWithCount["item1"]).To(BeEquivalentTo(int64(7)))
+ Expect(resultListWithCount["item2"]).To(BeEquivalentTo(int64(11)))
+ })
+
+ It("should TopKReserveWithOptions", Label("topk", "topkreservewithoptions"), func() {
+ err := client.TopKReserveWithOptions(ctx, "topk1", 3, 1500, 8, 0.5).Err()
+ Expect(err).NotTo(HaveOccurred())
+
+ resultInfo, err := client.TopKInfo(ctx, "topk1").Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(resultInfo.K).To(BeEquivalentTo(int64(3)))
+ Expect(resultInfo.Width).To(BeEquivalentTo(int64(1500)))
+ Expect(resultInfo.Depth).To(BeEquivalentTo(int64(8)))
+ Expect(resultInfo.Decay).To(BeEquivalentTo(0.5))
+ })
+ })
+
+ Describe("t-digest", Label("tdigest"), func() {
+ It("should TDigestAdd, TDigestCreate, TDigestInfo, TDigestByRank, TDigestByRevRank, TDigestCDF, TDigestMax, TDigestMin, TDigestQuantile, TDigestRank, TDigestRevRank, TDigestTrimmedMean, TDigestReset, ", Label("tdigest", "tdigestadd", "tdigestcreate", "tdigestinfo", "tdigestbyrank", "tdigestbyrevrank", "tdigestcdf", "tdigestmax", "tdigestmin", "tdigestquantile", "tdigestrank", "tdigestrevrank", "tdigesttrimmedmean", "tdigestreset"), func() {
+ err := client.TDigestCreate(ctx, "tdigest1").Err()
+ Expect(err).NotTo(HaveOccurred())
+
+ info, err := client.TDigestInfo(ctx, "tdigest1").Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(info.Observations).To(BeEquivalentTo(int64(0)))
+
+ // Test with empty sketch
+ byRank, err := client.TDigestByRank(ctx, "tdigest1", 0, 1, 2, 3).Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(len(byRank)).To(BeEquivalentTo(4))
+
+ byRevRank, err := client.TDigestByRevRank(ctx, "tdigest1", 0, 1, 2).Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(len(byRevRank)).To(BeEquivalentTo(3))
+
+ cdf, err := client.TDigestCDF(ctx, "tdigest1", 15, 35, 70).Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(len(cdf)).To(BeEquivalentTo(3))
+
+ max, err := client.TDigestMax(ctx, "tdigest1").Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(math.IsNaN(max)).To(BeTrue())
+
+ min, err := client.TDigestMin(ctx, "tdigest1").Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(math.IsNaN(min)).To(BeTrue())
+
+ quantile, err := client.TDigestQuantile(ctx, "tdigest1", 0.1, 0.2).Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(len(quantile)).To(BeEquivalentTo(2))
+
+ rank, err := client.TDigestRank(ctx, "tdigest1", 10, 20).Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(len(rank)).To(BeEquivalentTo(2))
+
+ revRank, err := client.TDigestRevRank(ctx, "tdigest1", 10, 20).Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(len(revRank)).To(BeEquivalentTo(2))
+
+ trimmedMean, err := client.TDigestTrimmedMean(ctx, "tdigest1", 0.1, 0.6).Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(math.IsNaN(trimmedMean)).To(BeTrue())
+
+ // Add elements
+ err = client.TDigestAdd(ctx, "tdigest1", 10, 20, 30, 40, 50, 60, 70, 80, 90, 100).Err()
+ Expect(err).NotTo(HaveOccurred())
+
+ info, err = client.TDigestInfo(ctx, "tdigest1").Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(info.Observations).To(BeEquivalentTo(int64(10)))
+
+ byRank, err = client.TDigestByRank(ctx, "tdigest1", 0, 1, 2).Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(len(byRank)).To(BeEquivalentTo(3))
+ Expect(byRank[0]).To(BeEquivalentTo(float64(10)))
+ Expect(byRank[1]).To(BeEquivalentTo(float64(20)))
+ Expect(byRank[2]).To(BeEquivalentTo(float64(30)))
+
+ byRevRank, err = client.TDigestByRevRank(ctx, "tdigest1", 0, 1, 2).Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(len(byRevRank)).To(BeEquivalentTo(3))
+ Expect(byRevRank[0]).To(BeEquivalentTo(float64(100)))
+ Expect(byRevRank[1]).To(BeEquivalentTo(float64(90)))
+ Expect(byRevRank[2]).To(BeEquivalentTo(float64(80)))
+
+ cdf, err = client.TDigestCDF(ctx, "tdigest1", 15, 35, 70).Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(len(cdf)).To(BeEquivalentTo(3))
+ Expect(cdf[0]).To(BeEquivalentTo(0.1))
+ Expect(cdf[1]).To(BeEquivalentTo(0.3))
+ Expect(cdf[2]).To(BeEquivalentTo(0.65))
+
+ max, err = client.TDigestMax(ctx, "tdigest1").Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(max).To(BeEquivalentTo(float64(100)))
+
+ min, err = client.TDigestMin(ctx, "tdigest1").Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(min).To(BeEquivalentTo(float64(10)))
+
+ quantile, err = client.TDigestQuantile(ctx, "tdigest1", 0.1, 0.2).Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(len(quantile)).To(BeEquivalentTo(2))
+ Expect(quantile[0]).To(BeEquivalentTo(float64(20)))
+ Expect(quantile[1]).To(BeEquivalentTo(float64(30)))
+
+ rank, err = client.TDigestRank(ctx, "tdigest1", 10, 20).Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(len(rank)).To(BeEquivalentTo(2))
+ Expect(rank[0]).To(BeEquivalentTo(int64(0)))
+ Expect(rank[1]).To(BeEquivalentTo(int64(1)))
+
+ revRank, err = client.TDigestRevRank(ctx, "tdigest1", 10, 20).Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(len(revRank)).To(BeEquivalentTo(2))
+ Expect(revRank[0]).To(BeEquivalentTo(int64(9)))
+ Expect(revRank[1]).To(BeEquivalentTo(int64(8)))
+
+ trimmedMean, err = client.TDigestTrimmedMean(ctx, "tdigest1", 0.1, 0.6).Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(trimmedMean).To(BeEquivalentTo(float64(40)))
+
+ reset, err := client.TDigestReset(ctx, "tdigest1").Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(reset).To(BeEquivalentTo("OK"))
+ })
+
+ It("should TDigestCreateWithCompression", Label("tdigest", "tcreatewithcompression"), func() {
+ err := client.TDigestCreateWithCompression(ctx, "tdigest1", 2000).Err()
+ Expect(err).NotTo(HaveOccurred())
+
+ info, err := client.TDigestInfo(ctx, "tdigest1").Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(info.Compression).To(BeEquivalentTo(int64(2000)))
+ })
+
+ It("should TDigestMerge", Label("tdigest", "tmerge", "NonRedisEnterprise"), func() {
+ err := client.TDigestCreate(ctx, "tdigest1").Err()
+ Expect(err).NotTo(HaveOccurred())
+ err = client.TDigestAdd(ctx, "tdigest1", 10, 20, 30, 40, 50, 60, 70, 80, 90, 100).Err()
+ Expect(err).NotTo(HaveOccurred())
+
+ err = client.TDigestCreate(ctx, "tdigest2").Err()
+ Expect(err).NotTo(HaveOccurred())
+ err = client.TDigestAdd(ctx, "tdigest2", 15, 25, 35, 45, 55, 65, 75, 85, 95, 105).Err()
+ Expect(err).NotTo(HaveOccurred())
+
+ err = client.TDigestCreate(ctx, "tdigest3").Err()
+ Expect(err).NotTo(HaveOccurred())
+ err = client.TDigestAdd(ctx, "tdigest3", 50, 60, 70, 80, 90, 100, 110, 120, 130, 140).Err()
+ Expect(err).NotTo(HaveOccurred())
+
+ options := &redis.TDigestMergeOptions{
+ Compression: 1000,
+ Override: false,
+ }
+ err = client.TDigestMerge(ctx, "tdigest1", options, "tdigest2", "tdigest3").Err()
+ Expect(err).NotTo(HaveOccurred())
+
+ info, err := client.TDigestInfo(ctx, "tdigest1").Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(info.Observations).To(BeEquivalentTo(int64(30)))
+ Expect(info.Compression).To(BeEquivalentTo(int64(1000)))
+
+ max, err := client.TDigestMax(ctx, "tdigest1").Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(max).To(BeEquivalentTo(float64(140)))
+ })
+ })
+})
diff --git a/dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/pubsub.go b/dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.5.1/pubsub.go
index efc2354..5df537c 100644
--- a/dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/pubsub.go
+++ b/dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.5.1/pubsub.go
@@ -7,9 +7,9 @@ import (
"sync"
"time"
- "github.com/go-redis/redis/v8/internal"
- "github.com/go-redis/redis/v8/internal/pool"
- "github.com/go-redis/redis/v8/internal/proto"
+ "github.com/redis/go-redis/v9/internal"
+ "github.com/redis/go-redis/v9/internal/pool"
+ "github.com/redis/go-redis/v9/internal/proto"
)
// PubSub implements Pub/Sub commands as described in
@@ -24,10 +24,11 @@ type PubSub struct {
newConn func(ctx context.Context, channels []string) (*pool.Conn, error)
closeConn func(*pool.Conn) error
- mu sync.Mutex
- cn *pool.Conn
- channels map[string]struct{}
- patterns map[string]struct{}
+ mu sync.Mutex
+ cn *pool.Conn
+ channels map[string]struct{}
+ patterns map[string]struct{}
+ schannels map[string]struct{}
closed bool
exit chan struct{}
@@ -46,6 +47,7 @@ func (c *PubSub) init() {
func (c *PubSub) String() string {
channels := mapKeys(c.channels)
channels = append(channels, mapKeys(c.patterns)...)
+ channels = append(channels, mapKeys(c.schannels)...)
return fmt.Sprintf("PubSub(%s)", strings.Join(channels, ", "))
}
@@ -82,7 +84,7 @@ func (c *PubSub) conn(ctx context.Context, newChannels []string) (*pool.Conn, er
}
func (c *PubSub) writeCmd(ctx context.Context, cn *pool.Conn, cmd Cmder) error {
- return cn.WithWriter(ctx, c.opt.WriteTimeout, func(wr *proto.Writer) error {
+ return cn.WithWriter(context.Background(), c.opt.WriteTimeout, func(wr *proto.Writer) error {
return writeCmd(wr, cmd)
})
}
@@ -101,6 +103,13 @@ func (c *PubSub) resubscribe(ctx context.Context, cn *pool.Conn) error {
}
}
+ if len(c.schannels) > 0 {
+ err := c._subscribe(ctx, cn, "ssubscribe", mapKeys(c.schannels))
+ if err != nil && firstErr == nil {
+ firstErr = err
+ }
+ }
+
return firstErr
}
@@ -208,15 +217,38 @@ func (c *PubSub) PSubscribe(ctx context.Context, patterns ...string) error {
return err
}
+// SSubscribe Subscribes the client to the specified shard channels.
+func (c *PubSub) SSubscribe(ctx context.Context, channels ...string) error {
+ c.mu.Lock()
+ defer c.mu.Unlock()
+
+ err := c.subscribe(ctx, "ssubscribe", channels...)
+ if c.schannels == nil {
+ c.schannels = make(map[string]struct{})
+ }
+ for _, s := range channels {
+ c.schannels[s] = struct{}{}
+ }
+ return err
+}
+
// Unsubscribe the client from the given channels, or from all of
// them if none is given.
func (c *PubSub) Unsubscribe(ctx context.Context, channels ...string) error {
c.mu.Lock()
defer c.mu.Unlock()
- for _, channel := range channels {
- delete(c.channels, channel)
+ if len(channels) > 0 {
+ for _, channel := range channels {
+ delete(c.channels, channel)
+ }
+ } else {
+ // Unsubscribe from all channels.
+ for channel := range c.channels {
+ delete(c.channels, channel)
+ }
}
+
err := c.subscribe(ctx, "unsubscribe", channels...)
return err
}
@@ -227,13 +259,42 @@ func (c *PubSub) PUnsubscribe(ctx context.Context, patterns ...string) error {
c.mu.Lock()
defer c.mu.Unlock()
- for _, pattern := range patterns {
- delete(c.patterns, pattern)
+ if len(patterns) > 0 {
+ for _, pattern := range patterns {
+ delete(c.patterns, pattern)
+ }
+ } else {
+ // Unsubscribe from all patterns.
+ for pattern := range c.patterns {
+ delete(c.patterns, pattern)
+ }
}
+
err := c.subscribe(ctx, "punsubscribe", patterns...)
return err
}
+// SUnsubscribe unsubscribes the client from the given shard channels,
+// or from all of them if none is given.
+func (c *PubSub) SUnsubscribe(ctx context.Context, channels ...string) error {
+ c.mu.Lock()
+ defer c.mu.Unlock()
+
+ if len(channels) > 0 {
+ for _, channel := range channels {
+ delete(c.schannels, channel)
+ }
+ } else {
+ // Unsubscribe from all channels.
+ for channel := range c.schannels {
+ delete(c.schannels, channel)
+ }
+ }
+
+ err := c.subscribe(ctx, "sunsubscribe", channels...)
+ return err
+}
+
func (c *PubSub) subscribe(ctx context.Context, redisCmd string, channels ...string) error {
cn, err := c.conn(ctx, channels)
if err != nil {
@@ -311,7 +372,7 @@ func (c *PubSub) newMessage(reply interface{}) (interface{}, error) {
}, nil
case []interface{}:
switch kind := reply[0].(string); kind {
- case "subscribe", "unsubscribe", "psubscribe", "punsubscribe":
+ case "subscribe", "unsubscribe", "psubscribe", "punsubscribe", "ssubscribe", "sunsubscribe":
// Can be nil in case of "unsubscribe".
channel, _ := reply[1].(string)
return &Subscription{
@@ -319,7 +380,7 @@ func (c *PubSub) newMessage(reply interface{}) (interface{}, error) {
Channel: channel,
Count: int(reply[2].(int64)),
}, nil
- case "message":
+ case "message", "smessage":
switch payload := reply[2].(type) {
case string:
return &Message{
@@ -371,7 +432,7 @@ func (c *PubSub) ReceiveTimeout(ctx context.Context, timeout time.Duration) (int
return nil, err
}
- err = cn.WithReader(ctx, timeout, func(rd *proto.Reader) error {
+ err = cn.WithReader(context.Background(), timeout, func(rd *proto.Reader) error {
return c.cmd.readReply(rd)
})
@@ -426,11 +487,11 @@ func (c *PubSub) getContext() context.Context {
// Channel returns a Go channel for concurrently receiving messages.
// The channel is closed together with the PubSub. If the Go channel
-// is blocked full for 30 seconds the message is dropped.
+// is blocked full for 1 minute the message is dropped.
// Receive* APIs can not be used after channel is created.
//
// go-redis periodically sends ping messages to test connection health
-// and re-subscribes if ping can not not received for 30 seconds.
+// and re-subscribes if ping can not not received for 1 minute.
func (c *PubSub) Channel(opts ...ChannelOption) <-chan *Message {
c.chOnce.Do(func() {
c.msgCh = newChannel(c, opts...)
@@ -456,9 +517,9 @@ func (c *PubSub) ChannelSize(size int) <-chan *Message {
// reconnections.
//
// ChannelWithSubscriptions can not be used together with Channel or ChannelSize.
-func (c *PubSub) ChannelWithSubscriptions(_ context.Context, size int) <-chan interface{} {
+func (c *PubSub) ChannelWithSubscriptions(opts ...ChannelOption) <-chan interface{} {
c.chOnce.Do(func() {
- c.allCh = newChannel(c, WithChannelSize(size))
+ c.allCh = newChannel(c, opts...)
c.allCh.initAllChan()
})
if c.allCh == nil {
diff --git a/dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.5.1/pubsub_commands.go b/dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.5.1/pubsub_commands.go
new file mode 100644
index 0000000..28622aa
--- /dev/null
+++ b/dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.5.1/pubsub_commands.go
@@ -0,0 +1,76 @@
+package redis
+
+import "context"
+
+type PubSubCmdable interface {
+ Publish(ctx context.Context, channel string, message interface{}) *IntCmd
+ SPublish(ctx context.Context, channel string, message interface{}) *IntCmd
+ PubSubChannels(ctx context.Context, pattern string) *StringSliceCmd
+ PubSubNumSub(ctx context.Context, channels ...string) *MapStringIntCmd
+ PubSubNumPat(ctx context.Context) *IntCmd
+ PubSubShardChannels(ctx context.Context, pattern string) *StringSliceCmd
+ PubSubShardNumSub(ctx context.Context, channels ...string) *MapStringIntCmd
+}
+
+// Publish posts the message to the channel.
+func (c cmdable) Publish(ctx context.Context, channel string, message interface{}) *IntCmd {
+ cmd := NewIntCmd(ctx, "publish", channel, message)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) SPublish(ctx context.Context, channel string, message interface{}) *IntCmd {
+ cmd := NewIntCmd(ctx, "spublish", channel, message)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) PubSubChannels(ctx context.Context, pattern string) *StringSliceCmd {
+ args := []interface{}{"pubsub", "channels"}
+ if pattern != "*" {
+ args = append(args, pattern)
+ }
+ cmd := NewStringSliceCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) PubSubNumSub(ctx context.Context, channels ...string) *MapStringIntCmd {
+ args := make([]interface{}, 2+len(channels))
+ args[0] = "pubsub"
+ args[1] = "numsub"
+ for i, channel := range channels {
+ args[2+i] = channel
+ }
+ cmd := NewMapStringIntCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) PubSubShardChannels(ctx context.Context, pattern string) *StringSliceCmd {
+ args := []interface{}{"pubsub", "shardchannels"}
+ if pattern != "*" {
+ args = append(args, pattern)
+ }
+ cmd := NewStringSliceCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) PubSubShardNumSub(ctx context.Context, channels ...string) *MapStringIntCmd {
+ args := make([]interface{}, 2+len(channels))
+ args[0] = "pubsub"
+ args[1] = "shardnumsub"
+ for i, channel := range channels {
+ args[2+i] = channel
+ }
+ cmd := NewMapStringIntCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) PubSubNumPat(ctx context.Context) *IntCmd {
+ cmd := NewIntCmd(ctx, "pubsub", "numpat")
+ _ = c(ctx, cmd)
+ return cmd
+}
diff --git a/dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/pubsub_test.go b/dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.5.1/pubsub_test.go
index 2dfa66b..a761006 100644
--- a/dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/pubsub_test.go
+++ b/dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.5.1/pubsub_test.go
@@ -1,30 +1,24 @@
package redis_test
import (
- "context"
"io"
"net"
"sync"
"time"
- . "github.com/onsi/ginkgo"
- . "github.com/onsi/gomega"
+ . "github.com/bsm/ginkgo/v2"
+ . "github.com/bsm/gomega"
- "github.com/go-redis/redis/v8"
+ "github.com/redis/go-redis/v9"
)
var _ = Describe("PubSub", func() {
var client *redis.Client
- var clientID int64
BeforeEach(func() {
opt := redisOptions()
opt.MinIdleConns = 0
- opt.MaxConnAge = 0
- opt.OnConnect = func(ctx context.Context, cn *redis.Conn) (err error) {
- clientID, err = cn.ClientID(ctx).Result()
- return err
- }
+ opt.ConnMaxLifetime = 0
client = redis.NewClient(opt)
Expect(client.FlushDB(ctx).Err()).NotTo(HaveOccurred())
})
@@ -108,6 +102,35 @@ var _ = Describe("PubSub", func() {
Expect(len(channels)).To(BeNumerically(">=", 2))
})
+ It("should sharded pub/sub channels", func() {
+ channels, err := client.PubSubShardChannels(ctx, "mychannel*").Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(channels).To(BeEmpty())
+
+ pubsub := client.SSubscribe(ctx, "mychannel", "mychannel2")
+ defer pubsub.Close()
+
+ channels, err = client.PubSubShardChannels(ctx, "mychannel*").Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(channels).To(ConsistOf([]string{"mychannel", "mychannel2"}))
+
+ channels, err = client.PubSubShardChannels(ctx, "").Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(channels).To(BeEmpty())
+
+ channels, err = client.PubSubShardChannels(ctx, "*").Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(len(channels)).To(BeNumerically(">=", 2))
+
+ nums, err := client.PubSubShardNumSub(ctx, "mychannel", "mychannel2", "mychannel3").Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(nums).To(Equal(map[string]int64{
+ "mychannel": 1,
+ "mychannel2": 1,
+ "mychannel3": 0,
+ }))
+ })
+
It("should return the numbers of subscribers", func() {
pubsub := client.Subscribe(ctx, "mychannel", "mychannel2")
defer pubsub.Close()
@@ -210,6 +233,82 @@ var _ = Describe("PubSub", func() {
Expect(stats.Misses).To(Equal(uint32(1)))
})
+ It("should sharded pub/sub", func() {
+ pubsub := client.SSubscribe(ctx, "mychannel", "mychannel2")
+ defer pubsub.Close()
+
+ {
+ msgi, err := pubsub.ReceiveTimeout(ctx, time.Second)
+ Expect(err).NotTo(HaveOccurred())
+ subscr := msgi.(*redis.Subscription)
+ Expect(subscr.Kind).To(Equal("ssubscribe"))
+ Expect(subscr.Channel).To(Equal("mychannel"))
+ Expect(subscr.Count).To(Equal(1))
+ }
+
+ {
+ msgi, err := pubsub.ReceiveTimeout(ctx, time.Second)
+ Expect(err).NotTo(HaveOccurred())
+ subscr := msgi.(*redis.Subscription)
+ Expect(subscr.Kind).To(Equal("ssubscribe"))
+ Expect(subscr.Channel).To(Equal("mychannel2"))
+ Expect(subscr.Count).To(Equal(2))
+ }
+
+ {
+ msgi, err := pubsub.ReceiveTimeout(ctx, time.Second)
+ Expect(err.(net.Error).Timeout()).To(Equal(true))
+ Expect(msgi).NotTo(HaveOccurred())
+ }
+
+ n, err := client.SPublish(ctx, "mychannel", "hello").Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(n).To(Equal(int64(1)))
+
+ n, err = client.SPublish(ctx, "mychannel2", "hello2").Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(n).To(Equal(int64(1)))
+
+ Expect(pubsub.SUnsubscribe(ctx, "mychannel", "mychannel2")).NotTo(HaveOccurred())
+
+ {
+ msgi, err := pubsub.ReceiveTimeout(ctx, time.Second)
+ Expect(err).NotTo(HaveOccurred())
+ msg := msgi.(*redis.Message)
+ Expect(msg.Channel).To(Equal("mychannel"))
+ Expect(msg.Payload).To(Equal("hello"))
+ }
+
+ {
+ msgi, err := pubsub.ReceiveTimeout(ctx, time.Second)
+ Expect(err).NotTo(HaveOccurred())
+ msg := msgi.(*redis.Message)
+ Expect(msg.Channel).To(Equal("mychannel2"))
+ Expect(msg.Payload).To(Equal("hello2"))
+ }
+
+ {
+ msgi, err := pubsub.ReceiveTimeout(ctx, time.Second)
+ Expect(err).NotTo(HaveOccurred())
+ subscr := msgi.(*redis.Subscription)
+ Expect(subscr.Kind).To(Equal("sunsubscribe"))
+ Expect(subscr.Channel).To(Equal("mychannel"))
+ Expect(subscr.Count).To(Equal(1))
+ }
+
+ {
+ msgi, err := pubsub.ReceiveTimeout(ctx, time.Second)
+ Expect(err).NotTo(HaveOccurred())
+ subscr := msgi.(*redis.Subscription)
+ Expect(subscr.Kind).To(Equal("sunsubscribe"))
+ Expect(subscr.Channel).To(Equal("mychannel2"))
+ Expect(subscr.Count).To(Equal(0))
+ }
+
+ stats := client.PoolStats()
+ Expect(stats.Misses).To(Equal(uint32(1)))
+ })
+
It("should ping/pong", func() {
pubsub := client.Subscribe(ctx, "mychannel")
defer pubsub.Close()
@@ -421,30 +520,6 @@ var _ = Describe("PubSub", func() {
Expect(msg.Payload).To(Equal(string(bigVal)))
})
- It("handles message payload slice with server-assisted client-size caching", func() {
- pubsub := client.Subscribe(ctx, "__redis__:invalidate")
- defer pubsub.Close()
-
- client2 := redis.NewClient(redisOptions())
- defer client2.Close()
-
- err := client2.Do(ctx, "CLIENT", "TRACKING", "on", "REDIRECT", clientID).Err()
- Expect(err).NotTo(HaveOccurred())
-
- err = client2.Do(ctx, "GET", "mykey").Err()
- Expect(err).To(Equal(redis.Nil))
-
- err = client2.Do(ctx, "SET", "mykey", "myvalue").Err()
- Expect(err).NotTo(HaveOccurred())
-
- ch := pubsub.Channel()
-
- var msg *redis.Message
- Eventually(ch).Should(Receive(&msg))
- Expect(msg.Channel).To(Equal("__redis__:invalidate"))
- Expect(msg.PayloadSlice).To(Equal([]string{"mykey"}))
- })
-
It("supports concurrent Ping and Receive", func() {
const N = 100
diff --git a/dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/race_test.go b/dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.5.1/race_test.go
index 34699d1..aeb2d1f 100644
--- a/dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/race_test.go
+++ b/dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.5.1/race_test.go
@@ -2,7 +2,6 @@ package redis_test
import (
"bytes"
- "context"
"fmt"
"net"
"strconv"
@@ -10,10 +9,10 @@ import (
"testing"
"time"
- . "github.com/onsi/ginkgo"
- . "github.com/onsi/gomega"
+ . "github.com/bsm/ginkgo/v2"
+ . "github.com/bsm/gomega"
- "github.com/go-redis/redis/v8"
+ "github.com/redis/go-redis/v9"
)
var _ = Describe("races", func() {
@@ -138,7 +137,7 @@ var _ = Describe("races", func() {
})
})
- It("should select db", func() {
+ It("should select db", Label("NonRedisEnterprise"), func() {
err := client.Set(ctx, "db", 1, 0).Err()
Expect(err).NotTo(HaveOccurred())
@@ -215,53 +214,6 @@ var _ = Describe("races", func() {
Expect(val).To(Equal(int64(C * N)))
})
- It("should Pipeline", func() {
- perform(C, func(id int) {
- pipe := client.Pipeline()
- for i := 0; i < N; i++ {
- pipe.Echo(ctx, fmt.Sprint(i))
- }
-
- cmds, err := pipe.Exec(ctx)
- Expect(err).NotTo(HaveOccurred())
- Expect(cmds).To(HaveLen(N))
-
- for i := 0; i < N; i++ {
- Expect(cmds[i].(*redis.StringCmd).Val()).To(Equal(fmt.Sprint(i)))
- }
- })
- })
-
- It("should Pipeline", func() {
- pipe := client.Pipeline()
- perform(N, func(id int) {
- pipe.Incr(ctx, "key")
- })
-
- cmds, err := pipe.Exec(ctx)
- Expect(err).NotTo(HaveOccurred())
- Expect(cmds).To(HaveLen(N))
-
- n, err := client.Get(ctx, "key").Int64()
- Expect(err).NotTo(HaveOccurred())
- Expect(n).To(Equal(int64(N)))
- })
-
- It("should TxPipeline", func() {
- pipe := client.TxPipeline()
- perform(N, func(id int) {
- pipe.Incr(ctx, "key")
- })
-
- cmds, err := pipe.Exec(ctx)
- Expect(err).NotTo(HaveOccurred())
- Expect(cmds).To(HaveLen(N))
-
- n, err := client.Get(ctx, "key").Int64()
- Expect(err).NotTo(HaveOccurred())
- Expect(n).To(Equal(int64(N)))
- })
-
PIt("should BLPop", func() {
var received uint32
@@ -289,36 +241,9 @@ var _ = Describe("races", func() {
wg.Wait()
Expect(atomic.LoadUint32(&received)).To(Equal(uint32(C * N)))
})
-
- It("should WithContext", func() {
- perform(C, func(_ int) {
- err := client.WithContext(ctx).Ping(ctx).Err()
- Expect(err).NotTo(HaveOccurred())
- })
- })
-
- It("should abort on context timeout", func() {
- opt := redisClusterOptions()
- client := cluster.newClusterClient(ctx, opt)
-
- ctx, cancel := context.WithCancel(context.Background())
-
- wg := performAsync(C, func(_ int) {
- _, err := client.XRead(ctx, &redis.XReadArgs{
- Streams: []string{"test", "$"},
- Block: 1 * time.Second,
- }).Result()
- Expect(err).To(HaveOccurred())
- Expect(err.Error()).To(Or(Equal(context.Canceled.Error()), ContainSubstring("operation was canceled")))
- })
-
- time.Sleep(10 * time.Millisecond)
- cancel()
- wg.Wait()
- })
})
-var _ = Describe("cluster races", func() {
+var _ = Describe("cluster races", Label("NonRedisEnterprise"), func() {
var client *redis.ClusterClient
var C, N int
diff --git a/dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.5.1/redis.go b/dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.5.1/redis.go
new file mode 100644
index 0000000..d25a0d3
--- /dev/null
+++ b/dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.5.1/redis.go
@@ -0,0 +1,852 @@
+package redis
+
+import (
+ "context"
+ "errors"
+ "fmt"
+ "net"
+ "sync"
+ "sync/atomic"
+ "time"
+
+ "github.com/redis/go-redis/v9/internal"
+ "github.com/redis/go-redis/v9/internal/hscan"
+ "github.com/redis/go-redis/v9/internal/pool"
+ "github.com/redis/go-redis/v9/internal/proto"
+)
+
+// Scanner internal/hscan.Scanner exposed interface.
+type Scanner = hscan.Scanner
+
+// Nil reply returned by Redis when key does not exist.
+const Nil = proto.Nil
+
+// SetLogger set custom log
+func SetLogger(logger internal.Logging) {
+ internal.Logger = logger
+}
+
+//------------------------------------------------------------------------------
+
+type Hook interface {
+ DialHook(next DialHook) DialHook
+ ProcessHook(next ProcessHook) ProcessHook
+ ProcessPipelineHook(next ProcessPipelineHook) ProcessPipelineHook
+}
+
+type (
+ DialHook func(ctx context.Context, network, addr string) (net.Conn, error)
+ ProcessHook func(ctx context.Context, cmd Cmder) error
+ ProcessPipelineHook func(ctx context.Context, cmds []Cmder) error
+)
+
+type hooksMixin struct {
+ hooksMu *sync.Mutex
+
+ slice []Hook
+ initial hooks
+ current hooks
+}
+
+func (hs *hooksMixin) initHooks(hooks hooks) {
+ hs.hooksMu = new(sync.Mutex)
+ hs.initial = hooks
+ hs.chain()
+}
+
+type hooks struct {
+ dial DialHook
+ process ProcessHook
+ pipeline ProcessPipelineHook
+ txPipeline ProcessPipelineHook
+}
+
+func (h *hooks) setDefaults() {
+ if h.dial == nil {
+ h.dial = func(ctx context.Context, network, addr string) (net.Conn, error) { return nil, nil }
+ }
+ if h.process == nil {
+ h.process = func(ctx context.Context, cmd Cmder) error { return nil }
+ }
+ if h.pipeline == nil {
+ h.pipeline = func(ctx context.Context, cmds []Cmder) error { return nil }
+ }
+ if h.txPipeline == nil {
+ h.txPipeline = func(ctx context.Context, cmds []Cmder) error { return nil }
+ }
+}
+
+// AddHook is to add a hook to the queue.
+// Hook is a function executed during network connection, command execution, and pipeline,
+// it is a first-in-first-out stack queue (FIFO).
+// You need to execute the next hook in each hook, unless you want to terminate the execution of the command.
+// For example, you added hook-1, hook-2:
+//
+// client.AddHook(hook-1, hook-2)
+//
+// hook-1:
+//
+// func (Hook1) ProcessHook(next redis.ProcessHook) redis.ProcessHook {
+// return func(ctx context.Context, cmd Cmder) error {
+// print("hook-1 start")
+// next(ctx, cmd)
+// print("hook-1 end")
+// return nil
+// }
+// }
+//
+// hook-2:
+//
+// func (Hook2) ProcessHook(next redis.ProcessHook) redis.ProcessHook {
+// return func(ctx context.Context, cmd redis.Cmder) error {
+// print("hook-2 start")
+// next(ctx, cmd)
+// print("hook-2 end")
+// return nil
+// }
+// }
+//
+// The execution sequence is:
+//
+// hook-1 start -> hook-2 start -> exec redis cmd -> hook-2 end -> hook-1 end
+//
+// Please note: "next(ctx, cmd)" is very important, it will call the next hook,
+// if "next(ctx, cmd)" is not executed, the redis command will not be executed.
+func (hs *hooksMixin) AddHook(hook Hook) {
+ hs.slice = append(hs.slice, hook)
+ hs.chain()
+}
+
+func (hs *hooksMixin) chain() {
+ hs.initial.setDefaults()
+
+ hs.hooksMu.Lock()
+ defer hs.hooksMu.Unlock()
+
+ hs.current.dial = hs.initial.dial
+ hs.current.process = hs.initial.process
+ hs.current.pipeline = hs.initial.pipeline
+ hs.current.txPipeline = hs.initial.txPipeline
+
+ for i := len(hs.slice) - 1; i >= 0; i-- {
+ if wrapped := hs.slice[i].DialHook(hs.current.dial); wrapped != nil {
+ hs.current.dial = wrapped
+ }
+ if wrapped := hs.slice[i].ProcessHook(hs.current.process); wrapped != nil {
+ hs.current.process = wrapped
+ }
+ if wrapped := hs.slice[i].ProcessPipelineHook(hs.current.pipeline); wrapped != nil {
+ hs.current.pipeline = wrapped
+ }
+ if wrapped := hs.slice[i].ProcessPipelineHook(hs.current.txPipeline); wrapped != nil {
+ hs.current.txPipeline = wrapped
+ }
+ }
+}
+
+func (hs *hooksMixin) clone() hooksMixin {
+ hs.hooksMu.Lock()
+ defer hs.hooksMu.Unlock()
+
+ clone := *hs
+ l := len(clone.slice)
+ clone.slice = clone.slice[:l:l]
+ clone.hooksMu = new(sync.Mutex)
+ return clone
+}
+
+func (hs *hooksMixin) withProcessHook(ctx context.Context, cmd Cmder, hook ProcessHook) error {
+ for i := len(hs.slice) - 1; i >= 0; i-- {
+ if wrapped := hs.slice[i].ProcessHook(hook); wrapped != nil {
+ hook = wrapped
+ }
+ }
+ return hook(ctx, cmd)
+}
+
+func (hs *hooksMixin) withProcessPipelineHook(
+ ctx context.Context, cmds []Cmder, hook ProcessPipelineHook,
+) error {
+ for i := len(hs.slice) - 1; i >= 0; i-- {
+ if wrapped := hs.slice[i].ProcessPipelineHook(hook); wrapped != nil {
+ hook = wrapped
+ }
+ }
+ return hook(ctx, cmds)
+}
+
+func (hs *hooksMixin) dialHook(ctx context.Context, network, addr string) (net.Conn, error) {
+ hs.hooksMu.Lock()
+ defer hs.hooksMu.Unlock()
+ return hs.current.dial(ctx, network, addr)
+}
+
+func (hs *hooksMixin) processHook(ctx context.Context, cmd Cmder) error {
+ return hs.current.process(ctx, cmd)
+}
+
+func (hs *hooksMixin) processPipelineHook(ctx context.Context, cmds []Cmder) error {
+ return hs.current.pipeline(ctx, cmds)
+}
+
+func (hs *hooksMixin) processTxPipelineHook(ctx context.Context, cmds []Cmder) error {
+ return hs.current.txPipeline(ctx, cmds)
+}
+
+//------------------------------------------------------------------------------
+
+type baseClient struct {
+ opt *Options
+ connPool pool.Pooler
+
+ onClose func() error // hook called when client is closed
+}
+
+func (c *baseClient) clone() *baseClient {
+ clone := *c
+ return &clone
+}
+
+func (c *baseClient) withTimeout(timeout time.Duration) *baseClient {
+ opt := c.opt.clone()
+ opt.ReadTimeout = timeout
+ opt.WriteTimeout = timeout
+
+ clone := c.clone()
+ clone.opt = opt
+
+ return clone
+}
+
+func (c *baseClient) String() string {
+ return fmt.Sprintf("Redis<%s db:%d>", c.getAddr(), c.opt.DB)
+}
+
+func (c *baseClient) newConn(ctx context.Context) (*pool.Conn, error) {
+ cn, err := c.connPool.NewConn(ctx)
+ if err != nil {
+ return nil, err
+ }
+
+ err = c.initConn(ctx, cn)
+ if err != nil {
+ _ = c.connPool.CloseConn(cn)
+ return nil, err
+ }
+
+ return cn, nil
+}
+
+func (c *baseClient) getConn(ctx context.Context) (*pool.Conn, error) {
+ if c.opt.Limiter != nil {
+ err := c.opt.Limiter.Allow()
+ if err != nil {
+ return nil, err
+ }
+ }
+
+ cn, err := c._getConn(ctx)
+ if err != nil {
+ if c.opt.Limiter != nil {
+ c.opt.Limiter.ReportResult(err)
+ }
+ return nil, err
+ }
+
+ return cn, nil
+}
+
+func (c *baseClient) _getConn(ctx context.Context) (*pool.Conn, error) {
+ cn, err := c.connPool.Get(ctx)
+ if err != nil {
+ return nil, err
+ }
+
+ if cn.Inited {
+ return cn, nil
+ }
+
+ if err := c.initConn(ctx, cn); err != nil {
+ c.connPool.Remove(ctx, cn, err)
+ if err := errors.Unwrap(err); err != nil {
+ return nil, err
+ }
+ return nil, err
+ }
+
+ return cn, nil
+}
+
+func (c *baseClient) initConn(ctx context.Context, cn *pool.Conn) error {
+ if cn.Inited {
+ return nil
+ }
+ cn.Inited = true
+
+ username, password := c.opt.Username, c.opt.Password
+ if c.opt.CredentialsProvider != nil {
+ username, password = c.opt.CredentialsProvider()
+ }
+
+ connPool := pool.NewSingleConnPool(c.connPool, cn)
+ conn := newConn(c.opt, connPool)
+
+ var auth bool
+ protocol := c.opt.Protocol
+ // By default, use RESP3 in current version.
+ if protocol < 2 {
+ protocol = 3
+ }
+
+ // for redis-server versions that do not support the HELLO command,
+ // RESP2 will continue to be used.
+ if err := conn.Hello(ctx, protocol, username, password, "").Err(); err == nil {
+ auth = true
+ } else if !isRedisError(err) {
+ // When the server responds with the RESP protocol and the result is not a normal
+ // execution result of the HELLO command, we consider it to be an indication that
+ // the server does not support the HELLO command.
+ // The server may be a redis-server that does not support the HELLO command,
+ // or it could be DragonflyDB or a third-party redis-proxy. They all respond
+ // with different error string results for unsupported commands, making it
+ // difficult to rely on error strings to determine all results.
+ return err
+ }
+
+ _, err := conn.Pipelined(ctx, func(pipe Pipeliner) error {
+ if !auth && password != "" {
+ if username != "" {
+ pipe.AuthACL(ctx, username, password)
+ } else {
+ pipe.Auth(ctx, password)
+ }
+ }
+
+ if c.opt.DB > 0 {
+ pipe.Select(ctx, c.opt.DB)
+ }
+
+ if c.opt.readOnly {
+ pipe.ReadOnly(ctx)
+ }
+
+ if c.opt.ClientName != "" {
+ pipe.ClientSetName(ctx, c.opt.ClientName)
+ }
+
+ return nil
+ })
+ if err != nil {
+ return err
+ }
+
+ if !c.opt.DisableIndentity {
+ libName := ""
+ libVer := Version()
+ if c.opt.IdentitySuffix != "" {
+ libName = c.opt.IdentitySuffix
+ }
+ p := conn.Pipeline()
+ p.ClientSetInfo(ctx, WithLibraryName(libName))
+ p.ClientSetInfo(ctx, WithLibraryVersion(libVer))
+ _, _ = p.Exec(ctx)
+ }
+
+ if c.opt.OnConnect != nil {
+ return c.opt.OnConnect(ctx, conn)
+ }
+ return nil
+}
+
+func (c *baseClient) releaseConn(ctx context.Context, cn *pool.Conn, err error) {
+ if c.opt.Limiter != nil {
+ c.opt.Limiter.ReportResult(err)
+ }
+
+ if isBadConn(err, false, c.opt.Addr) {
+ c.connPool.Remove(ctx, cn, err)
+ } else {
+ c.connPool.Put(ctx, cn)
+ }
+}
+
+func (c *baseClient) withConn(
+ ctx context.Context, fn func(context.Context, *pool.Conn) error,
+) error {
+ cn, err := c.getConn(ctx)
+ if err != nil {
+ return err
+ }
+
+ var fnErr error
+ defer func() {
+ c.releaseConn(ctx, cn, fnErr)
+ }()
+
+ fnErr = fn(ctx, cn)
+
+ return fnErr
+}
+
+func (c *baseClient) dial(ctx context.Context, network, addr string) (net.Conn, error) {
+ return c.opt.Dialer(ctx, network, addr)
+}
+
+func (c *baseClient) process(ctx context.Context, cmd Cmder) error {
+ var lastErr error
+ for attempt := 0; attempt <= c.opt.MaxRetries; attempt++ {
+ attempt := attempt
+
+ retry, err := c._process(ctx, cmd, attempt)
+ if err == nil || !retry {
+ return err
+ }
+
+ lastErr = err
+ }
+ return lastErr
+}
+
+func (c *baseClient) _process(ctx context.Context, cmd Cmder, attempt int) (bool, error) {
+ if attempt > 0 {
+ if err := internal.Sleep(ctx, c.retryBackoff(attempt)); err != nil {
+ return false, err
+ }
+ }
+
+ retryTimeout := uint32(0)
+ if err := c.withConn(ctx, func(ctx context.Context, cn *pool.Conn) error {
+ if err := cn.WithWriter(c.context(ctx), c.opt.WriteTimeout, func(wr *proto.Writer) error {
+ return writeCmd(wr, cmd)
+ }); err != nil {
+ atomic.StoreUint32(&retryTimeout, 1)
+ return err
+ }
+
+ if err := cn.WithReader(c.context(ctx), c.cmdTimeout(cmd), cmd.readReply); err != nil {
+ if cmd.readTimeout() == nil {
+ atomic.StoreUint32(&retryTimeout, 1)
+ } else {
+ atomic.StoreUint32(&retryTimeout, 0)
+ }
+ return err
+ }
+
+ return nil
+ }); err != nil {
+ retry := shouldRetry(err, atomic.LoadUint32(&retryTimeout) == 1)
+ return retry, err
+ }
+
+ return false, nil
+}
+
+func (c *baseClient) retryBackoff(attempt int) time.Duration {
+ return internal.RetryBackoff(attempt, c.opt.MinRetryBackoff, c.opt.MaxRetryBackoff)
+}
+
+func (c *baseClient) cmdTimeout(cmd Cmder) time.Duration {
+ if timeout := cmd.readTimeout(); timeout != nil {
+ t := *timeout
+ if t == 0 {
+ return 0
+ }
+ return t + 10*time.Second
+ }
+ return c.opt.ReadTimeout
+}
+
+// Close closes the client, releasing any open resources.
+//
+// It is rare to Close a Client, as the Client is meant to be
+// long-lived and shared between many goroutines.
+func (c *baseClient) Close() error {
+ var firstErr error
+ if c.onClose != nil {
+ if err := c.onClose(); err != nil {
+ firstErr = err
+ }
+ }
+ if err := c.connPool.Close(); err != nil && firstErr == nil {
+ firstErr = err
+ }
+ return firstErr
+}
+
+func (c *baseClient) getAddr() string {
+ return c.opt.Addr
+}
+
+func (c *baseClient) processPipeline(ctx context.Context, cmds []Cmder) error {
+ if err := c.generalProcessPipeline(ctx, cmds, c.pipelineProcessCmds); err != nil {
+ return err
+ }
+ return cmdsFirstErr(cmds)
+}
+
+func (c *baseClient) processTxPipeline(ctx context.Context, cmds []Cmder) error {
+ if err := c.generalProcessPipeline(ctx, cmds, c.txPipelineProcessCmds); err != nil {
+ return err
+ }
+ return cmdsFirstErr(cmds)
+}
+
+type pipelineProcessor func(context.Context, *pool.Conn, []Cmder) (bool, error)
+
+func (c *baseClient) generalProcessPipeline(
+ ctx context.Context, cmds []Cmder, p pipelineProcessor,
+) error {
+ var lastErr error
+ for attempt := 0; attempt <= c.opt.MaxRetries; attempt++ {
+ if attempt > 0 {
+ if err := internal.Sleep(ctx, c.retryBackoff(attempt)); err != nil {
+ setCmdsErr(cmds, err)
+ return err
+ }
+ }
+
+ // Enable retries by default to retry dial errors returned by withConn.
+ canRetry := true
+ lastErr = c.withConn(ctx, func(ctx context.Context, cn *pool.Conn) error {
+ var err error
+ canRetry, err = p(ctx, cn, cmds)
+ return err
+ })
+ if lastErr == nil || !canRetry || !shouldRetry(lastErr, true) {
+ return lastErr
+ }
+ }
+ return lastErr
+}
+
+func (c *baseClient) pipelineProcessCmds(
+ ctx context.Context, cn *pool.Conn, cmds []Cmder,
+) (bool, error) {
+ if err := cn.WithWriter(c.context(ctx), c.opt.WriteTimeout, func(wr *proto.Writer) error {
+ return writeCmds(wr, cmds)
+ }); err != nil {
+ setCmdsErr(cmds, err)
+ return true, err
+ }
+
+ if err := cn.WithReader(c.context(ctx), c.opt.ReadTimeout, func(rd *proto.Reader) error {
+ return pipelineReadCmds(rd, cmds)
+ }); err != nil {
+ return true, err
+ }
+
+ return false, nil
+}
+
+func pipelineReadCmds(rd *proto.Reader, cmds []Cmder) error {
+ for i, cmd := range cmds {
+ err := cmd.readReply(rd)
+ cmd.SetErr(err)
+ if err != nil && !isRedisError(err) {
+ setCmdsErr(cmds[i+1:], err)
+ return err
+ }
+ }
+ // Retry errors like "LOADING redis is loading the dataset in memory".
+ return cmds[0].Err()
+}
+
+func (c *baseClient) txPipelineProcessCmds(
+ ctx context.Context, cn *pool.Conn, cmds []Cmder,
+) (bool, error) {
+ if err := cn.WithWriter(c.context(ctx), c.opt.WriteTimeout, func(wr *proto.Writer) error {
+ return writeCmds(wr, cmds)
+ }); err != nil {
+ setCmdsErr(cmds, err)
+ return true, err
+ }
+
+ if err := cn.WithReader(c.context(ctx), c.opt.ReadTimeout, func(rd *proto.Reader) error {
+ statusCmd := cmds[0].(*StatusCmd)
+ // Trim multi and exec.
+ trimmedCmds := cmds[1 : len(cmds)-1]
+
+ if err := txPipelineReadQueued(rd, statusCmd, trimmedCmds); err != nil {
+ setCmdsErr(cmds, err)
+ return err
+ }
+
+ return pipelineReadCmds(rd, trimmedCmds)
+ }); err != nil {
+ return false, err
+ }
+
+ return false, nil
+}
+
+func txPipelineReadQueued(rd *proto.Reader, statusCmd *StatusCmd, cmds []Cmder) error {
+ // Parse +OK.
+ if err := statusCmd.readReply(rd); err != nil {
+ return err
+ }
+
+ // Parse +QUEUED.
+ for range cmds {
+ if err := statusCmd.readReply(rd); err != nil && !isRedisError(err) {
+ return err
+ }
+ }
+
+ // Parse number of replies.
+ line, err := rd.ReadLine()
+ if err != nil {
+ if err == Nil {
+ err = TxFailedErr
+ }
+ return err
+ }
+
+ if line[0] != proto.RespArray {
+ return fmt.Errorf("redis: expected '*', but got line %q", line)
+ }
+
+ return nil
+}
+
+func (c *baseClient) context(ctx context.Context) context.Context {
+ if c.opt.ContextTimeoutEnabled {
+ return ctx
+ }
+ return context.Background()
+}
+
+//------------------------------------------------------------------------------
+
+// Client is a Redis client representing a pool of zero or more underlying connections.
+// It's safe for concurrent use by multiple goroutines.
+//
+// Client creates and frees connections automatically; it also maintains a free pool
+// of idle connections. You can control the pool size with Config.PoolSize option.
+type Client struct {
+ *baseClient
+ cmdable
+ hooksMixin
+}
+
+// NewClient returns a client to the Redis Server specified by Options.
+func NewClient(opt *Options) *Client {
+ opt.init()
+
+ c := Client{
+ baseClient: &baseClient{
+ opt: opt,
+ },
+ }
+ c.init()
+ c.connPool = newConnPool(opt, c.dialHook)
+
+ return &c
+}
+
+func (c *Client) init() {
+ c.cmdable = c.Process
+ c.initHooks(hooks{
+ dial: c.baseClient.dial,
+ process: c.baseClient.process,
+ pipeline: c.baseClient.processPipeline,
+ txPipeline: c.baseClient.processTxPipeline,
+ })
+}
+
+func (c *Client) WithTimeout(timeout time.Duration) *Client {
+ clone := *c
+ clone.baseClient = c.baseClient.withTimeout(timeout)
+ clone.init()
+ return &clone
+}
+
+func (c *Client) Conn() *Conn {
+ return newConn(c.opt, pool.NewStickyConnPool(c.connPool))
+}
+
+// Do create a Cmd from the args and processes the cmd.
+func (c *Client) Do(ctx context.Context, args ...interface{}) *Cmd {
+ cmd := NewCmd(ctx, args...)
+ _ = c.Process(ctx, cmd)
+ return cmd
+}
+
+func (c *Client) Process(ctx context.Context, cmd Cmder) error {
+ err := c.processHook(ctx, cmd)
+ cmd.SetErr(err)
+ return err
+}
+
+// Options returns read-only Options that were used to create the client.
+func (c *Client) Options() *Options {
+ return c.opt
+}
+
+type PoolStats pool.Stats
+
+// PoolStats returns connection pool stats.
+func (c *Client) PoolStats() *PoolStats {
+ stats := c.connPool.Stats()
+ return (*PoolStats)(stats)
+}
+
+func (c *Client) Pipelined(ctx context.Context, fn func(Pipeliner) error) ([]Cmder, error) {
+ return c.Pipeline().Pipelined(ctx, fn)
+}
+
+func (c *Client) Pipeline() Pipeliner {
+ pipe := Pipeline{
+ exec: pipelineExecer(c.processPipelineHook),
+ }
+ pipe.init()
+ return &pipe
+}
+
+func (c *Client) TxPipelined(ctx context.Context, fn func(Pipeliner) error) ([]Cmder, error) {
+ return c.TxPipeline().Pipelined(ctx, fn)
+}
+
+// TxPipeline acts like Pipeline, but wraps queued commands with MULTI/EXEC.
+func (c *Client) TxPipeline() Pipeliner {
+ pipe := Pipeline{
+ exec: func(ctx context.Context, cmds []Cmder) error {
+ cmds = wrapMultiExec(ctx, cmds)
+ return c.processTxPipelineHook(ctx, cmds)
+ },
+ }
+ pipe.init()
+ return &pipe
+}
+
+func (c *Client) pubSub() *PubSub {
+ pubsub := &PubSub{
+ opt: c.opt,
+
+ newConn: func(ctx context.Context, channels []string) (*pool.Conn, error) {
+ return c.newConn(ctx)
+ },
+ closeConn: c.connPool.CloseConn,
+ }
+ pubsub.init()
+ return pubsub
+}
+
+// Subscribe subscribes the client to the specified channels.
+// Channels can be omitted to create empty subscription.
+// Note that this method does not wait on a response from Redis, so the
+// subscription may not be active immediately. To force the connection to wait,
+// you may call the Receive() method on the returned *PubSub like so:
+//
+// sub := client.Subscribe(queryResp)
+// iface, err := sub.Receive()
+// if err != nil {
+// // handle error
+// }
+//
+// // Should be *Subscription, but others are possible if other actions have been
+// // taken on sub since it was created.
+// switch iface.(type) {
+// case *Subscription:
+// // subscribe succeeded
+// case *Message:
+// // received first message
+// case *Pong:
+// // pong received
+// default:
+// // handle error
+// }
+//
+// ch := sub.Channel()
+func (c *Client) Subscribe(ctx context.Context, channels ...string) *PubSub {
+ pubsub := c.pubSub()
+ if len(channels) > 0 {
+ _ = pubsub.Subscribe(ctx, channels...)
+ }
+ return pubsub
+}
+
+// PSubscribe subscribes the client to the given patterns.
+// Patterns can be omitted to create empty subscription.
+func (c *Client) PSubscribe(ctx context.Context, channels ...string) *PubSub {
+ pubsub := c.pubSub()
+ if len(channels) > 0 {
+ _ = pubsub.PSubscribe(ctx, channels...)
+ }
+ return pubsub
+}
+
+// SSubscribe Subscribes the client to the specified shard channels.
+// Channels can be omitted to create empty subscription.
+func (c *Client) SSubscribe(ctx context.Context, channels ...string) *PubSub {
+ pubsub := c.pubSub()
+ if len(channels) > 0 {
+ _ = pubsub.SSubscribe(ctx, channels...)
+ }
+ return pubsub
+}
+
+//------------------------------------------------------------------------------
+
+// Conn represents a single Redis connection rather than a pool of connections.
+// Prefer running commands from Client unless there is a specific need
+// for a continuous single Redis connection.
+type Conn struct {
+ baseClient
+ cmdable
+ statefulCmdable
+ hooksMixin
+}
+
+func newConn(opt *Options, connPool pool.Pooler) *Conn {
+ c := Conn{
+ baseClient: baseClient{
+ opt: opt,
+ connPool: connPool,
+ },
+ }
+
+ c.cmdable = c.Process
+ c.statefulCmdable = c.Process
+ c.initHooks(hooks{
+ dial: c.baseClient.dial,
+ process: c.baseClient.process,
+ pipeline: c.baseClient.processPipeline,
+ txPipeline: c.baseClient.processTxPipeline,
+ })
+
+ return &c
+}
+
+func (c *Conn) Process(ctx context.Context, cmd Cmder) error {
+ err := c.processHook(ctx, cmd)
+ cmd.SetErr(err)
+ return err
+}
+
+func (c *Conn) Pipelined(ctx context.Context, fn func(Pipeliner) error) ([]Cmder, error) {
+ return c.Pipeline().Pipelined(ctx, fn)
+}
+
+func (c *Conn) Pipeline() Pipeliner {
+ pipe := Pipeline{
+ exec: c.processPipelineHook,
+ }
+ pipe.init()
+ return &pipe
+}
+
+func (c *Conn) TxPipelined(ctx context.Context, fn func(Pipeliner) error) ([]Cmder, error) {
+ return c.TxPipeline().Pipelined(ctx, fn)
+}
+
+// TxPipeline acts like Pipeline, but wraps queued commands with MULTI/EXEC.
+func (c *Conn) TxPipeline() Pipeliner {
+ pipe := Pipeline{
+ exec: func(ctx context.Context, cmds []Cmder) error {
+ cmds = wrapMultiExec(ctx, cmds)
+ return c.processTxPipelineHook(ctx, cmds)
+ },
+ }
+ pipe.init()
+ return &pipe
+}
diff --git a/dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/redis_test.go b/dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.5.1/redis_test.go
index 095da2d..66d69c7 100644
--- a/dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/redis_test.go
+++ b/dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.5.1/redis_test.go
@@ -4,28 +4,33 @@ import (
"bytes"
"context"
"errors"
+ "fmt"
"net"
"testing"
"time"
- . "github.com/onsi/ginkgo"
- . "github.com/onsi/gomega"
+ . "github.com/bsm/ginkgo/v2"
+ . "github.com/bsm/gomega"
- "github.com/go-redis/redis/v8"
+ "github.com/redis/go-redis/v9"
)
-type redisHookError struct {
- redis.Hook
-}
+type redisHookError struct{}
var _ redis.Hook = redisHookError{}
-func (redisHookError) BeforeProcess(ctx context.Context, cmd redis.Cmder) (context.Context, error) {
- return ctx, nil
+func (redisHookError) DialHook(hook redis.DialHook) redis.DialHook {
+ return hook
+}
+
+func (redisHookError) ProcessHook(hook redis.ProcessHook) redis.ProcessHook {
+ return func(ctx context.Context, cmd redis.Cmder) error {
+ return errors.New("hook error")
+ }
}
-func (redisHookError) AfterProcess(ctx context.Context, cmd redis.Cmder) error {
- return errors.New("hook error")
+func (redisHookError) ProcessPipelineHook(hook redis.ProcessPipelineHook) redis.ProcessPipelineHook {
+ return hook
}
func TestHookError(t *testing.T) {
@@ -60,7 +65,11 @@ var _ = Describe("Client", func() {
})
It("should Stringer", func() {
- Expect(client.String()).To(Equal("Redis<:6380 db:15>"))
+ if RECluster {
+ Expect(client.String()).To(Equal(fmt.Sprintf("Redis<:%s db:0>", redisPort)))
+ } else {
+ Expect(client.String()).To(Equal(fmt.Sprintf("Redis<:%s db:15>", redisPort)))
+ }
})
It("supports context", func() {
@@ -71,7 +80,7 @@ var _ = Describe("Client", func() {
Expect(err).To(MatchError("context canceled"))
})
- It("supports WithTimeout", func() {
+ It("supports WithTimeout", Label("NonRedisEnterprise"), func() {
err := client.ClientPause(ctx, time.Second).Err()
Expect(err).NotTo(HaveOccurred())
@@ -136,17 +145,6 @@ var _ = Describe("Client", func() {
Expect(client.Ping(ctx).Err()).NotTo(HaveOccurred())
})
- It("should close pipeline without closing the client", func() {
- pipeline := client.Pipeline()
- Expect(pipeline.Close()).NotTo(HaveOccurred())
-
- pipeline.Ping(ctx)
- _, err := pipeline.Exec(ctx)
- Expect(err).To(MatchError("redis: client is closed"))
-
- Expect(client.Ping(ctx).Err()).NotTo(HaveOccurred())
- })
-
It("should close pubsub when client is closed", func() {
pubsub := client.Subscribe(ctx)
Expect(client.Close()).NotTo(HaveOccurred())
@@ -157,13 +155,7 @@ var _ = Describe("Client", func() {
Expect(pubsub.Close()).NotTo(HaveOccurred())
})
- It("should close pipeline when client is closed", func() {
- pipeline := client.Pipeline()
- Expect(client.Close()).NotTo(HaveOccurred())
- Expect(pipeline.Close()).NotTo(HaveOccurred())
- })
-
- It("should select DB", func() {
+ It("should select DB", Label("NonRedisEnterprise"), func() {
db2 := redis.NewClient(&redis.Options{
Addr: redisAddr,
DB: 2,
@@ -182,6 +174,48 @@ var _ = Describe("Client", func() {
Expect(db2.Close()).NotTo(HaveOccurred())
})
+ It("should client setname", func() {
+ opt := redisOptions()
+ opt.ClientName = "hi"
+ db := redis.NewClient(opt)
+
+ defer func() {
+ Expect(db.Close()).NotTo(HaveOccurred())
+ }()
+
+ Expect(db.Ping(ctx).Err()).NotTo(HaveOccurred())
+ val, err := db.ClientList(ctx).Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(val).Should(ContainSubstring("name=hi"))
+ })
+
+ It("should client PROTO 2", func() {
+ opt := redisOptions()
+ opt.Protocol = 2
+ db := redis.NewClient(opt)
+
+ defer func() {
+ Expect(db.Close()).NotTo(HaveOccurred())
+ }()
+
+ val, err := db.Do(ctx, "HELLO").Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(val).Should(ContainElements("proto", int64(2)))
+ })
+
+ It("should client PROTO 3", func() {
+ opt := redisOptions()
+ db := redis.NewClient(opt)
+
+ defer func() {
+ Expect(db.Close()).NotTo(HaveOccurred())
+ }()
+
+ val, err := db.Do(ctx, "HELLO").Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(val).Should(HaveKeyWithValue("proto", int64(3)))
+ })
+
It("processes custom commands", func() {
cmd := redis.NewCmd(ctx, "PING")
_ = client.Process(ctx, cmd)
@@ -313,9 +347,21 @@ var _ = Describe("Client", func() {
})
It("should Conn", func() {
- err := client.Conn(ctx).Get(ctx, "this-key-does-not-exist").Err()
+ err := client.Conn().Get(ctx, "this-key-does-not-exist").Err()
Expect(err).To(Equal(redis.Nil))
})
+
+ It("should set and scan net.IP", func() {
+ ip := net.ParseIP("192.168.1.1")
+ err := client.Set(ctx, "ip", ip, 0).Err()
+ Expect(err).NotTo(HaveOccurred())
+
+ var ip2 net.IP
+ err = client.Get(ctx, "ip").Scan(&ip2)
+ Expect(err).NotTo(HaveOccurred())
+
+ Expect(ip2).To(Equal(ip))
+ })
})
var _ = Describe("Client timeout", func() {
@@ -447,3 +493,143 @@ var _ = Describe("Client context cancelation", func() {
Expect(err).To(BeIdenticalTo(context.Canceled))
})
})
+
+var _ = Describe("Conn", func() {
+ var client *redis.Client
+
+ BeforeEach(func() {
+ client = redis.NewClient(redisOptions())
+ Expect(client.FlushDB(ctx).Err()).NotTo(HaveOccurred())
+ })
+
+ AfterEach(func() {
+ err := client.Close()
+ Expect(err).NotTo(HaveOccurred())
+ })
+
+ It("TxPipeline", Label("NonRedisEnterprise"), func() {
+ tx := client.Conn().TxPipeline()
+ tx.SwapDB(ctx, 0, 2)
+ tx.SwapDB(ctx, 1, 0)
+ _, err := tx.Exec(ctx)
+ Expect(err).NotTo(HaveOccurred())
+ })
+})
+
+var _ = Describe("Hook", func() {
+ var client *redis.Client
+
+ BeforeEach(func() {
+ client = redis.NewClient(redisOptions())
+ Expect(client.FlushDB(ctx).Err()).NotTo(HaveOccurred())
+ })
+
+ AfterEach(func() {
+ err := client.Close()
+ Expect(err).NotTo(HaveOccurred())
+ })
+
+ It("fifo", func() {
+ var res []string
+ client.AddHook(&hook{
+ processHook: func(hook redis.ProcessHook) redis.ProcessHook {
+ return func(ctx context.Context, cmd redis.Cmder) error {
+ res = append(res, "hook-1-process-start")
+ err := hook(ctx, cmd)
+ res = append(res, "hook-1-process-end")
+ return err
+ }
+ },
+ })
+ client.AddHook(&hook{
+ processHook: func(hook redis.ProcessHook) redis.ProcessHook {
+ return func(ctx context.Context, cmd redis.Cmder) error {
+ res = append(res, "hook-2-process-start")
+ err := hook(ctx, cmd)
+ res = append(res, "hook-2-process-end")
+ return err
+ }
+ },
+ })
+
+ err := client.Ping(ctx).Err()
+ Expect(err).NotTo(HaveOccurred())
+
+ Expect(res).To(Equal([]string{
+ "hook-1-process-start",
+ "hook-2-process-start",
+ "hook-2-process-end",
+ "hook-1-process-end",
+ }))
+ })
+
+ It("wrapped error in a hook", func() {
+ client.AddHook(&hook{
+ processHook: func(hook redis.ProcessHook) redis.ProcessHook {
+ return func(ctx context.Context, cmd redis.Cmder) error {
+ if err := hook(ctx, cmd); err != nil {
+ return fmt.Errorf("wrapped error: %w", err)
+ }
+ return nil
+ }
+ },
+ })
+ client.ScriptFlush(ctx)
+
+ script := redis.NewScript(`return 'Script and hook'`)
+
+ cmd := script.Run(ctx, client, nil)
+ Expect(cmd.Err()).NotTo(HaveOccurred())
+ Expect(cmd.Val()).To(Equal("Script and hook"))
+ })
+})
+
+var _ = Describe("Hook with MinIdleConns", func() {
+ var client *redis.Client
+
+ BeforeEach(func() {
+ options := redisOptions()
+ options.MinIdleConns = 1
+ client = redis.NewClient(options)
+ Expect(client.FlushDB(ctx).Err()).NotTo(HaveOccurred())
+ })
+
+ AfterEach(func() {
+ err := client.Close()
+ Expect(err).NotTo(HaveOccurred())
+ })
+
+ It("fifo", func() {
+ var res []string
+ client.AddHook(&hook{
+ processHook: func(hook redis.ProcessHook) redis.ProcessHook {
+ return func(ctx context.Context, cmd redis.Cmder) error {
+ res = append(res, "hook-1-process-start")
+ err := hook(ctx, cmd)
+ res = append(res, "hook-1-process-end")
+ return err
+ }
+ },
+ })
+ client.AddHook(&hook{
+ processHook: func(hook redis.ProcessHook) redis.ProcessHook {
+ return func(ctx context.Context, cmd redis.Cmder) error {
+ res = append(res, "hook-2-process-start")
+ err := hook(ctx, cmd)
+ res = append(res, "hook-2-process-end")
+ return err
+ }
+ },
+ })
+
+ err := client.Ping(ctx).Err()
+ Expect(err).NotTo(HaveOccurred())
+
+ Expect(res).To(Equal([]string{
+ "hook-1-process-start",
+ "hook-2-process-start",
+ "hook-2-process-end",
+ "hook-1-process-end",
+ }))
+ })
+})
diff --git a/dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/result.go b/dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.5.1/result.go
index 24cfd49..cfd4cf9 100644
--- a/dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/result.go
+++ b/dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.5.1/result.go
@@ -82,17 +82,17 @@ func NewBoolSliceResult(val []bool, err error) *BoolSliceCmd {
return &cmd
}
-// NewStringStringMapResult returns a StringStringMapCmd initialised with val and err for testing.
-func NewStringStringMapResult(val map[string]string, err error) *StringStringMapCmd {
- var cmd StringStringMapCmd
+// NewMapStringStringResult returns a MapStringStringCmd initialised with val and err for testing.
+func NewMapStringStringResult(val map[string]string, err error) *MapStringStringCmd {
+ var cmd MapStringStringCmd
cmd.val = val
cmd.SetErr(err)
return &cmd
}
-// NewStringIntMapCmdResult returns a StringIntMapCmd initialised with val and err for testing.
-func NewStringIntMapCmdResult(val map[string]int64, err error) *StringIntMapCmd {
- var cmd StringIntMapCmd
+// NewMapStringIntCmdResult returns a MapStringIntCmd initialised with val and err for testing.
+func NewMapStringIntCmdResult(val map[string]int64, err error) *MapStringIntCmd {
+ var cmd MapStringIntCmd
cmd.val = val
cmd.SetErr(err)
return &cmd
@@ -114,7 +114,7 @@ func NewZSliceCmdResult(val []Z, err error) *ZSliceCmd {
return &cmd
}
-// NewZWithKeyCmdResult returns a NewZWithKeyCmd initialised with val and err for testing.
+// NewZWithKeyCmdResult returns a ZWithKeyCmd initialised with val and err for testing.
func NewZWithKeyCmdResult(val *ZWithKey, err error) *ZWithKeyCmd {
var cmd ZWithKeyCmd
cmd.val = val
@@ -178,3 +178,11 @@ func NewXStreamSliceCmdResult(val []XStream, err error) *XStreamSliceCmd {
cmd.SetErr(err)
return &cmd
}
+
+// NewXPendingResult returns a XPendingCmd initialised with val and err for testing.
+func NewXPendingResult(val *XPending, err error) *XPendingCmd {
+ var cmd XPendingCmd
+ cmd.val = val
+ cmd.SetErr(err)
+ return &cmd
+}
diff --git a/dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/ring.go b/dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.5.1/ring.go
index 4df00fc..4ae0054 100644
--- a/dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/ring.go
+++ b/dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.5.1/ring.go
@@ -12,12 +12,12 @@ import (
"time"
"github.com/cespare/xxhash/v2"
- rendezvous "github.com/dgryski/go-rendezvous" //nolint
+ "github.com/dgryski/go-rendezvous" //nolint
- "github.com/go-redis/redis/v8/internal"
- "github.com/go-redis/redis/v8/internal/hashtag"
- "github.com/go-redis/redis/v8/internal/pool"
- "github.com/go-redis/redis/v8/internal/rand"
+ "github.com/redis/go-redis/v9/internal"
+ "github.com/redis/go-redis/v9/internal/hashtag"
+ "github.com/redis/go-redis/v9/internal/pool"
+ "github.com/redis/go-redis/v9/internal/rand"
)
var errRingShardsDown = errors.New("redis: all ring shards are down")
@@ -48,8 +48,11 @@ type RingOptions struct {
// Map of name => host:port addresses of ring shards.
Addrs map[string]string
- // NewClient creates a shard client with provided name and options.
- NewClient func(name string, opt *Options) *Client
+ // NewClient creates a shard client with provided options.
+ NewClient func(opt *Options) *Client
+
+ // ClientName will execute the `CLIENT SETNAME ClientName` command for each conn.
+ ClientName string
// Frequency of PING commands sent to check shards availability.
// Shard is considered down after 3 subsequent failed checks.
@@ -67,6 +70,7 @@ type RingOptions struct {
Dialer func(ctx context.Context, network, addr string) (net.Conn, error)
OnConnect func(ctx context.Context, cn *Conn) error
+ Protocol int
Username string
Password string
DB int
@@ -75,27 +79,32 @@ type RingOptions struct {
MinRetryBackoff time.Duration
MaxRetryBackoff time.Duration
- DialTimeout time.Duration
- ReadTimeout time.Duration
- WriteTimeout time.Duration
+ DialTimeout time.Duration
+ ReadTimeout time.Duration
+ WriteTimeout time.Duration
+ ContextTimeoutEnabled bool
// PoolFIFO uses FIFO mode for each node connection pool GET/PUT (default LIFO).
PoolFIFO bool
- PoolSize int
- MinIdleConns int
- MaxConnAge time.Duration
- PoolTimeout time.Duration
- IdleTimeout time.Duration
- IdleCheckFrequency time.Duration
+ PoolSize int
+ PoolTimeout time.Duration
+ MinIdleConns int
+ MaxIdleConns int
+ MaxActiveConns int
+ ConnMaxIdleTime time.Duration
+ ConnMaxLifetime time.Duration
TLSConfig *tls.Config
Limiter Limiter
+
+ DisableIndentity bool
+ IdentitySuffix string
}
func (opt *RingOptions) init() {
if opt.NewClient == nil {
- opt.NewClient = func(name string, opt *Options) *Client {
+ opt.NewClient = func(opt *Options) *Client {
return NewClient(opt)
}
}
@@ -129,29 +138,36 @@ func (opt *RingOptions) init() {
func (opt *RingOptions) clientOptions() *Options {
return &Options{
- Dialer: opt.Dialer,
- OnConnect: opt.OnConnect,
+ ClientName: opt.ClientName,
+ Dialer: opt.Dialer,
+ OnConnect: opt.OnConnect,
+ Protocol: opt.Protocol,
Username: opt.Username,
Password: opt.Password,
DB: opt.DB,
MaxRetries: -1,
- DialTimeout: opt.DialTimeout,
- ReadTimeout: opt.ReadTimeout,
- WriteTimeout: opt.WriteTimeout,
+ DialTimeout: opt.DialTimeout,
+ ReadTimeout: opt.ReadTimeout,
+ WriteTimeout: opt.WriteTimeout,
+ ContextTimeoutEnabled: opt.ContextTimeoutEnabled,
- PoolFIFO: opt.PoolFIFO,
- PoolSize: opt.PoolSize,
- MinIdleConns: opt.MinIdleConns,
- MaxConnAge: opt.MaxConnAge,
- PoolTimeout: opt.PoolTimeout,
- IdleTimeout: opt.IdleTimeout,
- IdleCheckFrequency: opt.IdleCheckFrequency,
+ PoolFIFO: opt.PoolFIFO,
+ PoolSize: opt.PoolSize,
+ PoolTimeout: opt.PoolTimeout,
+ MinIdleConns: opt.MinIdleConns,
+ MaxIdleConns: opt.MaxIdleConns,
+ MaxActiveConns: opt.MaxActiveConns,
+ ConnMaxIdleTime: opt.ConnMaxIdleTime,
+ ConnMaxLifetime: opt.ConnMaxLifetime,
TLSConfig: opt.TLSConfig,
Limiter: opt.Limiter,
+
+ DisableIndentity: opt.DisableIndentity,
+ IdentitySuffix: opt.IdentitySuffix,
}
}
@@ -160,14 +176,16 @@ func (opt *RingOptions) clientOptions() *Options {
type ringShard struct {
Client *Client
down int32
+ addr string
}
-func newRingShard(opt *RingOptions, name, addr string) *ringShard {
+func newRingShard(opt *RingOptions, addr string) *ringShard {
clopt := opt.clientOptions()
clopt.Addr = addr
return &ringShard{
- Client: opt.NewClient(name, clopt),
+ Client: opt.NewClient(clopt),
+ addr: addr,
}
}
@@ -208,161 +226,237 @@ func (shard *ringShard) Vote(up bool) bool {
//------------------------------------------------------------------------------
-type ringShards struct {
+type ringSharding struct {
opt *RingOptions
- mu sync.RWMutex
- hash ConsistentHash
- shards map[string]*ringShard // read only
- list []*ringShard // read only
- numShard int
- closed bool
+ mu sync.RWMutex
+ shards *ringShards
+ closed bool
+ hash ConsistentHash
+ numShard int
+ onNewNode []func(rdb *Client)
+
+ // ensures exclusive access to SetAddrs so there is no need
+ // to hold mu for the duration of potentially long shard creation
+ setAddrsMu sync.Mutex
+}
+
+type ringShards struct {
+ m map[string]*ringShard
+ list []*ringShard
+}
+
+func newRingSharding(opt *RingOptions) *ringSharding {
+ c := &ringSharding{
+ opt: opt,
+ }
+ c.SetAddrs(opt.Addrs)
+
+ return c
+}
+
+func (c *ringSharding) OnNewNode(fn func(rdb *Client)) {
+ c.mu.Lock()
+ c.onNewNode = append(c.onNewNode, fn)
+ c.mu.Unlock()
}
-func newRingShards(opt *RingOptions) *ringShards {
- shards := make(map[string]*ringShard, len(opt.Addrs))
- list := make([]*ringShard, 0, len(shards))
+// SetAddrs replaces the shards in use, such that you can increase and
+// decrease number of shards, that you use. It will reuse shards that
+// existed before and close the ones that will not be used anymore.
+func (c *ringSharding) SetAddrs(addrs map[string]string) {
+ c.setAddrsMu.Lock()
+ defer c.setAddrsMu.Unlock()
- for name, addr := range opt.Addrs {
- shard := newRingShard(opt, name, addr)
- shards[name] = shard
+ cleanup := func(shards map[string]*ringShard) {
+ for addr, shard := range shards {
+ if err := shard.Client.Close(); err != nil {
+ internal.Logger.Printf(context.Background(), "shard.Close %s failed: %s", addr, err)
+ }
+ }
+ }
- list = append(list, shard)
+ c.mu.RLock()
+ if c.closed {
+ c.mu.RUnlock()
+ return
}
+ existing := c.shards
+ c.mu.RUnlock()
- c := &ringShards{
- opt: opt,
+ shards, created, unused := c.newRingShards(addrs, existing)
- shards: shards,
- list: list,
+ c.mu.Lock()
+ if c.closed {
+ cleanup(created)
+ c.mu.Unlock()
+ return
}
- c.rebalance()
+ c.shards = shards
+ c.rebalanceLocked()
+ c.mu.Unlock()
- return c
+ cleanup(unused)
}
-func (c *ringShards) List() []*ringShard {
+func (c *ringSharding) newRingShards(
+ addrs map[string]string, existing *ringShards,
+) (shards *ringShards, created, unused map[string]*ringShard) {
+ shards = &ringShards{m: make(map[string]*ringShard, len(addrs))}
+ created = make(map[string]*ringShard) // indexed by addr
+ unused = make(map[string]*ringShard) // indexed by addr
+
+ if existing != nil {
+ for _, shard := range existing.list {
+ unused[shard.addr] = shard
+ }
+ }
+
+ for name, addr := range addrs {
+ if shard, ok := unused[addr]; ok {
+ shards.m[name] = shard
+ delete(unused, addr)
+ } else {
+ shard := newRingShard(c.opt, addr)
+ shards.m[name] = shard
+ created[addr] = shard
+
+ for _, fn := range c.onNewNode {
+ fn(shard.Client)
+ }
+ }
+ }
+
+ for _, shard := range shards.m {
+ shards.list = append(shards.list, shard)
+ }
+
+ return
+}
+
+func (c *ringSharding) List() []*ringShard {
var list []*ringShard
c.mu.RLock()
if !c.closed {
- list = c.list
+ list = c.shards.list
}
c.mu.RUnlock()
return list
}
-func (c *ringShards) Hash(key string) string {
+func (c *ringSharding) Hash(key string) string {
key = hashtag.Key(key)
var hash string
c.mu.RLock()
+ defer c.mu.RUnlock()
+
if c.numShard > 0 {
hash = c.hash.Get(key)
}
- c.mu.RUnlock()
return hash
}
-func (c *ringShards) GetByKey(key string) (*ringShard, error) {
+func (c *ringSharding) GetByKey(key string) (*ringShard, error) {
key = hashtag.Key(key)
c.mu.RLock()
+ defer c.mu.RUnlock()
if c.closed {
- c.mu.RUnlock()
return nil, pool.ErrClosed
}
if c.numShard == 0 {
- c.mu.RUnlock()
return nil, errRingShardsDown
}
- hash := c.hash.Get(key)
- if hash == "" {
- c.mu.RUnlock()
+ shardName := c.hash.Get(key)
+ if shardName == "" {
return nil, errRingShardsDown
}
-
- shard := c.shards[hash]
- c.mu.RUnlock()
-
- return shard, nil
+ return c.shards.m[shardName], nil
}
-func (c *ringShards) GetByName(shardName string) (*ringShard, error) {
+func (c *ringSharding) GetByName(shardName string) (*ringShard, error) {
if shardName == "" {
return c.Random()
}
c.mu.RLock()
- shard := c.shards[shardName]
- c.mu.RUnlock()
- return shard, nil
+ defer c.mu.RUnlock()
+
+ return c.shards.m[shardName], nil
}
-func (c *ringShards) Random() (*ringShard, error) {
+func (c *ringSharding) Random() (*ringShard, error) {
return c.GetByKey(strconv.Itoa(rand.Int()))
}
-// heartbeat monitors state of each shard in the ring.
-func (c *ringShards) Heartbeat(frequency time.Duration) {
+// Heartbeat monitors state of each shard in the ring.
+func (c *ringSharding) Heartbeat(ctx context.Context, frequency time.Duration) {
ticker := time.NewTicker(frequency)
defer ticker.Stop()
- ctx := context.Background()
- for range ticker.C {
- var rebalance bool
-
- for _, shard := range c.List() {
- err := shard.Client.Ping(ctx).Err()
- isUp := err == nil || err == pool.ErrPoolTimeout
- if shard.Vote(isUp) {
- internal.Logger.Printf(context.Background(), "ring shard state changed: %s", shard)
- rebalance = true
+ for {
+ select {
+ case <-ticker.C:
+ var rebalance bool
+
+ for _, shard := range c.List() {
+ err := shard.Client.Ping(ctx).Err()
+ isUp := err == nil || err == pool.ErrPoolTimeout
+ if shard.Vote(isUp) {
+ internal.Logger.Printf(ctx, "ring shard state changed: %s", shard)
+ rebalance = true
+ }
}
- }
- if rebalance {
- c.rebalance()
+ if rebalance {
+ c.mu.Lock()
+ c.rebalanceLocked()
+ c.mu.Unlock()
+ }
+ case <-ctx.Done():
+ return
}
}
}
-// rebalance removes dead shards from the Ring.
-func (c *ringShards) rebalance() {
- c.mu.RLock()
- shards := c.shards
- c.mu.RUnlock()
+// rebalanceLocked removes dead shards from the Ring.
+// Requires c.mu locked.
+func (c *ringSharding) rebalanceLocked() {
+ if c.closed {
+ return
+ }
+ if c.shards == nil {
+ return
+ }
- liveShards := make([]string, 0, len(shards))
+ liveShards := make([]string, 0, len(c.shards.m))
- for name, shard := range shards {
+ for name, shard := range c.shards.m {
if shard.IsUp() {
liveShards = append(liveShards, name)
}
}
- hash := c.opt.NewConsistentHash(liveShards)
-
- c.mu.Lock()
- c.hash = hash
+ c.hash = c.opt.NewConsistentHash(liveShards)
c.numShard = len(liveShards)
- c.mu.Unlock()
}
-func (c *ringShards) Len() int {
+func (c *ringSharding) Len() int {
c.mu.RLock()
- l := c.numShard
- c.mu.RUnlock()
- return l
+ defer c.mu.RUnlock()
+
+ return c.numShard
}
-func (c *ringShards) Close() error {
+func (c *ringSharding) Close() error {
c.mu.Lock()
defer c.mu.Unlock()
@@ -372,26 +466,22 @@ func (c *ringShards) Close() error {
c.closed = true
var firstErr error
- for _, shard := range c.shards {
+
+ for _, shard := range c.shards.list {
if err := shard.Client.Close(); err != nil && firstErr == nil {
firstErr = err
}
}
+
c.hash = nil
c.shards = nil
- c.list = nil
+ c.numShard = 0
return firstErr
}
//------------------------------------------------------------------------------
-type ring struct {
- opt *RingOptions
- shards *ringShards
- cmdsInfoCache *cmdsInfoCache //nolint:structcheck
-}
-
// Ring is a Redis client that uses consistent hashing to distribute
// keys across multiple Redis servers (shards). It's safe for
// concurrent use by multiple goroutines.
@@ -407,47 +497,49 @@ type ring struct {
// and can tolerate losing data when one of the servers dies.
// Otherwise you should use Redis Cluster.
type Ring struct {
- *ring
cmdable
- hooks
- ctx context.Context
+ hooksMixin
+
+ opt *RingOptions
+ sharding *ringSharding
+ cmdsInfoCache *cmdsInfoCache
+ heartbeatCancelFn context.CancelFunc
}
func NewRing(opt *RingOptions) *Ring {
opt.init()
+ hbCtx, hbCancel := context.WithCancel(context.Background())
+
ring := Ring{
- ring: &ring{
- opt: opt,
- shards: newRingShards(opt),
- },
- ctx: context.Background(),
+ opt: opt,
+ sharding: newRingSharding(opt),
+ heartbeatCancelFn: hbCancel,
}
ring.cmdsInfoCache = newCmdsInfoCache(ring.cmdsInfo)
ring.cmdable = ring.Process
- go ring.shards.Heartbeat(opt.HeartbeatFrequency)
+ ring.initHooks(hooks{
+ process: ring.process,
+ pipeline: func(ctx context.Context, cmds []Cmder) error {
+ return ring.generalProcessPipeline(ctx, cmds, false)
+ },
+ txPipeline: func(ctx context.Context, cmds []Cmder) error {
+ return ring.generalProcessPipeline(ctx, cmds, true)
+ },
+ })
- return &ring
-}
+ go ring.sharding.Heartbeat(hbCtx, opt.HeartbeatFrequency)
-func (c *Ring) Context() context.Context {
- return c.ctx
+ return &ring
}
-func (c *Ring) WithContext(ctx context.Context) *Ring {
- if ctx == nil {
- panic("nil context")
- }
- clone := *c
- clone.cmdable = clone.Process
- clone.hooks.lock()
- clone.ctx = ctx
- return &clone
+func (c *Ring) SetAddrs(addrs map[string]string) {
+ c.sharding.SetAddrs(addrs)
}
-// Do creates a Cmd from the args and processes the cmd.
+// Do create a Cmd from the args and processes the cmd.
func (c *Ring) Do(ctx context.Context, args ...interface{}) *Cmd {
cmd := NewCmd(ctx, args...)
_ = c.Process(ctx, cmd)
@@ -455,7 +547,9 @@ func (c *Ring) Do(ctx context.Context, args ...interface{}) *Cmd {
}
func (c *Ring) Process(ctx context.Context, cmd Cmder) error {
- return c.hooks.process(ctx, cmd, c.process)
+ err := c.processHook(ctx, cmd)
+ cmd.SetErr(err)
+ return err
}
// Options returns read-only Options that were used to create the client.
@@ -469,7 +563,7 @@ func (c *Ring) retryBackoff(attempt int) time.Duration {
// PoolStats returns accumulated connection pool stats.
func (c *Ring) PoolStats() *PoolStats {
- shards := c.shards.List()
+ shards := c.sharding.List()
var acc PoolStats
for _, shard := range shards {
s := shard.Client.connPool.Stats()
@@ -484,7 +578,7 @@ func (c *Ring) PoolStats() *PoolStats {
// Len returns the current number of shards in the ring.
func (c *Ring) Len() int {
- return c.shards.Len()
+ return c.sharding.Len()
}
// Subscribe subscribes the client to the specified channels.
@@ -493,7 +587,7 @@ func (c *Ring) Subscribe(ctx context.Context, channels ...string) *PubSub {
panic("at least one channel is required")
}
- shard, err := c.shards.GetByKey(channels[0])
+ shard, err := c.sharding.GetByKey(channels[0])
if err != nil {
// TODO: return PubSub with sticky error
panic(err)
@@ -507,7 +601,7 @@ func (c *Ring) PSubscribe(ctx context.Context, channels ...string) *PubSub {
panic("at least one channel is required")
}
- shard, err := c.shards.GetByKey(channels[0])
+ shard, err := c.sharding.GetByKey(channels[0])
if err != nil {
// TODO: return PubSub with sticky error
panic(err)
@@ -515,13 +609,30 @@ func (c *Ring) PSubscribe(ctx context.Context, channels ...string) *PubSub {
return shard.Client.PSubscribe(ctx, channels...)
}
+// SSubscribe Subscribes the client to the specified shard channels.
+func (c *Ring) SSubscribe(ctx context.Context, channels ...string) *PubSub {
+ if len(channels) == 0 {
+ panic("at least one channel is required")
+ }
+ shard, err := c.sharding.GetByKey(channels[0])
+ if err != nil {
+ // TODO: return PubSub with sticky error
+ panic(err)
+ }
+ return shard.Client.SSubscribe(ctx, channels...)
+}
+
+func (c *Ring) OnNewNode(fn func(rdb *Client)) {
+ c.sharding.OnNewNode(fn)
+}
+
// ForEachShard concurrently calls the fn on each live shard in the ring.
// It returns the first error if any.
func (c *Ring) ForEachShard(
ctx context.Context,
fn func(ctx context.Context, client *Client) error,
) error {
- shards := c.shards.List()
+ shards := c.sharding.List()
var wg sync.WaitGroup
errCh := make(chan error, 1)
for _, shard := range shards {
@@ -552,7 +663,7 @@ func (c *Ring) ForEachShard(
}
func (c *Ring) cmdsInfo(ctx context.Context) (map[string]*CommandInfo, error) {
- shards := c.shards.List()
+ shards := c.sharding.List()
var firstErr error
for _, shard := range shards {
cmdsInfo, err := shard.Client.Command(ctx).Result()
@@ -569,26 +680,13 @@ func (c *Ring) cmdsInfo(ctx context.Context) (map[string]*CommandInfo, error) {
return nil, firstErr
}
-func (c *Ring) cmdInfo(ctx context.Context, name string) *CommandInfo {
- cmdsInfo, err := c.cmdsInfoCache.Get(ctx)
- if err != nil {
- return nil
- }
- info := cmdsInfo[name]
- if info == nil {
- internal.Logger.Printf(ctx, "info for cmd=%s not found", name)
- }
- return info
-}
-
func (c *Ring) cmdShard(ctx context.Context, cmd Cmder) (*ringShard, error) {
- cmdInfo := c.cmdInfo(ctx, cmd.Name())
- pos := cmdFirstKeyPos(cmd, cmdInfo)
+ pos := cmdFirstKeyPos(cmd)
if pos == 0 {
- return c.shards.Random()
+ return c.sharding.Random()
}
firstKey := cmd.stringArg(pos)
- return c.shards.GetByKey(firstKey)
+ return c.sharding.GetByKey(firstKey)
}
func (c *Ring) process(ctx context.Context, cmd Cmder) error {
@@ -619,47 +717,41 @@ func (c *Ring) Pipelined(ctx context.Context, fn func(Pipeliner) error) ([]Cmder
func (c *Ring) Pipeline() Pipeliner {
pipe := Pipeline{
- ctx: c.ctx,
- exec: c.processPipeline,
+ exec: pipelineExecer(c.processPipelineHook),
}
pipe.init()
return &pipe
}
-func (c *Ring) processPipeline(ctx context.Context, cmds []Cmder) error {
- return c.hooks.processPipeline(ctx, cmds, func(ctx context.Context, cmds []Cmder) error {
- return c.generalProcessPipeline(ctx, cmds, false)
- })
-}
-
func (c *Ring) TxPipelined(ctx context.Context, fn func(Pipeliner) error) ([]Cmder, error) {
return c.TxPipeline().Pipelined(ctx, fn)
}
func (c *Ring) TxPipeline() Pipeliner {
pipe := Pipeline{
- ctx: c.ctx,
- exec: c.processTxPipeline,
+ exec: func(ctx context.Context, cmds []Cmder) error {
+ cmds = wrapMultiExec(ctx, cmds)
+ return c.processTxPipelineHook(ctx, cmds)
+ },
}
pipe.init()
return &pipe
}
-func (c *Ring) processTxPipeline(ctx context.Context, cmds []Cmder) error {
- return c.hooks.processPipeline(ctx, cmds, func(ctx context.Context, cmds []Cmder) error {
- return c.generalProcessPipeline(ctx, cmds, true)
- })
-}
-
func (c *Ring) generalProcessPipeline(
ctx context.Context, cmds []Cmder, tx bool,
) error {
+ if tx {
+ // Trim multi .. exec.
+ cmds = cmds[1 : len(cmds)-1]
+ }
+
cmdsMap := make(map[string][]Cmder)
+
for _, cmd := range cmds {
- cmdInfo := c.cmdInfo(ctx, cmd.Name())
- hash := cmd.stringArg(cmdFirstKeyPos(cmd, cmdInfo))
+ hash := cmd.stringArg(cmdFirstKeyPos(cmd))
if hash != "" {
- hash = c.shards.Hash(hash)
+ hash = c.sharding.Hash(hash)
}
cmdsMap[hash] = append(cmdsMap[hash], cmd)
}
@@ -670,7 +762,19 @@ func (c *Ring) generalProcessPipeline(
go func(hash string, cmds []Cmder) {
defer wg.Done()
- _ = c.processShardPipeline(ctx, hash, cmds, tx)
+ // TODO: retry?
+ shard, err := c.sharding.GetByName(hash)
+ if err != nil {
+ setCmdsErr(cmds, err)
+ return
+ }
+
+ if tx {
+ cmds = wrapMultiExec(ctx, cmds)
+ _ = shard.Client.processTxPipelineHook(ctx, cmds)
+ } else {
+ _ = shard.Client.processPipelineHook(ctx, cmds)
+ }
}(hash, cmds)
}
@@ -678,31 +782,16 @@ func (c *Ring) generalProcessPipeline(
return cmdsFirstErr(cmds)
}
-func (c *Ring) processShardPipeline(
- ctx context.Context, hash string, cmds []Cmder, tx bool,
-) error {
- // TODO: retry?
- shard, err := c.shards.GetByName(hash)
- if err != nil {
- setCmdsErr(cmds, err)
- return err
- }
-
- if tx {
- return shard.Client.processTxPipeline(ctx, cmds)
- }
- return shard.Client.processPipeline(ctx, cmds)
-}
-
func (c *Ring) Watch(ctx context.Context, fn func(*Tx) error, keys ...string) error {
if len(keys) == 0 {
return fmt.Errorf("redis: Watch requires at least one key")
}
var shards []*ringShard
+
for _, key := range keys {
if key != "" {
- shard, err := c.shards.GetByKey(hashtag.Key(key))
+ shard, err := c.sharding.GetByKey(hashtag.Key(key))
if err != nil {
return err
}
@@ -732,5 +821,7 @@ func (c *Ring) Watch(ctx context.Context, fn func(*Tx) error, keys ...string) er
// It is rare to Close a Ring, as the Ring is meant to be long-lived
// and shared between many goroutines.
func (c *Ring) Close() error {
- return c.shards.Close()
+ c.heartbeatCancelFn()
+
+ return c.sharding.Close()
}
diff --git a/dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/ring_test.go b/dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.5.1/ring_test.go
index 03a49fd..b3017f6 100644
--- a/dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/ring_test.go
+++ b/dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.5.1/ring_test.go
@@ -9,12 +9,43 @@ import (
"sync"
"time"
- . "github.com/onsi/ginkgo"
- . "github.com/onsi/gomega"
+ . "github.com/bsm/ginkgo/v2"
+ . "github.com/bsm/gomega"
- "github.com/go-redis/redis/v8"
+ "github.com/redis/go-redis/v9"
)
+var _ = Describe("Redis Ring PROTO 2", func() {
+ const heartbeat = 100 * time.Millisecond
+
+ var ring *redis.Ring
+
+ BeforeEach(func() {
+ opt := redisRingOptions()
+ opt.Protocol = 2
+ opt.HeartbeatFrequency = heartbeat
+ ring = redis.NewRing(opt)
+
+ err := ring.ForEachShard(ctx, func(ctx context.Context, cl *redis.Client) error {
+ return cl.FlushDB(ctx).Err()
+ })
+ Expect(err).NotTo(HaveOccurred())
+ })
+
+ AfterEach(func() {
+ Expect(ring.Close()).NotTo(HaveOccurred())
+ })
+
+ It("should ring PROTO 2", func() {
+ _ = ring.ForEachShard(ctx, func(ctx context.Context, c *redis.Client) error {
+ val, err := c.Do(ctx, "HELLO").Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(val).Should(ContainElements("proto", int64(2)))
+ return nil
+ })
+ })
+})
+
var _ = Describe("Redis Ring", func() {
const heartbeat = 100 * time.Millisecond
@@ -29,6 +60,7 @@ var _ = Describe("Redis Ring", func() {
BeforeEach(func() {
opt := redisRingOptions()
+ opt.ClientName = "ring_hi"
opt.HeartbeatFrequency = heartbeat
ring = redis.NewRing(opt)
@@ -50,6 +82,29 @@ var _ = Describe("Redis Ring", func() {
Expect(err).To(MatchError("context canceled"))
})
+ It("should ring client setname", func() {
+ err := ring.ForEachShard(ctx, func(ctx context.Context, c *redis.Client) error {
+ return c.Ping(ctx).Err()
+ })
+ Expect(err).NotTo(HaveOccurred())
+
+ _ = ring.ForEachShard(ctx, func(ctx context.Context, c *redis.Client) error {
+ val, err := c.ClientList(ctx).Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(val).Should(ContainSubstring("name=ring_hi"))
+ return nil
+ })
+ })
+
+ It("should ring PROTO 3", func() {
+ _ = ring.ForEachShard(ctx, func(ctx context.Context, c *redis.Client) error {
+ val, err := c.Do(ctx, "HELLO").Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(val).Should(HaveKeyWithValue("proto", int64(3)))
+ return nil
+ })
+ })
+
It("distributes keys", func() {
setRingKeys()
@@ -113,7 +168,81 @@ var _ = Describe("Redis Ring", func() {
Expect(ringShard2.Info(ctx, "keyspace").Val()).To(ContainSubstring("keys=100"))
})
+ Describe("[new] dynamic setting ring shards", func() {
+ It("downscale shard and check reuse shard, upscale shard and check reuse", func() {
+ Expect(ring.Len(), 2)
+
+ wantShard := ring.ShardByName("ringShardOne")
+ ring.SetAddrs(map[string]string{
+ "ringShardOne": ":" + ringShard1Port,
+ })
+ Expect(ring.Len(), 1)
+ gotShard := ring.ShardByName("ringShardOne")
+ Expect(gotShard).To(BeIdenticalTo(wantShard))
+
+ ring.SetAddrs(map[string]string{
+ "ringShardOne": ":" + ringShard1Port,
+ "ringShardTwo": ":" + ringShard2Port,
+ })
+ Expect(ring.Len(), 2)
+ gotShard = ring.ShardByName("ringShardOne")
+ Expect(gotShard).To(BeIdenticalTo(wantShard))
+ })
+
+ It("uses 3 shards after setting it to 3 shards", func() {
+ Expect(ring.Len(), 2)
+
+ shardName1 := "ringShardOne"
+ shardAddr1 := ":" + ringShard1Port
+ wantShard1 := ring.ShardByName(shardName1)
+ shardName2 := "ringShardTwo"
+ shardAddr2 := ":" + ringShard2Port
+ wantShard2 := ring.ShardByName(shardName2)
+ shardName3 := "ringShardThree"
+ shardAddr3 := ":" + ringShard3Port
+
+ ring.SetAddrs(map[string]string{
+ shardName1: shardAddr1,
+ shardName2: shardAddr2,
+ shardName3: shardAddr3,
+ })
+ Expect(ring.Len(), 3)
+ gotShard1 := ring.ShardByName(shardName1)
+ gotShard2 := ring.ShardByName(shardName2)
+ gotShard3 := ring.ShardByName(shardName3)
+ Expect(gotShard1).To(BeIdenticalTo(wantShard1))
+ Expect(gotShard2).To(BeIdenticalTo(wantShard2))
+ Expect(gotShard3).ToNot(BeNil())
+
+ ring.SetAddrs(map[string]string{
+ shardName1: shardAddr1,
+ shardName2: shardAddr2,
+ })
+ Expect(ring.Len(), 2)
+ gotShard1 = ring.ShardByName(shardName1)
+ gotShard2 = ring.ShardByName(shardName2)
+ gotShard3 = ring.ShardByName(shardName3)
+ Expect(gotShard1).To(BeIdenticalTo(wantShard1))
+ Expect(gotShard2).To(BeIdenticalTo(wantShard2))
+ Expect(gotShard3).To(BeNil())
+ })
+ })
Describe("pipeline", func() {
+ It("doesn't panic closed ring, returns error", func() {
+ pipe := ring.Pipeline()
+ for i := 0; i < 3; i++ {
+ err := pipe.Set(ctx, fmt.Sprintf("key%d", i), "value", 0).Err()
+ Expect(err).NotTo(HaveOccurred())
+ }
+
+ Expect(ring.Close()).NotTo(HaveOccurred())
+
+ Expect(func() {
+ _, execErr := pipe.Exec(ctx)
+ Expect(execErr).To(HaveOccurred())
+ }).NotTo(Panic())
+ })
+
It("distributes keys", func() {
pipe := ring.Pipeline()
for i := 0; i < 100; i++ {
@@ -123,7 +252,6 @@ var _ = Describe("Redis Ring", func() {
cmds, err := pipe.Exec(ctx)
Expect(err).NotTo(HaveOccurred())
Expect(cmds).To(HaveLen(100))
- Expect(pipe.Close()).NotTo(HaveOccurred())
for _, cmd := range cmds {
Expect(cmd.Err()).NotTo(HaveOccurred())
@@ -176,7 +304,8 @@ var _ = Describe("Redis Ring", func() {
Describe("new client callback", func() {
It("can be initialized with a new client callback", func() {
opts := redisRingOptions()
- opts.NewClient = func(name string, opt *redis.Options) *redis.Client {
+ opts.NewClient = func(opt *redis.Options) *redis.Client {
+ opt.Username = "username1"
opt.Password = "password1"
return redis.NewClient(opt)
}
@@ -184,7 +313,7 @@ var _ = Describe("Redis Ring", func() {
err := ring.Ping(ctx).Err()
Expect(err).To(HaveOccurred())
- Expect(err.Error()).To(ContainSubstring("ERR AUTH"))
+ Expect(err.Error()).To(ContainSubstring("WRONGPASS"))
})
})
@@ -203,29 +332,35 @@ var _ = Describe("Redis Ring", func() {
var stack []string
ring.AddHook(&hook{
- beforeProcess: func(ctx context.Context, cmd redis.Cmder) (context.Context, error) {
- Expect(cmd.String()).To(Equal("ping: "))
- stack = append(stack, "ring.BeforeProcess")
- return ctx, nil
- },
- afterProcess: func(ctx context.Context, cmd redis.Cmder) error {
- Expect(cmd.String()).To(Equal("ping: PONG"))
- stack = append(stack, "ring.AfterProcess")
- return nil
+ processHook: func(hook redis.ProcessHook) redis.ProcessHook {
+ return func(ctx context.Context, cmd redis.Cmder) error {
+ Expect(cmd.String()).To(Equal("ping: "))
+ stack = append(stack, "ring.BeforeProcess")
+
+ err := hook(ctx, cmd)
+
+ Expect(cmd.String()).To(Equal("ping: PONG"))
+ stack = append(stack, "ring.AfterProcess")
+
+ return err
+ }
},
})
ring.ForEachShard(ctx, func(ctx context.Context, shard *redis.Client) error {
shard.AddHook(&hook{
- beforeProcess: func(ctx context.Context, cmd redis.Cmder) (context.Context, error) {
- Expect(cmd.String()).To(Equal("ping: "))
- stack = append(stack, "shard.BeforeProcess")
- return ctx, nil
- },
- afterProcess: func(ctx context.Context, cmd redis.Cmder) error {
- Expect(cmd.String()).To(Equal("ping: PONG"))
- stack = append(stack, "shard.AfterProcess")
- return nil
+ processHook: func(hook redis.ProcessHook) redis.ProcessHook {
+ return func(ctx context.Context, cmd redis.Cmder) error {
+ Expect(cmd.String()).To(Equal("ping: "))
+ stack = append(stack, "shard.BeforeProcess")
+
+ err := hook(ctx, cmd)
+
+ Expect(cmd.String()).To(Equal("ping: PONG"))
+ stack = append(stack, "shard.AfterProcess")
+
+ return err
+ }
},
})
return nil
@@ -248,33 +383,39 @@ var _ = Describe("Redis Ring", func() {
var stack []string
ring.AddHook(&hook{
- beforeProcessPipeline: func(ctx context.Context, cmds []redis.Cmder) (context.Context, error) {
- Expect(cmds).To(HaveLen(1))
- Expect(cmds[0].String()).To(Equal("ping: "))
- stack = append(stack, "ring.BeforeProcessPipeline")
- return ctx, nil
- },
- afterProcessPipeline: func(ctx context.Context, cmds []redis.Cmder) error {
- Expect(cmds).To(HaveLen(1))
- Expect(cmds[0].String()).To(Equal("ping: PONG"))
- stack = append(stack, "ring.AfterProcessPipeline")
- return nil
+ processPipelineHook: func(hook redis.ProcessPipelineHook) redis.ProcessPipelineHook {
+ return func(ctx context.Context, cmds []redis.Cmder) error {
+ Expect(cmds).To(HaveLen(1))
+ Expect(cmds[0].String()).To(Equal("ping: "))
+ stack = append(stack, "ring.BeforeProcessPipeline")
+
+ err := hook(ctx, cmds)
+
+ Expect(cmds).To(HaveLen(1))
+ Expect(cmds[0].String()).To(Equal("ping: PONG"))
+ stack = append(stack, "ring.AfterProcessPipeline")
+
+ return err
+ }
},
})
ring.ForEachShard(ctx, func(ctx context.Context, shard *redis.Client) error {
shard.AddHook(&hook{
- beforeProcessPipeline: func(ctx context.Context, cmds []redis.Cmder) (context.Context, error) {
- Expect(cmds).To(HaveLen(1))
- Expect(cmds[0].String()).To(Equal("ping: "))
- stack = append(stack, "shard.BeforeProcessPipeline")
- return ctx, nil
- },
- afterProcessPipeline: func(ctx context.Context, cmds []redis.Cmder) error {
- Expect(cmds).To(HaveLen(1))
- Expect(cmds[0].String()).To(Equal("ping: PONG"))
- stack = append(stack, "shard.AfterProcessPipeline")
- return nil
+ processPipelineHook: func(hook redis.ProcessPipelineHook) redis.ProcessPipelineHook {
+ return func(ctx context.Context, cmds []redis.Cmder) error {
+ Expect(cmds).To(HaveLen(1))
+ Expect(cmds[0].String()).To(Equal("ping: "))
+ stack = append(stack, "shard.BeforeProcessPipeline")
+
+ err := hook(ctx, cmds)
+
+ Expect(cmds).To(HaveLen(1))
+ Expect(cmds[0].String()).To(Equal("ping: PONG"))
+ stack = append(stack, "shard.AfterProcessPipeline")
+
+ return err
+ }
},
})
return nil
@@ -300,33 +441,43 @@ var _ = Describe("Redis Ring", func() {
var stack []string
ring.AddHook(&hook{
- beforeProcessPipeline: func(ctx context.Context, cmds []redis.Cmder) (context.Context, error) {
- Expect(cmds).To(HaveLen(1))
- Expect(cmds[0].String()).To(Equal("ping: "))
- stack = append(stack, "ring.BeforeProcessPipeline")
- return ctx, nil
- },
- afterProcessPipeline: func(ctx context.Context, cmds []redis.Cmder) error {
- Expect(cmds).To(HaveLen(1))
- Expect(cmds[0].String()).To(Equal("ping: PONG"))
- stack = append(stack, "ring.AfterProcessPipeline")
- return nil
- },
- })
+ processPipelineHook: func(hook redis.ProcessPipelineHook) redis.ProcessPipelineHook {
+ return func(ctx context.Context, cmds []redis.Cmder) error {
+ defer GinkgoRecover()
- ring.ForEachShard(ctx, func(ctx context.Context, shard *redis.Client) error {
- shard.AddHook(&hook{
- beforeProcessPipeline: func(ctx context.Context, cmds []redis.Cmder) (context.Context, error) {
Expect(cmds).To(HaveLen(3))
Expect(cmds[1].String()).To(Equal("ping: "))
- stack = append(stack, "shard.BeforeProcessPipeline")
- return ctx, nil
- },
- afterProcessPipeline: func(ctx context.Context, cmds []redis.Cmder) error {
+ stack = append(stack, "ring.BeforeProcessPipeline")
+
+ err := hook(ctx, cmds)
+
Expect(cmds).To(HaveLen(3))
Expect(cmds[1].String()).To(Equal("ping: PONG"))
- stack = append(stack, "shard.AfterProcessPipeline")
- return nil
+ stack = append(stack, "ring.AfterProcessPipeline")
+
+ return err
+ }
+ },
+ })
+
+ ring.ForEachShard(ctx, func(ctx context.Context, shard *redis.Client) error {
+ shard.AddHook(&hook{
+ processPipelineHook: func(hook redis.ProcessPipelineHook) redis.ProcessPipelineHook {
+ return func(ctx context.Context, cmds []redis.Cmder) error {
+ defer GinkgoRecover()
+
+ Expect(cmds).To(HaveLen(3))
+ Expect(cmds[1].String()).To(Equal("ping: "))
+ stack = append(stack, "shard.BeforeProcessPipeline")
+
+ err := hook(ctx, cmds)
+
+ Expect(cmds).To(HaveLen(3))
+ Expect(cmds[1].String()).To(Equal("ping: PONG"))
+ stack = append(stack, "shard.AfterProcessPipeline")
+
+ return err
+ }
},
})
return nil
diff --git a/dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/script.go b/dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.5.1/script.go
index 5cab18d..626ab03 100644
--- a/dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/script.go
+++ b/dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.5.1/script.go
@@ -5,12 +5,13 @@ import (
"crypto/sha1"
"encoding/hex"
"io"
- "strings"
)
type Scripter interface {
Eval(ctx context.Context, script string, keys []string, args ...interface{}) *Cmd
EvalSha(ctx context.Context, sha1 string, keys []string, args ...interface{}) *Cmd
+ EvalRO(ctx context.Context, script string, keys []string, args ...interface{}) *Cmd
+ EvalShaRO(ctx context.Context, sha1 string, keys []string, args ...interface{}) *Cmd
ScriptExists(ctx context.Context, hashes ...string) *BoolSliceCmd
ScriptLoad(ctx context.Context, script string) *StringCmd
}
@@ -50,16 +51,34 @@ func (s *Script) Eval(ctx context.Context, c Scripter, keys []string, args ...in
return c.Eval(ctx, s.src, keys, args...)
}
+func (s *Script) EvalRO(ctx context.Context, c Scripter, keys []string, args ...interface{}) *Cmd {
+ return c.EvalRO(ctx, s.src, keys, args...)
+}
+
func (s *Script) EvalSha(ctx context.Context, c Scripter, keys []string, args ...interface{}) *Cmd {
return c.EvalSha(ctx, s.hash, keys, args...)
}
+func (s *Script) EvalShaRO(ctx context.Context, c Scripter, keys []string, args ...interface{}) *Cmd {
+ return c.EvalShaRO(ctx, s.hash, keys, args...)
+}
+
// Run optimistically uses EVALSHA to run the script. If script does not exist
// it is retried using EVAL.
func (s *Script) Run(ctx context.Context, c Scripter, keys []string, args ...interface{}) *Cmd {
r := s.EvalSha(ctx, c, keys, args...)
- if err := r.Err(); err != nil && strings.HasPrefix(err.Error(), "NOSCRIPT ") {
+ if HasErrorPrefix(r.Err(), "NOSCRIPT") {
return s.Eval(ctx, c, keys, args...)
}
return r
}
+
+// RunRO optimistically uses EVALSHA_RO to run the script. If script does not exist
+// it is retried using EVAL_RO.
+func (s *Script) RunRO(ctx context.Context, c Scripter, keys []string, args ...interface{}) *Cmd {
+ r := s.EvalShaRO(ctx, c, keys, args...)
+ if HasErrorPrefix(r.Err(), "NOSCRIPT") {
+ return s.EvalRO(ctx, c, keys, args...)
+ }
+ return r
+}
diff --git a/dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.5.1/scripting_commands.go b/dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.5.1/scripting_commands.go
new file mode 100644
index 0000000..af9c339
--- /dev/null
+++ b/dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.5.1/scripting_commands.go
@@ -0,0 +1,215 @@
+package redis
+
+import "context"
+
+type ScriptingFunctionsCmdable interface {
+ Eval(ctx context.Context, script string, keys []string, args ...interface{}) *Cmd
+ EvalSha(ctx context.Context, sha1 string, keys []string, args ...interface{}) *Cmd
+ EvalRO(ctx context.Context, script string, keys []string, args ...interface{}) *Cmd
+ EvalShaRO(ctx context.Context, sha1 string, keys []string, args ...interface{}) *Cmd
+ ScriptExists(ctx context.Context, hashes ...string) *BoolSliceCmd
+ ScriptFlush(ctx context.Context) *StatusCmd
+ ScriptKill(ctx context.Context) *StatusCmd
+ ScriptLoad(ctx context.Context, script string) *StringCmd
+
+ FunctionLoad(ctx context.Context, code string) *StringCmd
+ FunctionLoadReplace(ctx context.Context, code string) *StringCmd
+ FunctionDelete(ctx context.Context, libName string) *StringCmd
+ FunctionFlush(ctx context.Context) *StringCmd
+ FunctionKill(ctx context.Context) *StringCmd
+ FunctionFlushAsync(ctx context.Context) *StringCmd
+ FunctionList(ctx context.Context, q FunctionListQuery) *FunctionListCmd
+ FunctionDump(ctx context.Context) *StringCmd
+ FunctionRestore(ctx context.Context, libDump string) *StringCmd
+ FunctionStats(ctx context.Context) *FunctionStatsCmd
+ FCall(ctx context.Context, function string, keys []string, args ...interface{}) *Cmd
+ FCallRo(ctx context.Context, function string, keys []string, args ...interface{}) *Cmd
+ FCallRO(ctx context.Context, function string, keys []string, args ...interface{}) *Cmd
+}
+
+func (c cmdable) Eval(ctx context.Context, script string, keys []string, args ...interface{}) *Cmd {
+ return c.eval(ctx, "eval", script, keys, args...)
+}
+
+func (c cmdable) EvalRO(ctx context.Context, script string, keys []string, args ...interface{}) *Cmd {
+ return c.eval(ctx, "eval_ro", script, keys, args...)
+}
+
+func (c cmdable) EvalSha(ctx context.Context, sha1 string, keys []string, args ...interface{}) *Cmd {
+ return c.eval(ctx, "evalsha", sha1, keys, args...)
+}
+
+func (c cmdable) EvalShaRO(ctx context.Context, sha1 string, keys []string, args ...interface{}) *Cmd {
+ return c.eval(ctx, "evalsha_ro", sha1, keys, args...)
+}
+
+func (c cmdable) eval(ctx context.Context, name, payload string, keys []string, args ...interface{}) *Cmd {
+ cmdArgs := make([]interface{}, 3+len(keys), 3+len(keys)+len(args))
+ cmdArgs[0] = name
+ cmdArgs[1] = payload
+ cmdArgs[2] = len(keys)
+ for i, key := range keys {
+ cmdArgs[3+i] = key
+ }
+ cmdArgs = appendArgs(cmdArgs, args)
+ cmd := NewCmd(ctx, cmdArgs...)
+
+ // it is possible that only args exist without a key.
+ // rdb.eval(ctx, eval, script, nil, arg1, arg2)
+ if len(keys) > 0 {
+ cmd.SetFirstKeyPos(3)
+ }
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) ScriptExists(ctx context.Context, hashes ...string) *BoolSliceCmd {
+ args := make([]interface{}, 2+len(hashes))
+ args[0] = "script"
+ args[1] = "exists"
+ for i, hash := range hashes {
+ args[2+i] = hash
+ }
+ cmd := NewBoolSliceCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) ScriptFlush(ctx context.Context) *StatusCmd {
+ cmd := NewStatusCmd(ctx, "script", "flush")
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) ScriptKill(ctx context.Context) *StatusCmd {
+ cmd := NewStatusCmd(ctx, "script", "kill")
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) ScriptLoad(ctx context.Context, script string) *StringCmd {
+ cmd := NewStringCmd(ctx, "script", "load", script)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+// ------------------------------------------------------------------------------
+
+// FunctionListQuery is used with FunctionList to query for Redis libraries
+//
+// LibraryNamePattern - Use an empty string to get all libraries.
+// - Use a glob-style pattern to match multiple libraries with a matching name
+// - Use a library's full name to match a single library
+// WithCode - If true, it will return the code of the library
+type FunctionListQuery struct {
+ LibraryNamePattern string
+ WithCode bool
+}
+
+func (c cmdable) FunctionLoad(ctx context.Context, code string) *StringCmd {
+ cmd := NewStringCmd(ctx, "function", "load", code)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) FunctionLoadReplace(ctx context.Context, code string) *StringCmd {
+ cmd := NewStringCmd(ctx, "function", "load", "replace", code)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) FunctionDelete(ctx context.Context, libName string) *StringCmd {
+ cmd := NewStringCmd(ctx, "function", "delete", libName)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) FunctionFlush(ctx context.Context) *StringCmd {
+ cmd := NewStringCmd(ctx, "function", "flush")
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) FunctionKill(ctx context.Context) *StringCmd {
+ cmd := NewStringCmd(ctx, "function", "kill")
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) FunctionFlushAsync(ctx context.Context) *StringCmd {
+ cmd := NewStringCmd(ctx, "function", "flush", "async")
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) FunctionList(ctx context.Context, q FunctionListQuery) *FunctionListCmd {
+ args := make([]interface{}, 2, 5)
+ args[0] = "function"
+ args[1] = "list"
+ if q.LibraryNamePattern != "" {
+ args = append(args, "libraryname", q.LibraryNamePattern)
+ }
+ if q.WithCode {
+ args = append(args, "withcode")
+ }
+ cmd := NewFunctionListCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) FunctionDump(ctx context.Context) *StringCmd {
+ cmd := NewStringCmd(ctx, "function", "dump")
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) FunctionRestore(ctx context.Context, libDump string) *StringCmd {
+ cmd := NewStringCmd(ctx, "function", "restore", libDump)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) FunctionStats(ctx context.Context) *FunctionStatsCmd {
+ cmd := NewFunctionStatsCmd(ctx, "function", "stats")
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) FCall(ctx context.Context, function string, keys []string, args ...interface{}) *Cmd {
+ cmdArgs := fcallArgs("fcall", function, keys, args...)
+ cmd := NewCmd(ctx, cmdArgs...)
+ if len(keys) > 0 {
+ cmd.SetFirstKeyPos(3)
+ }
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+// FCallRo this function simply calls FCallRO,
+// Deprecated: to maintain convention FCallRO.
+func (c cmdable) FCallRo(ctx context.Context, function string, keys []string, args ...interface{}) *Cmd {
+ return c.FCallRO(ctx, function, keys, args...)
+}
+
+func (c cmdable) FCallRO(ctx context.Context, function string, keys []string, args ...interface{}) *Cmd {
+ cmdArgs := fcallArgs("fcall_ro", function, keys, args...)
+ cmd := NewCmd(ctx, cmdArgs...)
+ if len(keys) > 0 {
+ cmd.SetFirstKeyPos(3)
+ }
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func fcallArgs(command string, function string, keys []string, args ...interface{}) []interface{} {
+ cmdArgs := make([]interface{}, 3+len(keys), 3+len(keys)+len(args))
+ cmdArgs[0] = command
+ cmdArgs[1] = function
+ cmdArgs[2] = len(keys)
+ for i, key := range keys {
+ cmdArgs[3+i] = key
+ }
+
+ cmdArgs = append(cmdArgs, args...)
+ return cmdArgs
+}
diff --git a/dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/scripts/bump_deps.sh b/dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.5.1/scripts/bump_deps.sh
index f294c4f..f294c4f 100644
--- a/dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/scripts/bump_deps.sh
+++ b/dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.5.1/scripts/bump_deps.sh
diff --git a/dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/scripts/release.sh b/dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.5.1/scripts/release.sh
index 2e78be6..cd4ddee 100644
--- a/dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/scripts/release.sh
+++ b/dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.5.1/scripts/release.sh
@@ -48,14 +48,15 @@ PACKAGE_DIRS=$(find . -mindepth 2 -type f -name 'go.mod' -exec dirname {} \; \
for dir in $PACKAGE_DIRS
do
printf "${dir}: go get -u && go mod tidy\n"
- (cd ./${dir} && go get -u && go mod tidy)
+ #(cd ./${dir} && go get -u && go mod tidy -compat=1.18)
done
for dir in $PACKAGE_DIRS
do
sed --in-place \
- "s/go-redis\/redis\([^ ]*\) v.*/go-redis\/redis\1 ${TAG}/" "${dir}/go.mod"
- (cd ./${dir} && go get -u && go mod tidy)
+ "s/redis\/go-redis\([^ ]*\) v.*/redis\/go-redis\1 ${TAG}/" "${dir}/go.mod"
+ #(cd ./${dir} && go get -u && go mod tidy -compat=1.18)
+ (cd ./${dir} && go mod tidy -compat=1.18)
done
sed --in-place "s/\(return \)\"[^\"]*\"/\1\"${TAG#v}\"/" ./version.go
diff --git a/dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/scripts/tag.sh b/dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.5.1/scripts/tag.sh
index 121f00e..121f00e 100644
--- a/dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/scripts/tag.sh
+++ b/dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.5.1/scripts/tag.sh
diff --git a/dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/sentinel.go b/dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.5.1/sentinel.go
index ec6221d..188f884 100644
--- a/dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/sentinel.go
+++ b/dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.5.1/sentinel.go
@@ -9,9 +9,9 @@ import (
"sync"
"time"
- "github.com/go-redis/redis/v8/internal"
- "github.com/go-redis/redis/v8/internal/pool"
- "github.com/go-redis/redis/v8/internal/rand"
+ "github.com/redis/go-redis/v9/internal"
+ "github.com/redis/go-redis/v9/internal/pool"
+ "github.com/redis/go-redis/v9/internal/rand"
)
//------------------------------------------------------------------------------
@@ -24,6 +24,9 @@ type FailoverOptions struct {
// A seed list of host:port addresses of sentinel nodes.
SentinelAddrs []string
+ // ClientName will execute the `CLIENT SETNAME ClientName` command for each conn.
+ ClientName string
+
// If specified with SentinelPassword, enables ACL-based authentication (via
// AUTH <user> <pass>).
SentinelUsername string
@@ -32,25 +35,26 @@ type FailoverOptions struct {
// authentication.
SentinelPassword string
- // Allows routing read-only commands to the closest master or slave node.
+ // Allows routing read-only commands to the closest master or replica node.
// This option only works with NewFailoverClusterClient.
RouteByLatency bool
- // Allows routing read-only commands to the random master or slave node.
+ // Allows routing read-only commands to the random master or replica node.
// This option only works with NewFailoverClusterClient.
RouteRandomly bool
- // Route all commands to slave read-only nodes.
- SlaveOnly bool
+ // Route all commands to replica read-only nodes.
+ ReplicaOnly bool
- // Use slaves disconnected with master when cannot get connected slaves
- // Now, this option only works in RandomSlaveAddr function.
- UseDisconnectedSlaves bool
+ // Use replicas disconnected with master when cannot get connected replicas
+ // Now, this option only works in RandomReplicaAddr function.
+ UseDisconnectedReplicas bool
// Following options are copied from Options struct.
Dialer func(ctx context.Context, network, addr string) (net.Conn, error)
OnConnect func(ctx context.Context, cn *Conn) error
+ Protocol int
Username string
Password string
DB int
@@ -59,31 +63,37 @@ type FailoverOptions struct {
MinRetryBackoff time.Duration
MaxRetryBackoff time.Duration
- DialTimeout time.Duration
- ReadTimeout time.Duration
- WriteTimeout time.Duration
+ DialTimeout time.Duration
+ ReadTimeout time.Duration
+ WriteTimeout time.Duration
+ ContextTimeoutEnabled bool
- // PoolFIFO uses FIFO mode for each node connection pool GET/PUT (default LIFO).
PoolFIFO bool
- PoolSize int
- MinIdleConns int
- MaxConnAge time.Duration
- PoolTimeout time.Duration
- IdleTimeout time.Duration
- IdleCheckFrequency time.Duration
+ PoolSize int
+ PoolTimeout time.Duration
+ MinIdleConns int
+ MaxIdleConns int
+ MaxActiveConns int
+ ConnMaxIdleTime time.Duration
+ ConnMaxLifetime time.Duration
TLSConfig *tls.Config
+
+ DisableIndentity bool
+ IdentitySuffix string
}
func (opt *FailoverOptions) clientOptions() *Options {
return &Options{
- Addr: "FailoverClient",
+ Addr: "FailoverClient",
+ ClientName: opt.ClientName,
Dialer: opt.Dialer,
OnConnect: opt.OnConnect,
DB: opt.DB,
+ Protocol: opt.Protocol,
Username: opt.Username,
Password: opt.Password,
@@ -91,25 +101,31 @@ func (opt *FailoverOptions) clientOptions() *Options {
MinRetryBackoff: opt.MinRetryBackoff,
MaxRetryBackoff: opt.MaxRetryBackoff,
- DialTimeout: opt.DialTimeout,
- ReadTimeout: opt.ReadTimeout,
- WriteTimeout: opt.WriteTimeout,
+ DialTimeout: opt.DialTimeout,
+ ReadTimeout: opt.ReadTimeout,
+ WriteTimeout: opt.WriteTimeout,
+ ContextTimeoutEnabled: opt.ContextTimeoutEnabled,
- PoolFIFO: opt.PoolFIFO,
- PoolSize: opt.PoolSize,
- PoolTimeout: opt.PoolTimeout,
- IdleTimeout: opt.IdleTimeout,
- IdleCheckFrequency: opt.IdleCheckFrequency,
- MinIdleConns: opt.MinIdleConns,
- MaxConnAge: opt.MaxConnAge,
+ PoolFIFO: opt.PoolFIFO,
+ PoolSize: opt.PoolSize,
+ PoolTimeout: opt.PoolTimeout,
+ MinIdleConns: opt.MinIdleConns,
+ MaxIdleConns: opt.MaxIdleConns,
+ MaxActiveConns: opt.MaxActiveConns,
+ ConnMaxIdleTime: opt.ConnMaxIdleTime,
+ ConnMaxLifetime: opt.ConnMaxLifetime,
TLSConfig: opt.TLSConfig,
+
+ DisableIndentity: opt.DisableIndentity,
+ IdentitySuffix: opt.IdentitySuffix,
}
}
func (opt *FailoverOptions) sentinelOptions(addr string) *Options {
return &Options{
- Addr: addr,
+ Addr: addr,
+ ClientName: opt.ClientName,
Dialer: opt.Dialer,
OnConnect: opt.OnConnect,
@@ -122,27 +138,35 @@ func (opt *FailoverOptions) sentinelOptions(addr string) *Options {
MinRetryBackoff: opt.MinRetryBackoff,
MaxRetryBackoff: opt.MaxRetryBackoff,
- DialTimeout: opt.DialTimeout,
- ReadTimeout: opt.ReadTimeout,
- WriteTimeout: opt.WriteTimeout,
+ DialTimeout: opt.DialTimeout,
+ ReadTimeout: opt.ReadTimeout,
+ WriteTimeout: opt.WriteTimeout,
+ ContextTimeoutEnabled: opt.ContextTimeoutEnabled,
- PoolFIFO: opt.PoolFIFO,
- PoolSize: opt.PoolSize,
- PoolTimeout: opt.PoolTimeout,
- IdleTimeout: opt.IdleTimeout,
- IdleCheckFrequency: opt.IdleCheckFrequency,
- MinIdleConns: opt.MinIdleConns,
- MaxConnAge: opt.MaxConnAge,
+ PoolFIFO: opt.PoolFIFO,
+ PoolSize: opt.PoolSize,
+ PoolTimeout: opt.PoolTimeout,
+ MinIdleConns: opt.MinIdleConns,
+ MaxIdleConns: opt.MaxIdleConns,
+ MaxActiveConns: opt.MaxActiveConns,
+ ConnMaxIdleTime: opt.ConnMaxIdleTime,
+ ConnMaxLifetime: opt.ConnMaxLifetime,
TLSConfig: opt.TLSConfig,
+
+ DisableIndentity: opt.DisableIndentity,
+ IdentitySuffix: opt.IdentitySuffix,
}
}
func (opt *FailoverOptions) clusterOptions() *ClusterOptions {
return &ClusterOptions{
+ ClientName: opt.ClientName,
+
Dialer: opt.Dialer,
OnConnect: opt.OnConnect,
+ Protocol: opt.Protocol,
Username: opt.Username,
Password: opt.Password,
@@ -154,19 +178,24 @@ func (opt *FailoverOptions) clusterOptions() *ClusterOptions {
MinRetryBackoff: opt.MinRetryBackoff,
MaxRetryBackoff: opt.MaxRetryBackoff,
- DialTimeout: opt.DialTimeout,
- ReadTimeout: opt.ReadTimeout,
- WriteTimeout: opt.WriteTimeout,
+ DialTimeout: opt.DialTimeout,
+ ReadTimeout: opt.ReadTimeout,
+ WriteTimeout: opt.WriteTimeout,
+ ContextTimeoutEnabled: opt.ContextTimeoutEnabled,
- PoolFIFO: opt.PoolFIFO,
- PoolSize: opt.PoolSize,
- PoolTimeout: opt.PoolTimeout,
- IdleTimeout: opt.IdleTimeout,
- IdleCheckFrequency: opt.IdleCheckFrequency,
- MinIdleConns: opt.MinIdleConns,
- MaxConnAge: opt.MaxConnAge,
+ PoolFIFO: opt.PoolFIFO,
+ PoolSize: opt.PoolSize,
+ PoolTimeout: opt.PoolTimeout,
+ MinIdleConns: opt.MinIdleConns,
+ MaxIdleConns: opt.MaxIdleConns,
+ MaxActiveConns: opt.MaxActiveConns,
+ ConnMaxIdleTime: opt.ConnMaxIdleTime,
+ ConnMaxLifetime: opt.ConnMaxLifetime,
TLSConfig: opt.TLSConfig,
+
+ DisableIndentity: opt.DisableIndentity,
+ IdentitySuffix: opt.IdentitySuffix,
}
}
@@ -194,10 +223,21 @@ func NewFailoverClient(failoverOpt *FailoverOptions) *Client {
}
opt := failoverOpt.clientOptions()
- opt.Dialer = masterSlaveDialer(failover)
+ opt.Dialer = masterReplicaDialer(failover)
opt.init()
- connPool := newConnPool(opt)
+ var connPool *pool.ConnPool
+
+ rdb := &Client{
+ baseClient: &baseClient{
+ opt: opt,
+ },
+ }
+ rdb.init()
+
+ connPool = newConnPool(opt, rdb.dialHook)
+ rdb.connPool = connPool
+ rdb.onClose = failover.Close
failover.mu.Lock()
failover.onFailover = func(ctx context.Context, addr string) {
@@ -207,25 +247,18 @@ func NewFailoverClient(failoverOpt *FailoverOptions) *Client {
}
failover.mu.Unlock()
- c := Client{
- baseClient: newBaseClient(opt, connPool),
- ctx: context.Background(),
- }
- c.cmdable = c.Process
- c.onClose = failover.Close
-
- return &c
+ return rdb
}
-func masterSlaveDialer(
+func masterReplicaDialer(
failover *sentinelFailover,
) func(ctx context.Context, network, addr string) (net.Conn, error) {
return func(ctx context.Context, network, _ string) (net.Conn, error) {
var addr string
var err error
- if failover.opt.SlaveOnly {
- addr, err = failover.RandomSlaveAddr(ctx)
+ if failover.opt.ReplicaOnly {
+ addr, err = failover.RandomReplicaAddr(ctx)
} else {
addr, err = failover.MasterAddr(ctx)
if err == nil {
@@ -255,37 +288,30 @@ func masterSlaveDialer(
// SentinelClient is a client for a Redis Sentinel.
type SentinelClient struct {
*baseClient
- hooks
- ctx context.Context
+ hooksMixin
}
func NewSentinelClient(opt *Options) *SentinelClient {
opt.init()
c := &SentinelClient{
baseClient: &baseClient{
- opt: opt,
- connPool: newConnPool(opt),
+ opt: opt,
},
- ctx: context.Background(),
}
- return c
-}
-func (c *SentinelClient) Context() context.Context {
- return c.ctx
-}
+ c.initHooks(hooks{
+ dial: c.baseClient.dial,
+ process: c.baseClient.process,
+ })
+ c.connPool = newConnPool(opt, c.dialHook)
-func (c *SentinelClient) WithContext(ctx context.Context) *SentinelClient {
- if ctx == nil {
- panic("nil context")
- }
- clone := *c
- clone.ctx = ctx
- return &clone
+ return c
}
func (c *SentinelClient) Process(ctx context.Context, cmd Cmder) error {
- return c.hooks.process(ctx, cmd, c.baseClient.process)
+ err := c.processHook(ctx, cmd)
+ cmd.SetErr(err)
+ return err
}
func (c *SentinelClient) pubSub() *PubSub {
@@ -335,8 +361,8 @@ func (c *SentinelClient) GetMasterAddrByName(ctx context.Context, name string) *
return cmd
}
-func (c *SentinelClient) Sentinels(ctx context.Context, name string) *SliceCmd {
- cmd := NewSliceCmd(ctx, "sentinel", "sentinels", name)
+func (c *SentinelClient) Sentinels(ctx context.Context, name string) *MapStringStringSliceCmd {
+ cmd := NewMapStringStringSliceCmd(ctx, "sentinel", "sentinels", name)
_ = c.Process(ctx, cmd)
return cmd
}
@@ -351,7 +377,7 @@ func (c *SentinelClient) Failover(ctx context.Context, name string) *StatusCmd {
// Reset resets all the masters with matching name. The pattern argument is a
// glob-style pattern. The reset process clears any previous state in a master
-// (including a failover in progress), and removes every slave and sentinel
+// (including a failover in progress), and removes every replica and sentinel
// already discovered and associated with the master.
func (c *SentinelClient) Reset(ctx context.Context, pattern string) *IntCmd {
cmd := NewIntCmd(ctx, "sentinel", "reset", pattern)
@@ -368,8 +394,8 @@ func (c *SentinelClient) FlushConfig(ctx context.Context) *StatusCmd {
}
// Master shows the state and info of the specified master.
-func (c *SentinelClient) Master(ctx context.Context, name string) *StringStringMapCmd {
- cmd := NewStringStringMapCmd(ctx, "sentinel", "master", name)
+func (c *SentinelClient) Master(ctx context.Context, name string) *MapStringStringCmd {
+ cmd := NewMapStringStringCmd(ctx, "sentinel", "master", name)
_ = c.Process(ctx, cmd)
return cmd
}
@@ -381,9 +407,9 @@ func (c *SentinelClient) Masters(ctx context.Context) *SliceCmd {
return cmd
}
-// Slaves shows a list of slaves for the specified master and their state.
-func (c *SentinelClient) Slaves(ctx context.Context, name string) *SliceCmd {
- cmd := NewSliceCmd(ctx, "sentinel", "slaves", name)
+// Replicas shows a list of replicas for the specified master and their state.
+func (c *SentinelClient) Replicas(ctx context.Context, name string) *MapStringStringSliceCmd {
+ cmd := NewMapStringStringSliceCmd(ctx, "sentinel", "replicas", name)
_ = c.Process(ctx, cmd)
return cmd
}
@@ -460,18 +486,18 @@ func (c *sentinelFailover) closeSentinel() error {
return firstErr
}
-func (c *sentinelFailover) RandomSlaveAddr(ctx context.Context) (string, error) {
+func (c *sentinelFailover) RandomReplicaAddr(ctx context.Context) (string, error) {
if c.opt == nil {
return "", errors.New("opt is nil")
}
- addresses, err := c.slaveAddrs(ctx, false)
+ addresses, err := c.replicaAddrs(ctx, false)
if err != nil {
return "", err
}
- if len(addresses) == 0 && c.opt.UseDisconnectedSlaves {
- addresses, err = c.slaveAddrs(ctx, true)
+ if len(addresses) == 0 && c.opt.UseDisconnectedReplicas {
+ addresses, err = c.replicaAddrs(ctx, true)
if err != nil {
return "", err
}
@@ -489,8 +515,15 @@ func (c *sentinelFailover) MasterAddr(ctx context.Context) (string, error) {
c.mu.RUnlock()
if sentinel != nil {
- addr := c.getMasterAddr(ctx, sentinel)
- if addr != "" {
+ addr, err := c.getMasterAddr(ctx, sentinel)
+ if err != nil {
+ if errors.Is(err, context.Canceled) || errors.Is(err, context.DeadlineExceeded) {
+ return "", err
+ }
+ // Continue on other errors
+ internal.Logger.Printf(ctx, "sentinel: GetMasterAddrByName name=%q failed: %s",
+ c.opt.MasterName, err)
+ } else {
return addr, nil
}
}
@@ -499,11 +532,18 @@ func (c *sentinelFailover) MasterAddr(ctx context.Context) (string, error) {
defer c.mu.Unlock()
if c.sentinel != nil {
- addr := c.getMasterAddr(ctx, c.sentinel)
- if addr != "" {
+ addr, err := c.getMasterAddr(ctx, c.sentinel)
+ if err != nil {
+ _ = c.closeSentinel()
+ if errors.Is(err, context.Canceled) || errors.Is(err, context.DeadlineExceeded) {
+ return "", err
+ }
+ // Continue on other errors
+ internal.Logger.Printf(ctx, "sentinel: GetMasterAddrByName name=%q failed: %s",
+ c.opt.MasterName, err)
+ } else {
return addr, nil
}
- _ = c.closeSentinel()
}
for i, sentinelAddr := range c.sentinelAddrs {
@@ -511,9 +551,12 @@ func (c *sentinelFailover) MasterAddr(ctx context.Context) (string, error) {
masterAddr, err := sentinel.GetMasterAddrByName(ctx, c.opt.MasterName).Result()
if err != nil {
+ _ = sentinel.Close()
+ if errors.Is(err, context.Canceled) || errors.Is(err, context.DeadlineExceeded) {
+ return "", err
+ }
internal.Logger.Printf(ctx, "sentinel: GetMasterAddrByName master=%q failed: %s",
c.opt.MasterName, err)
- _ = sentinel.Close()
continue
}
@@ -528,14 +571,21 @@ func (c *sentinelFailover) MasterAddr(ctx context.Context) (string, error) {
return "", errors.New("redis: all sentinels specified in configuration are unreachable")
}
-func (c *sentinelFailover) slaveAddrs(ctx context.Context, useDisconnected bool) ([]string, error) {
+func (c *sentinelFailover) replicaAddrs(ctx context.Context, useDisconnected bool) ([]string, error) {
c.mu.RLock()
sentinel := c.sentinel
c.mu.RUnlock()
if sentinel != nil {
- addrs := c.getSlaveAddrs(ctx, sentinel)
- if len(addrs) > 0 {
+ addrs, err := c.getReplicaAddrs(ctx, sentinel)
+ if err != nil {
+ if errors.Is(err, context.Canceled) || errors.Is(err, context.DeadlineExceeded) {
+ return nil, err
+ }
+ // Continue on other errors
+ internal.Logger.Printf(ctx, "sentinel: Replicas name=%q failed: %s",
+ c.opt.MasterName, err)
+ } else if len(addrs) > 0 {
return addrs, nil
}
}
@@ -544,11 +594,21 @@ func (c *sentinelFailover) slaveAddrs(ctx context.Context, useDisconnected bool)
defer c.mu.Unlock()
if c.sentinel != nil {
- addrs := c.getSlaveAddrs(ctx, c.sentinel)
- if len(addrs) > 0 {
+ addrs, err := c.getReplicaAddrs(ctx, c.sentinel)
+ if err != nil {
+ _ = c.closeSentinel()
+ if errors.Is(err, context.Canceled) || errors.Is(err, context.DeadlineExceeded) {
+ return nil, err
+ }
+ // Continue on other errors
+ internal.Logger.Printf(ctx, "sentinel: Replicas name=%q failed: %s",
+ c.opt.MasterName, err)
+ } else if len(addrs) > 0 {
return addrs, nil
+ } else {
+ // No error and no replicas.
+ _ = c.closeSentinel()
}
- _ = c.closeSentinel()
}
var sentinelReachable bool
@@ -556,15 +616,18 @@ func (c *sentinelFailover) slaveAddrs(ctx context.Context, useDisconnected bool)
for i, sentinelAddr := range c.sentinelAddrs {
sentinel := NewSentinelClient(c.opt.sentinelOptions(sentinelAddr))
- slaves, err := sentinel.Slaves(ctx, c.opt.MasterName).Result()
+ replicas, err := sentinel.Replicas(ctx, c.opt.MasterName).Result()
if err != nil {
- internal.Logger.Printf(ctx, "sentinel: Slaves master=%q failed: %s",
- c.opt.MasterName, err)
_ = sentinel.Close()
+ if errors.Is(err, context.Canceled) || errors.Is(err, context.DeadlineExceeded) {
+ return nil, err
+ }
+ internal.Logger.Printf(ctx, "sentinel: Replicas master=%q failed: %s",
+ c.opt.MasterName, err)
continue
}
sentinelReachable = true
- addrs := parseSlaveAddrs(slaves, useDisconnected)
+ addrs := parseReplicaAddrs(replicas, useDisconnected)
if len(addrs) == 0 {
continue
}
@@ -581,60 +644,42 @@ func (c *sentinelFailover) slaveAddrs(ctx context.Context, useDisconnected bool)
return []string{}, errors.New("redis: all sentinels specified in configuration are unreachable")
}
-func (c *sentinelFailover) getMasterAddr(ctx context.Context, sentinel *SentinelClient) string {
+func (c *sentinelFailover) getMasterAddr(ctx context.Context, sentinel *SentinelClient) (string, error) {
addr, err := sentinel.GetMasterAddrByName(ctx, c.opt.MasterName).Result()
if err != nil {
- internal.Logger.Printf(ctx, "sentinel: GetMasterAddrByName name=%q failed: %s",
- c.opt.MasterName, err)
- return ""
+ return "", err
}
- return net.JoinHostPort(addr[0], addr[1])
+ return net.JoinHostPort(addr[0], addr[1]), nil
}
-func (c *sentinelFailover) getSlaveAddrs(ctx context.Context, sentinel *SentinelClient) []string {
- addrs, err := sentinel.Slaves(ctx, c.opt.MasterName).Result()
+func (c *sentinelFailover) getReplicaAddrs(ctx context.Context, sentinel *SentinelClient) ([]string, error) {
+ addrs, err := sentinel.Replicas(ctx, c.opt.MasterName).Result()
if err != nil {
- internal.Logger.Printf(ctx, "sentinel: Slaves name=%q failed: %s",
+ internal.Logger.Printf(ctx, "sentinel: Replicas name=%q failed: %s",
c.opt.MasterName, err)
- return []string{}
+ return nil, err
}
- return parseSlaveAddrs(addrs, false)
+ return parseReplicaAddrs(addrs, false), nil
}
-func parseSlaveAddrs(addrs []interface{}, keepDisconnected bool) []string {
+func parseReplicaAddrs(addrs []map[string]string, keepDisconnected bool) []string {
nodes := make([]string, 0, len(addrs))
for _, node := range addrs {
- ip := ""
- port := ""
- flags := []string{}
- lastkey := ""
isDown := false
-
- for _, key := range node.([]interface{}) {
- switch lastkey {
- case "ip":
- ip = key.(string)
- case "port":
- port = key.(string)
- case "flags":
- flags = strings.Split(key.(string), ",")
- }
- lastkey = key.(string)
- }
-
- for _, flag := range flags {
- switch flag {
- case "s_down", "o_down":
- isDown = true
- case "disconnected":
- if !keepDisconnected {
+ if flags, ok := node["flags"]; ok {
+ for _, flag := range strings.Split(flags, ",") {
+ switch flag {
+ case "s_down", "o_down":
isDown = true
+ case "disconnected":
+ if !keepDisconnected {
+ isDown = true
+ }
}
}
}
-
- if !isDown {
- nodes = append(nodes, net.JoinHostPort(ip, port))
+ if !isDown && node["ip"] != "" && node["port"] != "" {
+ nodes = append(nodes, net.JoinHostPort(node["ip"], node["port"]))
}
}
@@ -672,7 +717,7 @@ func (c *sentinelFailover) setSentinel(ctx context.Context, sentinel *SentinelCl
c.sentinel = sentinel
c.discoverSentinels(ctx)
- c.pubsub = sentinel.Subscribe(ctx, "+switch-master", "+slave-reconf-done")
+ c.pubsub = sentinel.Subscribe(ctx, "+switch-master", "+replica-reconf-done")
go c.listen(c.pubsub)
}
@@ -683,16 +728,13 @@ func (c *sentinelFailover) discoverSentinels(ctx context.Context) {
return
}
for _, sentinel := range sentinels {
- vals := sentinel.([]interface{})
- var ip, port string
- for i := 0; i < len(vals); i += 2 {
- key := vals[i].(string)
- switch key {
- case "ip":
- ip = vals[i+1].(string)
- case "port":
- port = vals[i+1].(string)
- }
+ ip, ok := sentinel["ip"]
+ if !ok {
+ continue
+ }
+ port, ok := sentinel["port"]
+ if !ok {
+ continue
}
if ip != "" && port != "" {
sentinelAddr := net.JoinHostPort(ip, port)
@@ -742,7 +784,7 @@ func contains(slice []string, str string) bool {
//------------------------------------------------------------------------------
// NewFailoverClusterClient returns a client that supports routing read-only commands
-// to a slave node.
+// to a replica node.
func NewFailoverClusterClient(failoverOpt *FailoverOptions) *ClusterClient {
sentinelAddrs := make([]string, len(failoverOpt.SentinelAddrs))
copy(sentinelAddrs, failoverOpt.SentinelAddrs)
@@ -763,14 +805,14 @@ func NewFailoverClusterClient(failoverOpt *FailoverOptions) *ClusterClient {
Addr: masterAddr,
}}
- slaveAddrs, err := failover.slaveAddrs(ctx, false)
+ replicaAddrs, err := failover.replicaAddrs(ctx, false)
if err != nil {
return nil, err
}
- for _, slaveAddr := range slaveAddrs {
+ for _, replicaAddr := range replicaAddrs {
nodes = append(nodes, ClusterNode{
- Addr: slaveAddr,
+ Addr: replicaAddr,
})
}
diff --git a/dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/sentinel_test.go b/dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.5.1/sentinel_test.go
index 753e0fc..8bc6c57 100644
--- a/dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/sentinel_test.go
+++ b/dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.5.1/sentinel_test.go
@@ -1,14 +1,39 @@
package redis_test
import (
+ "context"
"net"
- . "github.com/onsi/ginkgo"
- . "github.com/onsi/gomega"
+ . "github.com/bsm/ginkgo/v2"
+ . "github.com/bsm/gomega"
- "github.com/go-redis/redis/v8"
+ "github.com/redis/go-redis/v9"
)
+var _ = Describe("Sentinel PROTO 2", func() {
+ var client *redis.Client
+
+ BeforeEach(func() {
+ client = redis.NewFailoverClient(&redis.FailoverOptions{
+ MasterName: sentinelName,
+ SentinelAddrs: sentinelAddrs,
+ MaxRetries: -1,
+ Protocol: 2,
+ })
+ Expect(client.FlushDB(ctx).Err()).NotTo(HaveOccurred())
+ })
+
+ AfterEach(func() {
+ _ = client.Close()
+ })
+
+ It("should sentinel client PROTO 2", func() {
+ val, err := client.Do(ctx, "HELLO").Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(val).Should(ContainElements("proto", int64(2)))
+ })
+})
+
var _ = Describe("Sentinel", func() {
var client *redis.Client
var master *redis.Client
@@ -17,6 +42,7 @@ var _ = Describe("Sentinel", func() {
BeforeEach(func() {
client = redis.NewFailoverClient(&redis.FailoverOptions{
+ ClientName: "sentinel_hi",
MasterName: sentinelName,
SentinelAddrs: sentinelAddrs,
MaxRetries: -1,
@@ -125,6 +151,47 @@ var _ = Describe("Sentinel", func() {
err := client.Ping(ctx).Err()
Expect(err).NotTo(HaveOccurred())
})
+
+ It("should sentinel client setname", func() {
+ Expect(client.Ping(ctx).Err()).NotTo(HaveOccurred())
+ val, err := client.ClientList(ctx).Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(val).Should(ContainSubstring("name=sentinel_hi"))
+ })
+
+ It("should sentinel client PROTO 3", func() {
+ val, err := client.Do(ctx, "HELLO").Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(val).Should(HaveKeyWithValue("proto", int64(3)))
+ })
+})
+
+var _ = Describe("NewFailoverClusterClient PROTO 2", func() {
+ var client *redis.ClusterClient
+
+ BeforeEach(func() {
+ client = redis.NewFailoverClusterClient(&redis.FailoverOptions{
+ MasterName: sentinelName,
+ SentinelAddrs: sentinelAddrs,
+ Protocol: 2,
+
+ RouteRandomly: true,
+ })
+ Expect(client.FlushDB(ctx).Err()).NotTo(HaveOccurred())
+ })
+
+ AfterEach(func() {
+ _ = client.Close()
+ })
+
+ It("should sentinel cluster PROTO 2", func() {
+ _ = client.ForEachShard(ctx, func(ctx context.Context, c *redis.Client) error {
+ val, err := client.Do(ctx, "HELLO").Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(val).Should(ContainElements("proto", int64(2)))
+ return nil
+ })
+ })
})
var _ = Describe("NewFailoverClusterClient", func() {
@@ -134,6 +201,7 @@ var _ = Describe("NewFailoverClusterClient", func() {
BeforeEach(func() {
client = redis.NewFailoverClusterClient(&redis.FailoverOptions{
+ ClientName: "sentinel_cluster_hi",
MasterName: sentinelName,
SentinelAddrs: sentinelAddrs,
@@ -173,6 +241,7 @@ var _ = Describe("NewFailoverClusterClient", func() {
})
It("should facilitate failover", func() {
+ Skip("Flaky Test")
// Set value.
err := client.Set(ctx, "foo", "master", 0).Err()
Expect(err).NotTo(HaveOccurred())
@@ -185,13 +254,14 @@ var _ = Describe("NewFailoverClusterClient", func() {
}
// Create subscription.
- ch := client.Subscribe(ctx, "foo").Channel()
+ sub := client.Subscribe(ctx, "foo")
+ ch := sub.Channel()
// Kill master.
err = master.Shutdown(ctx).Err()
Expect(err).NotTo(HaveOccurred())
Eventually(func() error {
- return sentinelMaster.Ping(ctx).Err()
+ return master.Ping(ctx).Err()
}, "15s", "100ms").Should(HaveOccurred())
// Check that client picked up new master.
@@ -207,10 +277,36 @@ var _ = Describe("NewFailoverClusterClient", func() {
}, "15s", "100ms").Should(Receive(&msg))
Expect(msg.Channel).To(Equal("foo"))
Expect(msg.Payload).To(Equal("hello"))
+ Expect(sub.Close()).NotTo(HaveOccurred())
_, err = startRedis(masterPort)
Expect(err).NotTo(HaveOccurred())
})
+
+ It("should sentinel cluster client setname", func() {
+ Skip("Flaky Test")
+ err := client.ForEachShard(ctx, func(ctx context.Context, c *redis.Client) error {
+ return c.Ping(ctx).Err()
+ })
+ Expect(err).NotTo(HaveOccurred())
+
+ _ = client.ForEachShard(ctx, func(ctx context.Context, c *redis.Client) error {
+ val, err := c.ClientList(ctx).Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(val).Should(ContainSubstring("name=sentinel_cluster_hi"))
+ return nil
+ })
+ })
+
+ It("should sentinel cluster PROTO 3", func() {
+ Skip("Flaky Test")
+ _ = client.ForEachShard(ctx, func(ctx context.Context, c *redis.Client) error {
+ val, err := client.Do(ctx, "HELLO").Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(val).Should(HaveKeyWithValue("proto", int64(3)))
+ return nil
+ })
+ })
})
var _ = Describe("SentinelAclAuth", func() {
@@ -221,14 +317,14 @@ var _ = Describe("SentinelAclAuth", func() {
var client *redis.Client
var sentinel *redis.SentinelClient
- var sentinels = func() []*redisProcess {
+ sentinels := func() []*redisProcess {
return []*redisProcess{sentinel1, sentinel2, sentinel3}
}
BeforeEach(func() {
authCmd := redis.NewStatusCmd(ctx, "ACL", "SETUSER", aclSentinelUsername, "ON",
">"+aclSentinelPassword, "-@all", "+auth", "+client|getname", "+client|id", "+client|setname",
- "+command", "+hello", "+ping", "+role", "+sentinel|get-master-addr-by-name", "+sentinel|master",
+ "+command", "+hello", "+ping", "+client|setinfo", "+role", "+sentinel|get-master-addr-by-name", "+sentinel|master",
"+sentinel|myid", "+sentinel|replicas", "+sentinel|sentinels")
for _, process := range sentinels() {
diff --git a/dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.5.1/set_commands.go b/dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.5.1/set_commands.go
new file mode 100644
index 0000000..cef8ad6
--- /dev/null
+++ b/dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.5.1/set_commands.go
@@ -0,0 +1,217 @@
+package redis
+
+import "context"
+
+type SetCmdable interface {
+ SAdd(ctx context.Context, key string, members ...interface{}) *IntCmd
+ SCard(ctx context.Context, key string) *IntCmd
+ SDiff(ctx context.Context, keys ...string) *StringSliceCmd
+ SDiffStore(ctx context.Context, destination string, keys ...string) *IntCmd
+ SInter(ctx context.Context, keys ...string) *StringSliceCmd
+ SInterCard(ctx context.Context, limit int64, keys ...string) *IntCmd
+ SInterStore(ctx context.Context, destination string, keys ...string) *IntCmd
+ SIsMember(ctx context.Context, key string, member interface{}) *BoolCmd
+ SMIsMember(ctx context.Context, key string, members ...interface{}) *BoolSliceCmd
+ SMembers(ctx context.Context, key string) *StringSliceCmd
+ SMembersMap(ctx context.Context, key string) *StringStructMapCmd
+ SMove(ctx context.Context, source, destination string, member interface{}) *BoolCmd
+ SPop(ctx context.Context, key string) *StringCmd
+ SPopN(ctx context.Context, key string, count int64) *StringSliceCmd
+ SRandMember(ctx context.Context, key string) *StringCmd
+ SRandMemberN(ctx context.Context, key string, count int64) *StringSliceCmd
+ SRem(ctx context.Context, key string, members ...interface{}) *IntCmd
+ SScan(ctx context.Context, key string, cursor uint64, match string, count int64) *ScanCmd
+ SUnion(ctx context.Context, keys ...string) *StringSliceCmd
+ SUnionStore(ctx context.Context, destination string, keys ...string) *IntCmd
+}
+
+//------------------------------------------------------------------------------
+
+func (c cmdable) SAdd(ctx context.Context, key string, members ...interface{}) *IntCmd {
+ args := make([]interface{}, 2, 2+len(members))
+ args[0] = "sadd"
+ args[1] = key
+ args = appendArgs(args, members)
+ cmd := NewIntCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) SCard(ctx context.Context, key string) *IntCmd {
+ cmd := NewIntCmd(ctx, "scard", key)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) SDiff(ctx context.Context, keys ...string) *StringSliceCmd {
+ args := make([]interface{}, 1+len(keys))
+ args[0] = "sdiff"
+ for i, key := range keys {
+ args[1+i] = key
+ }
+ cmd := NewStringSliceCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) SDiffStore(ctx context.Context, destination string, keys ...string) *IntCmd {
+ args := make([]interface{}, 2+len(keys))
+ args[0] = "sdiffstore"
+ args[1] = destination
+ for i, key := range keys {
+ args[2+i] = key
+ }
+ cmd := NewIntCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) SInter(ctx context.Context, keys ...string) *StringSliceCmd {
+ args := make([]interface{}, 1+len(keys))
+ args[0] = "sinter"
+ for i, key := range keys {
+ args[1+i] = key
+ }
+ cmd := NewStringSliceCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) SInterCard(ctx context.Context, limit int64, keys ...string) *IntCmd {
+ args := make([]interface{}, 4+len(keys))
+ args[0] = "sintercard"
+ numkeys := int64(0)
+ for i, key := range keys {
+ args[2+i] = key
+ numkeys++
+ }
+ args[1] = numkeys
+ args[2+numkeys] = "limit"
+ args[3+numkeys] = limit
+ cmd := NewIntCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) SInterStore(ctx context.Context, destination string, keys ...string) *IntCmd {
+ args := make([]interface{}, 2+len(keys))
+ args[0] = "sinterstore"
+ args[1] = destination
+ for i, key := range keys {
+ args[2+i] = key
+ }
+ cmd := NewIntCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) SIsMember(ctx context.Context, key string, member interface{}) *BoolCmd {
+ cmd := NewBoolCmd(ctx, "sismember", key, member)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+// SMIsMember Redis `SMISMEMBER key member [member ...]` command.
+func (c cmdable) SMIsMember(ctx context.Context, key string, members ...interface{}) *BoolSliceCmd {
+ args := make([]interface{}, 2, 2+len(members))
+ args[0] = "smismember"
+ args[1] = key
+ args = appendArgs(args, members)
+ cmd := NewBoolSliceCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+// SMembers Redis `SMEMBERS key` command output as a slice.
+func (c cmdable) SMembers(ctx context.Context, key string) *StringSliceCmd {
+ cmd := NewStringSliceCmd(ctx, "smembers", key)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+// SMembersMap Redis `SMEMBERS key` command output as a map.
+func (c cmdable) SMembersMap(ctx context.Context, key string) *StringStructMapCmd {
+ cmd := NewStringStructMapCmd(ctx, "smembers", key)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) SMove(ctx context.Context, source, destination string, member interface{}) *BoolCmd {
+ cmd := NewBoolCmd(ctx, "smove", source, destination, member)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+// SPop Redis `SPOP key` command.
+func (c cmdable) SPop(ctx context.Context, key string) *StringCmd {
+ cmd := NewStringCmd(ctx, "spop", key)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+// SPopN Redis `SPOP key count` command.
+func (c cmdable) SPopN(ctx context.Context, key string, count int64) *StringSliceCmd {
+ cmd := NewStringSliceCmd(ctx, "spop", key, count)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+// SRandMember Redis `SRANDMEMBER key` command.
+func (c cmdable) SRandMember(ctx context.Context, key string) *StringCmd {
+ cmd := NewStringCmd(ctx, "srandmember", key)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+// SRandMemberN Redis `SRANDMEMBER key count` command.
+func (c cmdable) SRandMemberN(ctx context.Context, key string, count int64) *StringSliceCmd {
+ cmd := NewStringSliceCmd(ctx, "srandmember", key, count)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) SRem(ctx context.Context, key string, members ...interface{}) *IntCmd {
+ args := make([]interface{}, 2, 2+len(members))
+ args[0] = "srem"
+ args[1] = key
+ args = appendArgs(args, members)
+ cmd := NewIntCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) SUnion(ctx context.Context, keys ...string) *StringSliceCmd {
+ args := make([]interface{}, 1+len(keys))
+ args[0] = "sunion"
+ for i, key := range keys {
+ args[1+i] = key
+ }
+ cmd := NewStringSliceCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) SUnionStore(ctx context.Context, destination string, keys ...string) *IntCmd {
+ args := make([]interface{}, 2+len(keys))
+ args[0] = "sunionstore"
+ args[1] = destination
+ for i, key := range keys {
+ args[2+i] = key
+ }
+ cmd := NewIntCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) SScan(ctx context.Context, key string, cursor uint64, match string, count int64) *ScanCmd {
+ args := []interface{}{"sscan", key, cursor}
+ if match != "" {
+ args = append(args, "match", match)
+ }
+ if count > 0 {
+ args = append(args, "count", count)
+ }
+ cmd := NewScanCmd(ctx, c, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
diff --git a/dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.5.1/sortedset_commands.go b/dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.5.1/sortedset_commands.go
new file mode 100644
index 0000000..6701402
--- /dev/null
+++ b/dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.5.1/sortedset_commands.go
@@ -0,0 +1,772 @@
+package redis
+
+import (
+ "context"
+ "strings"
+ "time"
+)
+
+type SortedSetCmdable interface {
+ BZPopMax(ctx context.Context, timeout time.Duration, keys ...string) *ZWithKeyCmd
+ BZPopMin(ctx context.Context, timeout time.Duration, keys ...string) *ZWithKeyCmd
+ BZMPop(ctx context.Context, timeout time.Duration, order string, count int64, keys ...string) *ZSliceWithKeyCmd
+ ZAdd(ctx context.Context, key string, members ...Z) *IntCmd
+ ZAddLT(ctx context.Context, key string, members ...Z) *IntCmd
+ ZAddGT(ctx context.Context, key string, members ...Z) *IntCmd
+ ZAddNX(ctx context.Context, key string, members ...Z) *IntCmd
+ ZAddXX(ctx context.Context, key string, members ...Z) *IntCmd
+ ZAddArgs(ctx context.Context, key string, args ZAddArgs) *IntCmd
+ ZAddArgsIncr(ctx context.Context, key string, args ZAddArgs) *FloatCmd
+ ZCard(ctx context.Context, key string) *IntCmd
+ ZCount(ctx context.Context, key, min, max string) *IntCmd
+ ZLexCount(ctx context.Context, key, min, max string) *IntCmd
+ ZIncrBy(ctx context.Context, key string, increment float64, member string) *FloatCmd
+ ZInter(ctx context.Context, store *ZStore) *StringSliceCmd
+ ZInterWithScores(ctx context.Context, store *ZStore) *ZSliceCmd
+ ZInterCard(ctx context.Context, limit int64, keys ...string) *IntCmd
+ ZInterStore(ctx context.Context, destination string, store *ZStore) *IntCmd
+ ZMPop(ctx context.Context, order string, count int64, keys ...string) *ZSliceWithKeyCmd
+ ZMScore(ctx context.Context, key string, members ...string) *FloatSliceCmd
+ ZPopMax(ctx context.Context, key string, count ...int64) *ZSliceCmd
+ ZPopMin(ctx context.Context, key string, count ...int64) *ZSliceCmd
+ ZRange(ctx context.Context, key string, start, stop int64) *StringSliceCmd
+ ZRangeWithScores(ctx context.Context, key string, start, stop int64) *ZSliceCmd
+ ZRangeByScore(ctx context.Context, key string, opt *ZRangeBy) *StringSliceCmd
+ ZRangeByLex(ctx context.Context, key string, opt *ZRangeBy) *StringSliceCmd
+ ZRangeByScoreWithScores(ctx context.Context, key string, opt *ZRangeBy) *ZSliceCmd
+ ZRangeArgs(ctx context.Context, z ZRangeArgs) *StringSliceCmd
+ ZRangeArgsWithScores(ctx context.Context, z ZRangeArgs) *ZSliceCmd
+ ZRangeStore(ctx context.Context, dst string, z ZRangeArgs) *IntCmd
+ ZRank(ctx context.Context, key, member string) *IntCmd
+ ZRankWithScore(ctx context.Context, key, member string) *RankWithScoreCmd
+ ZRem(ctx context.Context, key string, members ...interface{}) *IntCmd
+ ZRemRangeByRank(ctx context.Context, key string, start, stop int64) *IntCmd
+ ZRemRangeByScore(ctx context.Context, key, min, max string) *IntCmd
+ ZRemRangeByLex(ctx context.Context, key, min, max string) *IntCmd
+ ZRevRange(ctx context.Context, key string, start, stop int64) *StringSliceCmd
+ ZRevRangeWithScores(ctx context.Context, key string, start, stop int64) *ZSliceCmd
+ ZRevRangeByScore(ctx context.Context, key string, opt *ZRangeBy) *StringSliceCmd
+ ZRevRangeByLex(ctx context.Context, key string, opt *ZRangeBy) *StringSliceCmd
+ ZRevRangeByScoreWithScores(ctx context.Context, key string, opt *ZRangeBy) *ZSliceCmd
+ ZRevRank(ctx context.Context, key, member string) *IntCmd
+ ZRevRankWithScore(ctx context.Context, key, member string) *RankWithScoreCmd
+ ZScore(ctx context.Context, key, member string) *FloatCmd
+ ZUnionStore(ctx context.Context, dest string, store *ZStore) *IntCmd
+ ZRandMember(ctx context.Context, key string, count int) *StringSliceCmd
+ ZRandMemberWithScores(ctx context.Context, key string, count int) *ZSliceCmd
+ ZUnion(ctx context.Context, store ZStore) *StringSliceCmd
+ ZUnionWithScores(ctx context.Context, store ZStore) *ZSliceCmd
+ ZDiff(ctx context.Context, keys ...string) *StringSliceCmd
+ ZDiffWithScores(ctx context.Context, keys ...string) *ZSliceCmd
+ ZDiffStore(ctx context.Context, destination string, keys ...string) *IntCmd
+ ZScan(ctx context.Context, key string, cursor uint64, match string, count int64) *ScanCmd
+}
+
+// BZPopMax Redis `BZPOPMAX key [key ...] timeout` command.
+func (c cmdable) BZPopMax(ctx context.Context, timeout time.Duration, keys ...string) *ZWithKeyCmd {
+ args := make([]interface{}, 1+len(keys)+1)
+ args[0] = "bzpopmax"
+ for i, key := range keys {
+ args[1+i] = key
+ }
+ args[len(args)-1] = formatSec(ctx, timeout)
+ cmd := NewZWithKeyCmd(ctx, args...)
+ cmd.setReadTimeout(timeout)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+// BZPopMin Redis `BZPOPMIN key [key ...] timeout` command.
+func (c cmdable) BZPopMin(ctx context.Context, timeout time.Duration, keys ...string) *ZWithKeyCmd {
+ args := make([]interface{}, 1+len(keys)+1)
+ args[0] = "bzpopmin"
+ for i, key := range keys {
+ args[1+i] = key
+ }
+ args[len(args)-1] = formatSec(ctx, timeout)
+ cmd := NewZWithKeyCmd(ctx, args...)
+ cmd.setReadTimeout(timeout)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+// BZMPop is the blocking variant of ZMPOP.
+// When any of the sorted sets contains elements, this command behaves exactly like ZMPOP.
+// When all sorted sets are empty, Redis will block the connection until another client adds members to one of the keys or until the timeout elapses.
+// A timeout of zero can be used to block indefinitely.
+// example: client.BZMPop(ctx, 0,"max", 1, "set")
+func (c cmdable) BZMPop(ctx context.Context, timeout time.Duration, order string, count int64, keys ...string) *ZSliceWithKeyCmd {
+ args := make([]interface{}, 3+len(keys), 6+len(keys))
+ args[0] = "bzmpop"
+ args[1] = formatSec(ctx, timeout)
+ args[2] = len(keys)
+ for i, key := range keys {
+ args[3+i] = key
+ }
+ args = append(args, strings.ToLower(order), "count", count)
+ cmd := NewZSliceWithKeyCmd(ctx, args...)
+ cmd.setReadTimeout(timeout)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+// ZAddArgs WARN: The GT, LT and NX options are mutually exclusive.
+type ZAddArgs struct {
+ NX bool
+ XX bool
+ LT bool
+ GT bool
+ Ch bool
+ Members []Z
+}
+
+func (c cmdable) zAddArgs(key string, args ZAddArgs, incr bool) []interface{} {
+ a := make([]interface{}, 0, 6+2*len(args.Members))
+ a = append(a, "zadd", key)
+
+ // The GT, LT and NX options are mutually exclusive.
+ if args.NX {
+ a = append(a, "nx")
+ } else {
+ if args.XX {
+ a = append(a, "xx")
+ }
+ if args.GT {
+ a = append(a, "gt")
+ } else if args.LT {
+ a = append(a, "lt")
+ }
+ }
+ if args.Ch {
+ a = append(a, "ch")
+ }
+ if incr {
+ a = append(a, "incr")
+ }
+ for _, m := range args.Members {
+ a = append(a, m.Score)
+ a = append(a, m.Member)
+ }
+ return a
+}
+
+func (c cmdable) ZAddArgs(ctx context.Context, key string, args ZAddArgs) *IntCmd {
+ cmd := NewIntCmd(ctx, c.zAddArgs(key, args, false)...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) ZAddArgsIncr(ctx context.Context, key string, args ZAddArgs) *FloatCmd {
+ cmd := NewFloatCmd(ctx, c.zAddArgs(key, args, true)...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+// ZAdd Redis `ZADD key score member [score member ...]` command.
+func (c cmdable) ZAdd(ctx context.Context, key string, members ...Z) *IntCmd {
+ return c.ZAddArgs(ctx, key, ZAddArgs{
+ Members: members,
+ })
+}
+
+// ZAddLT Redis `ZADD key LT score member [score member ...]` command.
+func (c cmdable) ZAddLT(ctx context.Context, key string, members ...Z) *IntCmd {
+ return c.ZAddArgs(ctx, key, ZAddArgs{
+ LT: true,
+ Members: members,
+ })
+}
+
+// ZAddGT Redis `ZADD key GT score member [score member ...]` command.
+func (c cmdable) ZAddGT(ctx context.Context, key string, members ...Z) *IntCmd {
+ return c.ZAddArgs(ctx, key, ZAddArgs{
+ GT: true,
+ Members: members,
+ })
+}
+
+// ZAddNX Redis `ZADD key NX score member [score member ...]` command.
+func (c cmdable) ZAddNX(ctx context.Context, key string, members ...Z) *IntCmd {
+ return c.ZAddArgs(ctx, key, ZAddArgs{
+ NX: true,
+ Members: members,
+ })
+}
+
+// ZAddXX Redis `ZADD key XX score member [score member ...]` command.
+func (c cmdable) ZAddXX(ctx context.Context, key string, members ...Z) *IntCmd {
+ return c.ZAddArgs(ctx, key, ZAddArgs{
+ XX: true,
+ Members: members,
+ })
+}
+
+func (c cmdable) ZCard(ctx context.Context, key string) *IntCmd {
+ cmd := NewIntCmd(ctx, "zcard", key)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) ZCount(ctx context.Context, key, min, max string) *IntCmd {
+ cmd := NewIntCmd(ctx, "zcount", key, min, max)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) ZLexCount(ctx context.Context, key, min, max string) *IntCmd {
+ cmd := NewIntCmd(ctx, "zlexcount", key, min, max)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) ZIncrBy(ctx context.Context, key string, increment float64, member string) *FloatCmd {
+ cmd := NewFloatCmd(ctx, "zincrby", key, increment, member)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) ZInterStore(ctx context.Context, destination string, store *ZStore) *IntCmd {
+ args := make([]interface{}, 0, 3+store.len())
+ args = append(args, "zinterstore", destination, len(store.Keys))
+ args = store.appendArgs(args)
+ cmd := NewIntCmd(ctx, args...)
+ cmd.SetFirstKeyPos(3)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) ZInter(ctx context.Context, store *ZStore) *StringSliceCmd {
+ args := make([]interface{}, 0, 2+store.len())
+ args = append(args, "zinter", len(store.Keys))
+ args = store.appendArgs(args)
+ cmd := NewStringSliceCmd(ctx, args...)
+ cmd.SetFirstKeyPos(2)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) ZInterWithScores(ctx context.Context, store *ZStore) *ZSliceCmd {
+ args := make([]interface{}, 0, 3+store.len())
+ args = append(args, "zinter", len(store.Keys))
+ args = store.appendArgs(args)
+ args = append(args, "withscores")
+ cmd := NewZSliceCmd(ctx, args...)
+ cmd.SetFirstKeyPos(2)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) ZInterCard(ctx context.Context, limit int64, keys ...string) *IntCmd {
+ args := make([]interface{}, 4+len(keys))
+ args[0] = "zintercard"
+ numkeys := int64(0)
+ for i, key := range keys {
+ args[2+i] = key
+ numkeys++
+ }
+ args[1] = numkeys
+ args[2+numkeys] = "limit"
+ args[3+numkeys] = limit
+ cmd := NewIntCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+// ZMPop Pops one or more elements with the highest or lowest score from the first non-empty sorted set key from the list of provided key names.
+// direction: "max" (highest score) or "min" (lowest score), count: > 0
+// example: client.ZMPop(ctx, "max", 5, "set1", "set2")
+func (c cmdable) ZMPop(ctx context.Context, order string, count int64, keys ...string) *ZSliceWithKeyCmd {
+ args := make([]interface{}, 2+len(keys), 5+len(keys))
+ args[0] = "zmpop"
+ args[1] = len(keys)
+ for i, key := range keys {
+ args[2+i] = key
+ }
+ args = append(args, strings.ToLower(order), "count", count)
+ cmd := NewZSliceWithKeyCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) ZMScore(ctx context.Context, key string, members ...string) *FloatSliceCmd {
+ args := make([]interface{}, 2+len(members))
+ args[0] = "zmscore"
+ args[1] = key
+ for i, member := range members {
+ args[2+i] = member
+ }
+ cmd := NewFloatSliceCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) ZPopMax(ctx context.Context, key string, count ...int64) *ZSliceCmd {
+ args := []interface{}{
+ "zpopmax",
+ key,
+ }
+
+ switch len(count) {
+ case 0:
+ break
+ case 1:
+ args = append(args, count[0])
+ default:
+ panic("too many arguments")
+ }
+
+ cmd := NewZSliceCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) ZPopMin(ctx context.Context, key string, count ...int64) *ZSliceCmd {
+ args := []interface{}{
+ "zpopmin",
+ key,
+ }
+
+ switch len(count) {
+ case 0:
+ break
+ case 1:
+ args = append(args, count[0])
+ default:
+ panic("too many arguments")
+ }
+
+ cmd := NewZSliceCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+// ZRangeArgs is all the options of the ZRange command.
+// In version> 6.2.0, you can replace the(cmd):
+//
+// ZREVRANGE,
+// ZRANGEBYSCORE,
+// ZREVRANGEBYSCORE,
+// ZRANGEBYLEX,
+// ZREVRANGEBYLEX.
+//
+// Please pay attention to your redis-server version.
+//
+// Rev, ByScore, ByLex and Offset+Count options require redis-server 6.2.0 and higher.
+type ZRangeArgs struct {
+ Key string
+
+ // When the ByScore option is provided, the open interval(exclusive) can be set.
+ // By default, the score intervals specified by <Start> and <Stop> are closed (inclusive).
+ // It is similar to the deprecated(6.2.0+) ZRangeByScore command.
+ // For example:
+ // ZRangeArgs{
+ // Key: "example-key",
+ // Start: "(3",
+ // Stop: 8,
+ // ByScore: true,
+ // }
+ // cmd: "ZRange example-key (3 8 ByScore" (3 < score <= 8).
+ //
+ // For the ByLex option, it is similar to the deprecated(6.2.0+) ZRangeByLex command.
+ // You can set the <Start> and <Stop> options as follows:
+ // ZRangeArgs{
+ // Key: "example-key",
+ // Start: "[abc",
+ // Stop: "(def",
+ // ByLex: true,
+ // }
+ // cmd: "ZRange example-key [abc (def ByLex"
+ //
+ // For normal cases (ByScore==false && ByLex==false), <Start> and <Stop> should be set to the index range (int).
+ // You can read the documentation for more information: https://redis.io/commands/zrange
+ Start interface{}
+ Stop interface{}
+
+ // The ByScore and ByLex options are mutually exclusive.
+ ByScore bool
+ ByLex bool
+
+ Rev bool
+
+ // limit offset count.
+ Offset int64
+ Count int64
+}
+
+func (z ZRangeArgs) appendArgs(args []interface{}) []interface{} {
+ // For Rev+ByScore/ByLex, we need to adjust the position of <Start> and <Stop>.
+ if z.Rev && (z.ByScore || z.ByLex) {
+ args = append(args, z.Key, z.Stop, z.Start)
+ } else {
+ args = append(args, z.Key, z.Start, z.Stop)
+ }
+
+ if z.ByScore {
+ args = append(args, "byscore")
+ } else if z.ByLex {
+ args = append(args, "bylex")
+ }
+ if z.Rev {
+ args = append(args, "rev")
+ }
+ if z.Offset != 0 || z.Count != 0 {
+ args = append(args, "limit", z.Offset, z.Count)
+ }
+ return args
+}
+
+func (c cmdable) ZRangeArgs(ctx context.Context, z ZRangeArgs) *StringSliceCmd {
+ args := make([]interface{}, 0, 9)
+ args = append(args, "zrange")
+ args = z.appendArgs(args)
+ cmd := NewStringSliceCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) ZRangeArgsWithScores(ctx context.Context, z ZRangeArgs) *ZSliceCmd {
+ args := make([]interface{}, 0, 10)
+ args = append(args, "zrange")
+ args = z.appendArgs(args)
+ args = append(args, "withscores")
+ cmd := NewZSliceCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) ZRange(ctx context.Context, key string, start, stop int64) *StringSliceCmd {
+ return c.ZRangeArgs(ctx, ZRangeArgs{
+ Key: key,
+ Start: start,
+ Stop: stop,
+ })
+}
+
+func (c cmdable) ZRangeWithScores(ctx context.Context, key string, start, stop int64) *ZSliceCmd {
+ return c.ZRangeArgsWithScores(ctx, ZRangeArgs{
+ Key: key,
+ Start: start,
+ Stop: stop,
+ })
+}
+
+type ZRangeBy struct {
+ Min, Max string
+ Offset, Count int64
+}
+
+func (c cmdable) zRangeBy(ctx context.Context, zcmd, key string, opt *ZRangeBy, withScores bool) *StringSliceCmd {
+ args := []interface{}{zcmd, key, opt.Min, opt.Max}
+ if withScores {
+ args = append(args, "withscores")
+ }
+ if opt.Offset != 0 || opt.Count != 0 {
+ args = append(
+ args,
+ "limit",
+ opt.Offset,
+ opt.Count,
+ )
+ }
+ cmd := NewStringSliceCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) ZRangeByScore(ctx context.Context, key string, opt *ZRangeBy) *StringSliceCmd {
+ return c.zRangeBy(ctx, "zrangebyscore", key, opt, false)
+}
+
+func (c cmdable) ZRangeByLex(ctx context.Context, key string, opt *ZRangeBy) *StringSliceCmd {
+ return c.zRangeBy(ctx, "zrangebylex", key, opt, false)
+}
+
+func (c cmdable) ZRangeByScoreWithScores(ctx context.Context, key string, opt *ZRangeBy) *ZSliceCmd {
+ args := []interface{}{"zrangebyscore", key, opt.Min, opt.Max, "withscores"}
+ if opt.Offset != 0 || opt.Count != 0 {
+ args = append(
+ args,
+ "limit",
+ opt.Offset,
+ opt.Count,
+ )
+ }
+ cmd := NewZSliceCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) ZRangeStore(ctx context.Context, dst string, z ZRangeArgs) *IntCmd {
+ args := make([]interface{}, 0, 10)
+ args = append(args, "zrangestore", dst)
+ args = z.appendArgs(args)
+ cmd := NewIntCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) ZRank(ctx context.Context, key, member string) *IntCmd {
+ cmd := NewIntCmd(ctx, "zrank", key, member)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+// ZRankWithScore according to the Redis documentation, if member does not exist
+// in the sorted set or key does not exist, it will return a redis.Nil error.
+func (c cmdable) ZRankWithScore(ctx context.Context, key, member string) *RankWithScoreCmd {
+ cmd := NewRankWithScoreCmd(ctx, "zrank", key, member, "withscore")
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) ZRem(ctx context.Context, key string, members ...interface{}) *IntCmd {
+ args := make([]interface{}, 2, 2+len(members))
+ args[0] = "zrem"
+ args[1] = key
+ args = appendArgs(args, members)
+ cmd := NewIntCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) ZRemRangeByRank(ctx context.Context, key string, start, stop int64) *IntCmd {
+ cmd := NewIntCmd(
+ ctx,
+ "zremrangebyrank",
+ key,
+ start,
+ stop,
+ )
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) ZRemRangeByScore(ctx context.Context, key, min, max string) *IntCmd {
+ cmd := NewIntCmd(ctx, "zremrangebyscore", key, min, max)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) ZRemRangeByLex(ctx context.Context, key, min, max string) *IntCmd {
+ cmd := NewIntCmd(ctx, "zremrangebylex", key, min, max)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) ZRevRange(ctx context.Context, key string, start, stop int64) *StringSliceCmd {
+ cmd := NewStringSliceCmd(ctx, "zrevrange", key, start, stop)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+// ZRevRangeWithScores according to the Redis documentation, if member does not exist
+// in the sorted set or key does not exist, it will return a redis.Nil error.
+func (c cmdable) ZRevRangeWithScores(ctx context.Context, key string, start, stop int64) *ZSliceCmd {
+ cmd := NewZSliceCmd(ctx, "zrevrange", key, start, stop, "withscores")
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) zRevRangeBy(ctx context.Context, zcmd, key string, opt *ZRangeBy) *StringSliceCmd {
+ args := []interface{}{zcmd, key, opt.Max, opt.Min}
+ if opt.Offset != 0 || opt.Count != 0 {
+ args = append(
+ args,
+ "limit",
+ opt.Offset,
+ opt.Count,
+ )
+ }
+ cmd := NewStringSliceCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) ZRevRangeByScore(ctx context.Context, key string, opt *ZRangeBy) *StringSliceCmd {
+ return c.zRevRangeBy(ctx, "zrevrangebyscore", key, opt)
+}
+
+func (c cmdable) ZRevRangeByLex(ctx context.Context, key string, opt *ZRangeBy) *StringSliceCmd {
+ return c.zRevRangeBy(ctx, "zrevrangebylex", key, opt)
+}
+
+func (c cmdable) ZRevRangeByScoreWithScores(ctx context.Context, key string, opt *ZRangeBy) *ZSliceCmd {
+ args := []interface{}{"zrevrangebyscore", key, opt.Max, opt.Min, "withscores"}
+ if opt.Offset != 0 || opt.Count != 0 {
+ args = append(
+ args,
+ "limit",
+ opt.Offset,
+ opt.Count,
+ )
+ }
+ cmd := NewZSliceCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) ZRevRank(ctx context.Context, key, member string) *IntCmd {
+ cmd := NewIntCmd(ctx, "zrevrank", key, member)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) ZRevRankWithScore(ctx context.Context, key, member string) *RankWithScoreCmd {
+ cmd := NewRankWithScoreCmd(ctx, "zrevrank", key, member, "withscore")
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) ZScore(ctx context.Context, key, member string) *FloatCmd {
+ cmd := NewFloatCmd(ctx, "zscore", key, member)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) ZUnion(ctx context.Context, store ZStore) *StringSliceCmd {
+ args := make([]interface{}, 0, 2+store.len())
+ args = append(args, "zunion", len(store.Keys))
+ args = store.appendArgs(args)
+ cmd := NewStringSliceCmd(ctx, args...)
+ cmd.SetFirstKeyPos(2)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) ZUnionWithScores(ctx context.Context, store ZStore) *ZSliceCmd {
+ args := make([]interface{}, 0, 3+store.len())
+ args = append(args, "zunion", len(store.Keys))
+ args = store.appendArgs(args)
+ args = append(args, "withscores")
+ cmd := NewZSliceCmd(ctx, args...)
+ cmd.SetFirstKeyPos(2)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) ZUnionStore(ctx context.Context, dest string, store *ZStore) *IntCmd {
+ args := make([]interface{}, 0, 3+store.len())
+ args = append(args, "zunionstore", dest, len(store.Keys))
+ args = store.appendArgs(args)
+ cmd := NewIntCmd(ctx, args...)
+ cmd.SetFirstKeyPos(3)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+// ZRandMember redis-server version >= 6.2.0.
+func (c cmdable) ZRandMember(ctx context.Context, key string, count int) *StringSliceCmd {
+ cmd := NewStringSliceCmd(ctx, "zrandmember", key, count)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+// ZRandMemberWithScores redis-server version >= 6.2.0.
+func (c cmdable) ZRandMemberWithScores(ctx context.Context, key string, count int) *ZSliceCmd {
+ cmd := NewZSliceCmd(ctx, "zrandmember", key, count, "withscores")
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+// ZDiff redis-server version >= 6.2.0.
+func (c cmdable) ZDiff(ctx context.Context, keys ...string) *StringSliceCmd {
+ args := make([]interface{}, 2+len(keys))
+ args[0] = "zdiff"
+ args[1] = len(keys)
+ for i, key := range keys {
+ args[i+2] = key
+ }
+
+ cmd := NewStringSliceCmd(ctx, args...)
+ cmd.SetFirstKeyPos(2)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+// ZDiffWithScores redis-server version >= 6.2.0.
+func (c cmdable) ZDiffWithScores(ctx context.Context, keys ...string) *ZSliceCmd {
+ args := make([]interface{}, 3+len(keys))
+ args[0] = "zdiff"
+ args[1] = len(keys)
+ for i, key := range keys {
+ args[i+2] = key
+ }
+ args[len(keys)+2] = "withscores"
+
+ cmd := NewZSliceCmd(ctx, args...)
+ cmd.SetFirstKeyPos(2)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+// ZDiffStore redis-server version >=6.2.0.
+func (c cmdable) ZDiffStore(ctx context.Context, destination string, keys ...string) *IntCmd {
+ args := make([]interface{}, 0, 3+len(keys))
+ args = append(args, "zdiffstore", destination, len(keys))
+ for _, key := range keys {
+ args = append(args, key)
+ }
+ cmd := NewIntCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) ZScan(ctx context.Context, key string, cursor uint64, match string, count int64) *ScanCmd {
+ args := []interface{}{"zscan", key, cursor}
+ if match != "" {
+ args = append(args, "match", match)
+ }
+ if count > 0 {
+ args = append(args, "count", count)
+ }
+ cmd := NewScanCmd(ctx, c, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+// Z represents sorted set member.
+type Z struct {
+ Score float64
+ Member interface{}
+}
+
+// ZWithKey represents sorted set member including the name of the key where it was popped.
+type ZWithKey struct {
+ Z
+ Key string
+}
+
+// ZStore is used as an arg to ZInter/ZInterStore and ZUnion/ZUnionStore.
+type ZStore struct {
+ Keys []string
+ Weights []float64
+ // Can be SUM, MIN or MAX.
+ Aggregate string
+}
+
+func (z ZStore) len() (n int) {
+ n = len(z.Keys)
+ if len(z.Weights) > 0 {
+ n += 1 + len(z.Weights)
+ }
+ if z.Aggregate != "" {
+ n += 2
+ }
+ return n
+}
+
+func (z ZStore) appendArgs(args []interface{}) []interface{} {
+ for _, key := range z.Keys {
+ args = append(args, key)
+ }
+ if len(z.Weights) > 0 {
+ args = append(args, "weights")
+ for _, weights := range z.Weights {
+ args = append(args, weights)
+ }
+ }
+ if z.Aggregate != "" {
+ args = append(args, "aggregate", z.Aggregate)
+ }
+ return args
+}
diff --git a/dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.5.1/stream_commands.go b/dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.5.1/stream_commands.go
new file mode 100644
index 0000000..0a98692
--- /dev/null
+++ b/dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.5.1/stream_commands.go
@@ -0,0 +1,438 @@
+package redis
+
+import (
+ "context"
+ "time"
+)
+
+type StreamCmdable interface {
+ XAdd(ctx context.Context, a *XAddArgs) *StringCmd
+ XDel(ctx context.Context, stream string, ids ...string) *IntCmd
+ XLen(ctx context.Context, stream string) *IntCmd
+ XRange(ctx context.Context, stream, start, stop string) *XMessageSliceCmd
+ XRangeN(ctx context.Context, stream, start, stop string, count int64) *XMessageSliceCmd
+ XRevRange(ctx context.Context, stream string, start, stop string) *XMessageSliceCmd
+ XRevRangeN(ctx context.Context, stream string, start, stop string, count int64) *XMessageSliceCmd
+ XRead(ctx context.Context, a *XReadArgs) *XStreamSliceCmd
+ XReadStreams(ctx context.Context, streams ...string) *XStreamSliceCmd
+ XGroupCreate(ctx context.Context, stream, group, start string) *StatusCmd
+ XGroupCreateMkStream(ctx context.Context, stream, group, start string) *StatusCmd
+ XGroupSetID(ctx context.Context, stream, group, start string) *StatusCmd
+ XGroupDestroy(ctx context.Context, stream, group string) *IntCmd
+ XGroupCreateConsumer(ctx context.Context, stream, group, consumer string) *IntCmd
+ XGroupDelConsumer(ctx context.Context, stream, group, consumer string) *IntCmd
+ XReadGroup(ctx context.Context, a *XReadGroupArgs) *XStreamSliceCmd
+ XAck(ctx context.Context, stream, group string, ids ...string) *IntCmd
+ XPending(ctx context.Context, stream, group string) *XPendingCmd
+ XPendingExt(ctx context.Context, a *XPendingExtArgs) *XPendingExtCmd
+ XClaim(ctx context.Context, a *XClaimArgs) *XMessageSliceCmd
+ XClaimJustID(ctx context.Context, a *XClaimArgs) *StringSliceCmd
+ XAutoClaim(ctx context.Context, a *XAutoClaimArgs) *XAutoClaimCmd
+ XAutoClaimJustID(ctx context.Context, a *XAutoClaimArgs) *XAutoClaimJustIDCmd
+ XTrimMaxLen(ctx context.Context, key string, maxLen int64) *IntCmd
+ XTrimMaxLenApprox(ctx context.Context, key string, maxLen, limit int64) *IntCmd
+ XTrimMinID(ctx context.Context, key string, minID string) *IntCmd
+ XTrimMinIDApprox(ctx context.Context, key string, minID string, limit int64) *IntCmd
+ XInfoGroups(ctx context.Context, key string) *XInfoGroupsCmd
+ XInfoStream(ctx context.Context, key string) *XInfoStreamCmd
+ XInfoStreamFull(ctx context.Context, key string, count int) *XInfoStreamFullCmd
+ XInfoConsumers(ctx context.Context, key string, group string) *XInfoConsumersCmd
+}
+
+// XAddArgs accepts values in the following formats:
+// - XAddArgs.Values = []interface{}{"key1", "value1", "key2", "value2"}
+// - XAddArgs.Values = []string("key1", "value1", "key2", "value2")
+// - XAddArgs.Values = map[string]interface{}{"key1": "value1", "key2": "value2"}
+//
+// Note that map will not preserve the order of key-value pairs.
+// MaxLen/MaxLenApprox and MinID are in conflict, only one of them can be used.
+type XAddArgs struct {
+ Stream string
+ NoMkStream bool
+ MaxLen int64 // MAXLEN N
+ MinID string
+ // Approx causes MaxLen and MinID to use "~" matcher (instead of "=").
+ Approx bool
+ Limit int64
+ ID string
+ Values interface{}
+}
+
+func (c cmdable) XAdd(ctx context.Context, a *XAddArgs) *StringCmd {
+ args := make([]interface{}, 0, 11)
+ args = append(args, "xadd", a.Stream)
+ if a.NoMkStream {
+ args = append(args, "nomkstream")
+ }
+ switch {
+ case a.MaxLen > 0:
+ if a.Approx {
+ args = append(args, "maxlen", "~", a.MaxLen)
+ } else {
+ args = append(args, "maxlen", a.MaxLen)
+ }
+ case a.MinID != "":
+ if a.Approx {
+ args = append(args, "minid", "~", a.MinID)
+ } else {
+ args = append(args, "minid", a.MinID)
+ }
+ }
+ if a.Limit > 0 {
+ args = append(args, "limit", a.Limit)
+ }
+ if a.ID != "" {
+ args = append(args, a.ID)
+ } else {
+ args = append(args, "*")
+ }
+ args = appendArg(args, a.Values)
+
+ cmd := NewStringCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) XDel(ctx context.Context, stream string, ids ...string) *IntCmd {
+ args := []interface{}{"xdel", stream}
+ for _, id := range ids {
+ args = append(args, id)
+ }
+ cmd := NewIntCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) XLen(ctx context.Context, stream string) *IntCmd {
+ cmd := NewIntCmd(ctx, "xlen", stream)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) XRange(ctx context.Context, stream, start, stop string) *XMessageSliceCmd {
+ cmd := NewXMessageSliceCmd(ctx, "xrange", stream, start, stop)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) XRangeN(ctx context.Context, stream, start, stop string, count int64) *XMessageSliceCmd {
+ cmd := NewXMessageSliceCmd(ctx, "xrange", stream, start, stop, "count", count)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) XRevRange(ctx context.Context, stream, start, stop string) *XMessageSliceCmd {
+ cmd := NewXMessageSliceCmd(ctx, "xrevrange", stream, start, stop)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) XRevRangeN(ctx context.Context, stream, start, stop string, count int64) *XMessageSliceCmd {
+ cmd := NewXMessageSliceCmd(ctx, "xrevrange", stream, start, stop, "count", count)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+type XReadArgs struct {
+ Streams []string // list of streams and ids, e.g. stream1 stream2 id1 id2
+ Count int64
+ Block time.Duration
+}
+
+func (c cmdable) XRead(ctx context.Context, a *XReadArgs) *XStreamSliceCmd {
+ args := make([]interface{}, 0, 6+len(a.Streams))
+ args = append(args, "xread")
+
+ keyPos := int8(1)
+ if a.Count > 0 {
+ args = append(args, "count")
+ args = append(args, a.Count)
+ keyPos += 2
+ }
+ if a.Block >= 0 {
+ args = append(args, "block")
+ args = append(args, int64(a.Block/time.Millisecond))
+ keyPos += 2
+ }
+ args = append(args, "streams")
+ keyPos++
+ for _, s := range a.Streams {
+ args = append(args, s)
+ }
+
+ cmd := NewXStreamSliceCmd(ctx, args...)
+ if a.Block >= 0 {
+ cmd.setReadTimeout(a.Block)
+ }
+ cmd.SetFirstKeyPos(keyPos)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) XReadStreams(ctx context.Context, streams ...string) *XStreamSliceCmd {
+ return c.XRead(ctx, &XReadArgs{
+ Streams: streams,
+ Block: -1,
+ })
+}
+
+func (c cmdable) XGroupCreate(ctx context.Context, stream, group, start string) *StatusCmd {
+ cmd := NewStatusCmd(ctx, "xgroup", "create", stream, group, start)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) XGroupCreateMkStream(ctx context.Context, stream, group, start string) *StatusCmd {
+ cmd := NewStatusCmd(ctx, "xgroup", "create", stream, group, start, "mkstream")
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) XGroupSetID(ctx context.Context, stream, group, start string) *StatusCmd {
+ cmd := NewStatusCmd(ctx, "xgroup", "setid", stream, group, start)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) XGroupDestroy(ctx context.Context, stream, group string) *IntCmd {
+ cmd := NewIntCmd(ctx, "xgroup", "destroy", stream, group)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) XGroupCreateConsumer(ctx context.Context, stream, group, consumer string) *IntCmd {
+ cmd := NewIntCmd(ctx, "xgroup", "createconsumer", stream, group, consumer)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) XGroupDelConsumer(ctx context.Context, stream, group, consumer string) *IntCmd {
+ cmd := NewIntCmd(ctx, "xgroup", "delconsumer", stream, group, consumer)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+type XReadGroupArgs struct {
+ Group string
+ Consumer string
+ Streams []string // list of streams and ids, e.g. stream1 stream2 id1 id2
+ Count int64
+ Block time.Duration
+ NoAck bool
+}
+
+func (c cmdable) XReadGroup(ctx context.Context, a *XReadGroupArgs) *XStreamSliceCmd {
+ args := make([]interface{}, 0, 10+len(a.Streams))
+ args = append(args, "xreadgroup", "group", a.Group, a.Consumer)
+
+ keyPos := int8(4)
+ if a.Count > 0 {
+ args = append(args, "count", a.Count)
+ keyPos += 2
+ }
+ if a.Block >= 0 {
+ args = append(args, "block", int64(a.Block/time.Millisecond))
+ keyPos += 2
+ }
+ if a.NoAck {
+ args = append(args, "noack")
+ keyPos++
+ }
+ args = append(args, "streams")
+ keyPos++
+ for _, s := range a.Streams {
+ args = append(args, s)
+ }
+
+ cmd := NewXStreamSliceCmd(ctx, args...)
+ if a.Block >= 0 {
+ cmd.setReadTimeout(a.Block)
+ }
+ cmd.SetFirstKeyPos(keyPos)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) XAck(ctx context.Context, stream, group string, ids ...string) *IntCmd {
+ args := []interface{}{"xack", stream, group}
+ for _, id := range ids {
+ args = append(args, id)
+ }
+ cmd := NewIntCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) XPending(ctx context.Context, stream, group string) *XPendingCmd {
+ cmd := NewXPendingCmd(ctx, "xpending", stream, group)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+type XPendingExtArgs struct {
+ Stream string
+ Group string
+ Idle time.Duration
+ Start string
+ End string
+ Count int64
+ Consumer string
+}
+
+func (c cmdable) XPendingExt(ctx context.Context, a *XPendingExtArgs) *XPendingExtCmd {
+ args := make([]interface{}, 0, 9)
+ args = append(args, "xpending", a.Stream, a.Group)
+ if a.Idle != 0 {
+ args = append(args, "idle", formatMs(ctx, a.Idle))
+ }
+ args = append(args, a.Start, a.End, a.Count)
+ if a.Consumer != "" {
+ args = append(args, a.Consumer)
+ }
+ cmd := NewXPendingExtCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+type XAutoClaimArgs struct {
+ Stream string
+ Group string
+ MinIdle time.Duration
+ Start string
+ Count int64
+ Consumer string
+}
+
+func (c cmdable) XAutoClaim(ctx context.Context, a *XAutoClaimArgs) *XAutoClaimCmd {
+ args := xAutoClaimArgs(ctx, a)
+ cmd := NewXAutoClaimCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) XAutoClaimJustID(ctx context.Context, a *XAutoClaimArgs) *XAutoClaimJustIDCmd {
+ args := xAutoClaimArgs(ctx, a)
+ args = append(args, "justid")
+ cmd := NewXAutoClaimJustIDCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func xAutoClaimArgs(ctx context.Context, a *XAutoClaimArgs) []interface{} {
+ args := make([]interface{}, 0, 8)
+ args = append(args, "xautoclaim", a.Stream, a.Group, a.Consumer, formatMs(ctx, a.MinIdle), a.Start)
+ if a.Count > 0 {
+ args = append(args, "count", a.Count)
+ }
+ return args
+}
+
+type XClaimArgs struct {
+ Stream string
+ Group string
+ Consumer string
+ MinIdle time.Duration
+ Messages []string
+}
+
+func (c cmdable) XClaim(ctx context.Context, a *XClaimArgs) *XMessageSliceCmd {
+ args := xClaimArgs(a)
+ cmd := NewXMessageSliceCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) XClaimJustID(ctx context.Context, a *XClaimArgs) *StringSliceCmd {
+ args := xClaimArgs(a)
+ args = append(args, "justid")
+ cmd := NewStringSliceCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func xClaimArgs(a *XClaimArgs) []interface{} {
+ args := make([]interface{}, 0, 5+len(a.Messages))
+ args = append(args,
+ "xclaim",
+ a.Stream,
+ a.Group, a.Consumer,
+ int64(a.MinIdle/time.Millisecond))
+ for _, id := range a.Messages {
+ args = append(args, id)
+ }
+ return args
+}
+
+// xTrim If approx is true, add the "~" parameter, otherwise it is the default "=" (redis default).
+// example:
+//
+// XTRIM key MAXLEN/MINID threshold LIMIT limit.
+// XTRIM key MAXLEN/MINID ~ threshold LIMIT limit.
+//
+// The redis-server version is lower than 6.2, please set limit to 0.
+func (c cmdable) xTrim(
+ ctx context.Context, key, strategy string,
+ approx bool, threshold interface{}, limit int64,
+) *IntCmd {
+ args := make([]interface{}, 0, 7)
+ args = append(args, "xtrim", key, strategy)
+ if approx {
+ args = append(args, "~")
+ }
+ args = append(args, threshold)
+ if limit > 0 {
+ args = append(args, "limit", limit)
+ }
+ cmd := NewIntCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+// XTrimMaxLen No `~` rules are used, `limit` cannot be used.
+// cmd: XTRIM key MAXLEN maxLen
+func (c cmdable) XTrimMaxLen(ctx context.Context, key string, maxLen int64) *IntCmd {
+ return c.xTrim(ctx, key, "maxlen", false, maxLen, 0)
+}
+
+func (c cmdable) XTrimMaxLenApprox(ctx context.Context, key string, maxLen, limit int64) *IntCmd {
+ return c.xTrim(ctx, key, "maxlen", true, maxLen, limit)
+}
+
+func (c cmdable) XTrimMinID(ctx context.Context, key string, minID string) *IntCmd {
+ return c.xTrim(ctx, key, "minid", false, minID, 0)
+}
+
+func (c cmdable) XTrimMinIDApprox(ctx context.Context, key string, minID string, limit int64) *IntCmd {
+ return c.xTrim(ctx, key, "minid", true, minID, limit)
+}
+
+func (c cmdable) XInfoConsumers(ctx context.Context, key string, group string) *XInfoConsumersCmd {
+ cmd := NewXInfoConsumersCmd(ctx, key, group)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) XInfoGroups(ctx context.Context, key string) *XInfoGroupsCmd {
+ cmd := NewXInfoGroupsCmd(ctx, key)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) XInfoStream(ctx context.Context, key string) *XInfoStreamCmd {
+ cmd := NewXInfoStreamCmd(ctx, key)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+// XInfoStreamFull XINFO STREAM FULL [COUNT count]
+// redis-server >= 6.0.
+func (c cmdable) XInfoStreamFull(ctx context.Context, key string, count int) *XInfoStreamFullCmd {
+ args := make([]interface{}, 0, 6)
+ args = append(args, "xinfo", "stream", key, "full")
+ if count > 0 {
+ args = append(args, "count", count)
+ }
+ cmd := NewXInfoStreamFullCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
diff --git a/dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.5.1/string_commands.go b/dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.5.1/string_commands.go
new file mode 100644
index 0000000..eff5880
--- /dev/null
+++ b/dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.5.1/string_commands.go
@@ -0,0 +1,303 @@
+package redis
+
+import (
+ "context"
+ "time"
+)
+
+type StringCmdable interface {
+ Append(ctx context.Context, key, value string) *IntCmd
+ Decr(ctx context.Context, key string) *IntCmd
+ DecrBy(ctx context.Context, key string, decrement int64) *IntCmd
+ Get(ctx context.Context, key string) *StringCmd
+ GetRange(ctx context.Context, key string, start, end int64) *StringCmd
+ GetSet(ctx context.Context, key string, value interface{}) *StringCmd
+ GetEx(ctx context.Context, key string, expiration time.Duration) *StringCmd
+ GetDel(ctx context.Context, key string) *StringCmd
+ Incr(ctx context.Context, key string) *IntCmd
+ IncrBy(ctx context.Context, key string, value int64) *IntCmd
+ IncrByFloat(ctx context.Context, key string, value float64) *FloatCmd
+ LCS(ctx context.Context, q *LCSQuery) *LCSCmd
+ MGet(ctx context.Context, keys ...string) *SliceCmd
+ MSet(ctx context.Context, values ...interface{}) *StatusCmd
+ MSetNX(ctx context.Context, values ...interface{}) *BoolCmd
+ Set(ctx context.Context, key string, value interface{}, expiration time.Duration) *StatusCmd
+ SetArgs(ctx context.Context, key string, value interface{}, a SetArgs) *StatusCmd
+ SetEx(ctx context.Context, key string, value interface{}, expiration time.Duration) *StatusCmd
+ SetNX(ctx context.Context, key string, value interface{}, expiration time.Duration) *BoolCmd
+ SetXX(ctx context.Context, key string, value interface{}, expiration time.Duration) *BoolCmd
+ SetRange(ctx context.Context, key string, offset int64, value string) *IntCmd
+ StrLen(ctx context.Context, key string) *IntCmd
+}
+
+func (c cmdable) Append(ctx context.Context, key, value string) *IntCmd {
+ cmd := NewIntCmd(ctx, "append", key, value)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) Decr(ctx context.Context, key string) *IntCmd {
+ cmd := NewIntCmd(ctx, "decr", key)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) DecrBy(ctx context.Context, key string, decrement int64) *IntCmd {
+ cmd := NewIntCmd(ctx, "decrby", key, decrement)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+// Get Redis `GET key` command. It returns redis.Nil error when key does not exist.
+func (c cmdable) Get(ctx context.Context, key string) *StringCmd {
+ cmd := NewStringCmd(ctx, "get", key)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) GetRange(ctx context.Context, key string, start, end int64) *StringCmd {
+ cmd := NewStringCmd(ctx, "getrange", key, start, end)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) GetSet(ctx context.Context, key string, value interface{}) *StringCmd {
+ cmd := NewStringCmd(ctx, "getset", key, value)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+// GetEx An expiration of zero removes the TTL associated with the key (i.e. GETEX key persist).
+// Requires Redis >= 6.2.0.
+func (c cmdable) GetEx(ctx context.Context, key string, expiration time.Duration) *StringCmd {
+ args := make([]interface{}, 0, 4)
+ args = append(args, "getex", key)
+ if expiration > 0 {
+ if usePrecise(expiration) {
+ args = append(args, "px", formatMs(ctx, expiration))
+ } else {
+ args = append(args, "ex", formatSec(ctx, expiration))
+ }
+ } else if expiration == 0 {
+ args = append(args, "persist")
+ }
+
+ cmd := NewStringCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+// GetDel redis-server version >= 6.2.0.
+func (c cmdable) GetDel(ctx context.Context, key string) *StringCmd {
+ cmd := NewStringCmd(ctx, "getdel", key)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) Incr(ctx context.Context, key string) *IntCmd {
+ cmd := NewIntCmd(ctx, "incr", key)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) IncrBy(ctx context.Context, key string, value int64) *IntCmd {
+ cmd := NewIntCmd(ctx, "incrby", key, value)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) IncrByFloat(ctx context.Context, key string, value float64) *FloatCmd {
+ cmd := NewFloatCmd(ctx, "incrbyfloat", key, value)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) LCS(ctx context.Context, q *LCSQuery) *LCSCmd {
+ cmd := NewLCSCmd(ctx, q)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) MGet(ctx context.Context, keys ...string) *SliceCmd {
+ args := make([]interface{}, 1+len(keys))
+ args[0] = "mget"
+ for i, key := range keys {
+ args[1+i] = key
+ }
+ cmd := NewSliceCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+// MSet is like Set but accepts multiple values:
+// - MSet("key1", "value1", "key2", "value2")
+// - MSet([]string{"key1", "value1", "key2", "value2"})
+// - MSet(map[string]interface{}{"key1": "value1", "key2": "value2"})
+// - MSet(struct), For struct types, see HSet description.
+func (c cmdable) MSet(ctx context.Context, values ...interface{}) *StatusCmd {
+ args := make([]interface{}, 1, 1+len(values))
+ args[0] = "mset"
+ args = appendArgs(args, values)
+ cmd := NewStatusCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+// MSetNX is like SetNX but accepts multiple values:
+// - MSetNX("key1", "value1", "key2", "value2")
+// - MSetNX([]string{"key1", "value1", "key2", "value2"})
+// - MSetNX(map[string]interface{}{"key1": "value1", "key2": "value2"})
+// - MSetNX(struct), For struct types, see HSet description.
+func (c cmdable) MSetNX(ctx context.Context, values ...interface{}) *BoolCmd {
+ args := make([]interface{}, 1, 1+len(values))
+ args[0] = "msetnx"
+ args = appendArgs(args, values)
+ cmd := NewBoolCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+// Set Redis `SET key value [expiration]` command.
+// Use expiration for `SETEx`-like behavior.
+//
+// Zero expiration means the key has no expiration time.
+// KeepTTL is a Redis KEEPTTL option to keep existing TTL, it requires your redis-server version >= 6.0,
+// otherwise you will receive an error: (error) ERR syntax error.
+func (c cmdable) Set(ctx context.Context, key string, value interface{}, expiration time.Duration) *StatusCmd {
+ args := make([]interface{}, 3, 5)
+ args[0] = "set"
+ args[1] = key
+ args[2] = value
+ if expiration > 0 {
+ if usePrecise(expiration) {
+ args = append(args, "px", formatMs(ctx, expiration))
+ } else {
+ args = append(args, "ex", formatSec(ctx, expiration))
+ }
+ } else if expiration == KeepTTL {
+ args = append(args, "keepttl")
+ }
+
+ cmd := NewStatusCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+// SetArgs provides arguments for the SetArgs function.
+type SetArgs struct {
+ // Mode can be `NX` or `XX` or empty.
+ Mode string
+
+ // Zero `TTL` or `Expiration` means that the key has no expiration time.
+ TTL time.Duration
+ ExpireAt time.Time
+
+ // When Get is true, the command returns the old value stored at key, or nil when key did not exist.
+ Get bool
+
+ // KeepTTL is a Redis KEEPTTL option to keep existing TTL, it requires your redis-server version >= 6.0,
+ // otherwise you will receive an error: (error) ERR syntax error.
+ KeepTTL bool
+}
+
+// SetArgs supports all the options that the SET command supports.
+// It is the alternative to the Set function when you want
+// to have more control over the options.
+func (c cmdable) SetArgs(ctx context.Context, key string, value interface{}, a SetArgs) *StatusCmd {
+ args := []interface{}{"set", key, value}
+
+ if a.KeepTTL {
+ args = append(args, "keepttl")
+ }
+
+ if !a.ExpireAt.IsZero() {
+ args = append(args, "exat", a.ExpireAt.Unix())
+ }
+ if a.TTL > 0 {
+ if usePrecise(a.TTL) {
+ args = append(args, "px", formatMs(ctx, a.TTL))
+ } else {
+ args = append(args, "ex", formatSec(ctx, a.TTL))
+ }
+ }
+
+ if a.Mode != "" {
+ args = append(args, a.Mode)
+ }
+
+ if a.Get {
+ args = append(args, "get")
+ }
+
+ cmd := NewStatusCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+// SetEx Redis `SETEx key expiration value` command.
+func (c cmdable) SetEx(ctx context.Context, key string, value interface{}, expiration time.Duration) *StatusCmd {
+ cmd := NewStatusCmd(ctx, "setex", key, formatSec(ctx, expiration), value)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+// SetNX Redis `SET key value [expiration] NX` command.
+//
+// Zero expiration means the key has no expiration time.
+// KeepTTL is a Redis KEEPTTL option to keep existing TTL, it requires your redis-server version >= 6.0,
+// otherwise you will receive an error: (error) ERR syntax error.
+func (c cmdable) SetNX(ctx context.Context, key string, value interface{}, expiration time.Duration) *BoolCmd {
+ var cmd *BoolCmd
+ switch expiration {
+ case 0:
+ // Use old `SETNX` to support old Redis versions.
+ cmd = NewBoolCmd(ctx, "setnx", key, value)
+ case KeepTTL:
+ cmd = NewBoolCmd(ctx, "set", key, value, "keepttl", "nx")
+ default:
+ if usePrecise(expiration) {
+ cmd = NewBoolCmd(ctx, "set", key, value, "px", formatMs(ctx, expiration), "nx")
+ } else {
+ cmd = NewBoolCmd(ctx, "set", key, value, "ex", formatSec(ctx, expiration), "nx")
+ }
+ }
+
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+// SetXX Redis `SET key value [expiration] XX` command.
+//
+// Zero expiration means the key has no expiration time.
+// KeepTTL is a Redis KEEPTTL option to keep existing TTL, it requires your redis-server version >= 6.0,
+// otherwise you will receive an error: (error) ERR syntax error.
+func (c cmdable) SetXX(ctx context.Context, key string, value interface{}, expiration time.Duration) *BoolCmd {
+ var cmd *BoolCmd
+ switch expiration {
+ case 0:
+ cmd = NewBoolCmd(ctx, "set", key, value, "xx")
+ case KeepTTL:
+ cmd = NewBoolCmd(ctx, "set", key, value, "keepttl", "xx")
+ default:
+ if usePrecise(expiration) {
+ cmd = NewBoolCmd(ctx, "set", key, value, "px", formatMs(ctx, expiration), "xx")
+ } else {
+ cmd = NewBoolCmd(ctx, "set", key, value, "ex", formatSec(ctx, expiration), "xx")
+ }
+ }
+
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) SetRange(ctx context.Context, key string, offset int64, value string) *IntCmd {
+ cmd := NewIntCmd(ctx, "setrange", key, offset, value)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) StrLen(ctx context.Context, key string) *IntCmd {
+ cmd := NewIntCmd(ctx, "strlen", key)
+ _ = c(ctx, cmd)
+ return cmd
+}
diff --git a/dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.5.1/timeseries_commands.go b/dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.5.1/timeseries_commands.go
new file mode 100644
index 0000000..6f1b2fa
--- /dev/null
+++ b/dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.5.1/timeseries_commands.go
@@ -0,0 +1,922 @@
+package redis
+
+import (
+ "context"
+ "strconv"
+
+ "github.com/redis/go-redis/v9/internal/proto"
+)
+
+type TimeseriesCmdable interface {
+ TSAdd(ctx context.Context, key string, timestamp interface{}, value float64) *IntCmd
+ TSAddWithArgs(ctx context.Context, key string, timestamp interface{}, value float64, options *TSOptions) *IntCmd
+ TSCreate(ctx context.Context, key string) *StatusCmd
+ TSCreateWithArgs(ctx context.Context, key string, options *TSOptions) *StatusCmd
+ TSAlter(ctx context.Context, key string, options *TSAlterOptions) *StatusCmd
+ TSCreateRule(ctx context.Context, sourceKey string, destKey string, aggregator Aggregator, bucketDuration int) *StatusCmd
+ TSCreateRuleWithArgs(ctx context.Context, sourceKey string, destKey string, aggregator Aggregator, bucketDuration int, options *TSCreateRuleOptions) *StatusCmd
+ TSIncrBy(ctx context.Context, Key string, timestamp float64) *IntCmd
+ TSIncrByWithArgs(ctx context.Context, key string, timestamp float64, options *TSIncrDecrOptions) *IntCmd
+ TSDecrBy(ctx context.Context, Key string, timestamp float64) *IntCmd
+ TSDecrByWithArgs(ctx context.Context, key string, timestamp float64, options *TSIncrDecrOptions) *IntCmd
+ TSDel(ctx context.Context, Key string, fromTimestamp int, toTimestamp int) *IntCmd
+ TSDeleteRule(ctx context.Context, sourceKey string, destKey string) *StatusCmd
+ TSGet(ctx context.Context, key string) *TSTimestampValueCmd
+ TSGetWithArgs(ctx context.Context, key string, options *TSGetOptions) *TSTimestampValueCmd
+ TSInfo(ctx context.Context, key string) *MapStringInterfaceCmd
+ TSInfoWithArgs(ctx context.Context, key string, options *TSInfoOptions) *MapStringInterfaceCmd
+ TSMAdd(ctx context.Context, ktvSlices [][]interface{}) *IntSliceCmd
+ TSQueryIndex(ctx context.Context, filterExpr []string) *StringSliceCmd
+ TSRevRange(ctx context.Context, key string, fromTimestamp int, toTimestamp int) *TSTimestampValueSliceCmd
+ TSRevRangeWithArgs(ctx context.Context, key string, fromTimestamp int, toTimestamp int, options *TSRevRangeOptions) *TSTimestampValueSliceCmd
+ TSRange(ctx context.Context, key string, fromTimestamp int, toTimestamp int) *TSTimestampValueSliceCmd
+ TSRangeWithArgs(ctx context.Context, key string, fromTimestamp int, toTimestamp int, options *TSRangeOptions) *TSTimestampValueSliceCmd
+ TSMRange(ctx context.Context, fromTimestamp int, toTimestamp int, filterExpr []string) *MapStringSliceInterfaceCmd
+ TSMRangeWithArgs(ctx context.Context, fromTimestamp int, toTimestamp int, filterExpr []string, options *TSMRangeOptions) *MapStringSliceInterfaceCmd
+ TSMRevRange(ctx context.Context, fromTimestamp int, toTimestamp int, filterExpr []string) *MapStringSliceInterfaceCmd
+ TSMRevRangeWithArgs(ctx context.Context, fromTimestamp int, toTimestamp int, filterExpr []string, options *TSMRevRangeOptions) *MapStringSliceInterfaceCmd
+ TSMGet(ctx context.Context, filters []string) *MapStringSliceInterfaceCmd
+ TSMGetWithArgs(ctx context.Context, filters []string, options *TSMGetOptions) *MapStringSliceInterfaceCmd
+}
+
+type TSOptions struct {
+ Retention int
+ ChunkSize int
+ Encoding string
+ DuplicatePolicy string
+ Labels map[string]string
+}
+type TSIncrDecrOptions struct {
+ Timestamp int64
+ Retention int
+ ChunkSize int
+ Uncompressed bool
+ Labels map[string]string
+}
+
+type TSAlterOptions struct {
+ Retention int
+ ChunkSize int
+ DuplicatePolicy string
+ Labels map[string]string
+}
+
+type TSCreateRuleOptions struct {
+ alignTimestamp int64
+}
+
+type TSGetOptions struct {
+ Latest bool
+}
+
+type TSInfoOptions struct {
+ Debug bool
+}
+type Aggregator int
+
+const (
+ Invalid = Aggregator(iota)
+ Avg
+ Sum
+ Min
+ Max
+ Range
+ Count
+ First
+ Last
+ StdP
+ StdS
+ VarP
+ VarS
+ Twa
+)
+
+func (a Aggregator) String() string {
+ switch a {
+ case Invalid:
+ return ""
+ case Avg:
+ return "AVG"
+ case Sum:
+ return "SUM"
+ case Min:
+ return "MIN"
+ case Max:
+ return "MAX"
+ case Range:
+ return "RANGE"
+ case Count:
+ return "COUNT"
+ case First:
+ return "FIRST"
+ case Last:
+ return "LAST"
+ case StdP:
+ return "STD.P"
+ case StdS:
+ return "STD.S"
+ case VarP:
+ return "VAR.P"
+ case VarS:
+ return "VAR.S"
+ case Twa:
+ return "TWA"
+ default:
+ return ""
+ }
+}
+
+type TSRangeOptions struct {
+ Latest bool
+ FilterByTS []int
+ FilterByValue []int
+ Count int
+ Align interface{}
+ Aggregator Aggregator
+ BucketDuration int
+ BucketTimestamp interface{}
+ Empty bool
+}
+
+type TSRevRangeOptions struct {
+ Latest bool
+ FilterByTS []int
+ FilterByValue []int
+ Count int
+ Align interface{}
+ Aggregator Aggregator
+ BucketDuration int
+ BucketTimestamp interface{}
+ Empty bool
+}
+
+type TSMRangeOptions struct {
+ Latest bool
+ FilterByTS []int
+ FilterByValue []int
+ WithLabels bool
+ SelectedLabels []interface{}
+ Count int
+ Align interface{}
+ Aggregator Aggregator
+ BucketDuration int
+ BucketTimestamp interface{}
+ Empty bool
+ GroupByLabel interface{}
+ Reducer interface{}
+}
+
+type TSMRevRangeOptions struct {
+ Latest bool
+ FilterByTS []int
+ FilterByValue []int
+ WithLabels bool
+ SelectedLabels []interface{}
+ Count int
+ Align interface{}
+ Aggregator Aggregator
+ BucketDuration int
+ BucketTimestamp interface{}
+ Empty bool
+ GroupByLabel interface{}
+ Reducer interface{}
+}
+
+type TSMGetOptions struct {
+ Latest bool
+ WithLabels bool
+ SelectedLabels []interface{}
+}
+
+// TSAdd - Adds one or more observations to a t-digest sketch.
+// For more information - https://redis.io/commands/ts.add/
+func (c cmdable) TSAdd(ctx context.Context, key string, timestamp interface{}, value float64) *IntCmd {
+ args := []interface{}{"TS.ADD", key, timestamp, value}
+ cmd := NewIntCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+// TSAddWithArgs - Adds one or more observations to a t-digest sketch.
+// This function also allows for specifying additional options such as:
+// Retention, ChunkSize, Encoding, DuplicatePolicy and Labels.
+// For more information - https://redis.io/commands/ts.add/
+func (c cmdable) TSAddWithArgs(ctx context.Context, key string, timestamp interface{}, value float64, options *TSOptions) *IntCmd {
+ args := []interface{}{"TS.ADD", key, timestamp, value}
+ if options != nil {
+ if options.Retention != 0 {
+ args = append(args, "RETENTION", options.Retention)
+ }
+ if options.ChunkSize != 0 {
+ args = append(args, "CHUNK_SIZE", options.ChunkSize)
+ }
+ if options.Encoding != "" {
+ args = append(args, "ENCODING", options.Encoding)
+ }
+
+ if options.DuplicatePolicy != "" {
+ args = append(args, "DUPLICATE_POLICY", options.DuplicatePolicy)
+ }
+ if options.Labels != nil {
+ args = append(args, "LABELS")
+ for label, value := range options.Labels {
+ args = append(args, label, value)
+ }
+ }
+ }
+ cmd := NewIntCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+// TSCreate - Creates a new time-series key.
+// For more information - https://redis.io/commands/ts.create/
+func (c cmdable) TSCreate(ctx context.Context, key string) *StatusCmd {
+ args := []interface{}{"TS.CREATE", key}
+ cmd := NewStatusCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+// TSCreateWithArgs - Creates a new time-series key with additional options.
+// This function allows for specifying additional options such as:
+// Retention, ChunkSize, Encoding, DuplicatePolicy and Labels.
+// For more information - https://redis.io/commands/ts.create/
+func (c cmdable) TSCreateWithArgs(ctx context.Context, key string, options *TSOptions) *StatusCmd {
+ args := []interface{}{"TS.CREATE", key}
+ if options != nil {
+ if options.Retention != 0 {
+ args = append(args, "RETENTION", options.Retention)
+ }
+ if options.ChunkSize != 0 {
+ args = append(args, "CHUNK_SIZE", options.ChunkSize)
+ }
+ if options.Encoding != "" {
+ args = append(args, "ENCODING", options.Encoding)
+ }
+
+ if options.DuplicatePolicy != "" {
+ args = append(args, "DUPLICATE_POLICY", options.DuplicatePolicy)
+ }
+ if options.Labels != nil {
+ args = append(args, "LABELS")
+ for label, value := range options.Labels {
+ args = append(args, label, value)
+ }
+ }
+ }
+ cmd := NewStatusCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+// TSAlter - Alters an existing time-series key with additional options.
+// This function allows for specifying additional options such as:
+// Retention, ChunkSize and DuplicatePolicy.
+// For more information - https://redis.io/commands/ts.alter/
+func (c cmdable) TSAlter(ctx context.Context, key string, options *TSAlterOptions) *StatusCmd {
+ args := []interface{}{"TS.ALTER", key}
+ if options != nil {
+ if options.Retention != 0 {
+ args = append(args, "RETENTION", options.Retention)
+ }
+ if options.ChunkSize != 0 {
+ args = append(args, "CHUNK_SIZE", options.ChunkSize)
+ }
+ if options.DuplicatePolicy != "" {
+ args = append(args, "DUPLICATE_POLICY", options.DuplicatePolicy)
+ }
+ if options.Labels != nil {
+ args = append(args, "LABELS")
+ for label, value := range options.Labels {
+ args = append(args, label, value)
+ }
+ }
+ }
+ cmd := NewStatusCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+// TSCreateRule - Creates a compaction rule from sourceKey to destKey.
+// For more information - https://redis.io/commands/ts.createrule/
+func (c cmdable) TSCreateRule(ctx context.Context, sourceKey string, destKey string, aggregator Aggregator, bucketDuration int) *StatusCmd {
+ args := []interface{}{"TS.CREATERULE", sourceKey, destKey, "AGGREGATION", aggregator.String(), bucketDuration}
+ cmd := NewStatusCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+// TSCreateRuleWithArgs - Creates a compaction rule from sourceKey to destKey with additional option.
+// This function allows for specifying additional option such as:
+// alignTimestamp.
+// For more information - https://redis.io/commands/ts.createrule/
+func (c cmdable) TSCreateRuleWithArgs(ctx context.Context, sourceKey string, destKey string, aggregator Aggregator, bucketDuration int, options *TSCreateRuleOptions) *StatusCmd {
+ args := []interface{}{"TS.CREATERULE", sourceKey, destKey, "AGGREGATION", aggregator.String(), bucketDuration}
+ if options != nil {
+ if options.alignTimestamp != 0 {
+ args = append(args, options.alignTimestamp)
+ }
+ }
+ cmd := NewStatusCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+// TSIncrBy - Increments the value of a time-series key by the specified timestamp.
+// For more information - https://redis.io/commands/ts.incrby/
+func (c cmdable) TSIncrBy(ctx context.Context, Key string, timestamp float64) *IntCmd {
+ args := []interface{}{"TS.INCRBY", Key, timestamp}
+ cmd := NewIntCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+// TSIncrByWithArgs - Increments the value of a time-series key by the specified timestamp with additional options.
+// This function allows for specifying additional options such as:
+// Timestamp, Retention, ChunkSize, Uncompressed and Labels.
+// For more information - https://redis.io/commands/ts.incrby/
+func (c cmdable) TSIncrByWithArgs(ctx context.Context, key string, timestamp float64, options *TSIncrDecrOptions) *IntCmd {
+ args := []interface{}{"TS.INCRBY", key, timestamp}
+ if options != nil {
+ if options.Timestamp != 0 {
+ args = append(args, "TIMESTAMP", options.Timestamp)
+ }
+ if options.Retention != 0 {
+ args = append(args, "RETENTION", options.Retention)
+ }
+ if options.ChunkSize != 0 {
+ args = append(args, "CHUNK_SIZE", options.ChunkSize)
+ }
+ if options.Uncompressed {
+ args = append(args, "UNCOMPRESSED")
+ }
+ if options.Labels != nil {
+ args = append(args, "LABELS")
+ for label, value := range options.Labels {
+ args = append(args, label, value)
+ }
+ }
+ }
+ cmd := NewIntCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+// TSDecrBy - Decrements the value of a time-series key by the specified timestamp.
+// For more information - https://redis.io/commands/ts.decrby/
+func (c cmdable) TSDecrBy(ctx context.Context, Key string, timestamp float64) *IntCmd {
+ args := []interface{}{"TS.DECRBY", Key, timestamp}
+ cmd := NewIntCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+// TSDecrByWithArgs - Decrements the value of a time-series key by the specified timestamp with additional options.
+// This function allows for specifying additional options such as:
+// Timestamp, Retention, ChunkSize, Uncompressed and Labels.
+// For more information - https://redis.io/commands/ts.decrby/
+func (c cmdable) TSDecrByWithArgs(ctx context.Context, key string, timestamp float64, options *TSIncrDecrOptions) *IntCmd {
+ args := []interface{}{"TS.DECRBY", key, timestamp}
+ if options != nil {
+ if options.Timestamp != 0 {
+ args = append(args, "TIMESTAMP", options.Timestamp)
+ }
+ if options.Retention != 0 {
+ args = append(args, "RETENTION", options.Retention)
+ }
+ if options.ChunkSize != 0 {
+ args = append(args, "CHUNK_SIZE", options.ChunkSize)
+ }
+ if options.Uncompressed {
+ args = append(args, "UNCOMPRESSED")
+ }
+ if options.Labels != nil {
+ args = append(args, "LABELS")
+ for label, value := range options.Labels {
+ args = append(args, label, value)
+ }
+ }
+ }
+ cmd := NewIntCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+// TSDel - Deletes a range of samples from a time-series key.
+// For more information - https://redis.io/commands/ts.del/
+func (c cmdable) TSDel(ctx context.Context, Key string, fromTimestamp int, toTimestamp int) *IntCmd {
+ args := []interface{}{"TS.DEL", Key, fromTimestamp, toTimestamp}
+ cmd := NewIntCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+// TSDeleteRule - Deletes a compaction rule from sourceKey to destKey.
+// For more information - https://redis.io/commands/ts.deleterule/
+func (c cmdable) TSDeleteRule(ctx context.Context, sourceKey string, destKey string) *StatusCmd {
+ args := []interface{}{"TS.DELETERULE", sourceKey, destKey}
+ cmd := NewStatusCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+// TSGetWithArgs - Gets the last sample of a time-series key with additional option.
+// This function allows for specifying additional option such as:
+// Latest.
+// For more information - https://redis.io/commands/ts.get/
+func (c cmdable) TSGetWithArgs(ctx context.Context, key string, options *TSGetOptions) *TSTimestampValueCmd {
+ args := []interface{}{"TS.GET", key}
+ if options != nil {
+ if options.Latest {
+ args = append(args, "LATEST")
+ }
+ }
+ cmd := newTSTimestampValueCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+// TSGet - Gets the last sample of a time-series key.
+// For more information - https://redis.io/commands/ts.get/
+func (c cmdable) TSGet(ctx context.Context, key string) *TSTimestampValueCmd {
+ args := []interface{}{"TS.GET", key}
+ cmd := newTSTimestampValueCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+type TSTimestampValue struct {
+ Timestamp int64
+ Value float64
+}
+type TSTimestampValueCmd struct {
+ baseCmd
+ val TSTimestampValue
+}
+
+func newTSTimestampValueCmd(ctx context.Context, args ...interface{}) *TSTimestampValueCmd {
+ return &TSTimestampValueCmd{
+ baseCmd: baseCmd{
+ ctx: ctx,
+ args: args,
+ },
+ }
+}
+
+func (cmd *TSTimestampValueCmd) String() string {
+ return cmdString(cmd, cmd.val)
+}
+
+func (cmd *TSTimestampValueCmd) SetVal(val TSTimestampValue) {
+ cmd.val = val
+}
+
+func (cmd *TSTimestampValueCmd) Result() (TSTimestampValue, error) {
+ return cmd.val, cmd.err
+}
+
+func (cmd *TSTimestampValueCmd) Val() TSTimestampValue {
+ return cmd.val
+}
+
+func (cmd *TSTimestampValueCmd) readReply(rd *proto.Reader) (err error) {
+ n, err := rd.ReadMapLen()
+ if err != nil {
+ return err
+ }
+ cmd.val = TSTimestampValue{}
+ for i := 0; i < n; i++ {
+ timestamp, err := rd.ReadInt()
+ if err != nil {
+ return err
+ }
+ value, err := rd.ReadString()
+ if err != nil {
+ return err
+ }
+ cmd.val.Timestamp = timestamp
+ cmd.val.Value, err = strconv.ParseFloat(value, 64)
+ if err != nil {
+ return err
+ }
+ }
+
+ return nil
+}
+
+// TSInfo - Returns information about a time-series key.
+// For more information - https://redis.io/commands/ts.info/
+func (c cmdable) TSInfo(ctx context.Context, key string) *MapStringInterfaceCmd {
+ args := []interface{}{"TS.INFO", key}
+ cmd := NewMapStringInterfaceCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+// TSInfoWithArgs - Returns information about a time-series key with additional option.
+// This function allows for specifying additional option such as:
+// Debug.
+// For more information - https://redis.io/commands/ts.info/
+func (c cmdable) TSInfoWithArgs(ctx context.Context, key string, options *TSInfoOptions) *MapStringInterfaceCmd {
+ args := []interface{}{"TS.INFO", key}
+ if options != nil {
+ if options.Debug {
+ args = append(args, "DEBUG")
+ }
+ }
+ cmd := NewMapStringInterfaceCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+// TSMAdd - Adds multiple samples to multiple time-series keys.
+// It accepts a slice of 'ktv' slices, each containing exactly three elements: key, timestamp, and value.
+// This struct must be provided for this command to work.
+// For more information - https://redis.io/commands/ts.madd/
+func (c cmdable) TSMAdd(ctx context.Context, ktvSlices [][]interface{}) *IntSliceCmd {
+ args := []interface{}{"TS.MADD"}
+ for _, ktv := range ktvSlices {
+ args = append(args, ktv...)
+ }
+ cmd := NewIntSliceCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+// TSQueryIndex - Returns all the keys matching the filter expression.
+// For more information - https://redis.io/commands/ts.queryindex/
+func (c cmdable) TSQueryIndex(ctx context.Context, filterExpr []string) *StringSliceCmd {
+ args := []interface{}{"TS.QUERYINDEX"}
+ for _, f := range filterExpr {
+ args = append(args, f)
+ }
+ cmd := NewStringSliceCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+// TSRevRange - Returns a range of samples from a time-series key in reverse order.
+// For more information - https://redis.io/commands/ts.revrange/
+func (c cmdable) TSRevRange(ctx context.Context, key string, fromTimestamp int, toTimestamp int) *TSTimestampValueSliceCmd {
+ args := []interface{}{"TS.REVRANGE", key, fromTimestamp, toTimestamp}
+ cmd := newTSTimestampValueSliceCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+// TSRevRangeWithArgs - Returns a range of samples from a time-series key in reverse order with additional options.
+// This function allows for specifying additional options such as:
+// Latest, FilterByTS, FilterByValue, Count, Align, Aggregator,
+// BucketDuration, BucketTimestamp and Empty.
+// For more information - https://redis.io/commands/ts.revrange/
+func (c cmdable) TSRevRangeWithArgs(ctx context.Context, key string, fromTimestamp int, toTimestamp int, options *TSRevRangeOptions) *TSTimestampValueSliceCmd {
+ args := []interface{}{"TS.REVRANGE", key, fromTimestamp, toTimestamp}
+ if options != nil {
+ if options.Latest {
+ args = append(args, "LATEST")
+ }
+ if options.FilterByTS != nil {
+ args = append(args, "FILTER_BY_TS")
+ for _, f := range options.FilterByTS {
+ args = append(args, f)
+ }
+ }
+ if options.FilterByValue != nil {
+ args = append(args, "FILTER_BY_VALUE")
+ for _, f := range options.FilterByValue {
+ args = append(args, f)
+ }
+ }
+ if options.Count != 0 {
+ args = append(args, "COUNT", options.Count)
+ }
+ if options.Align != nil {
+ args = append(args, "ALIGN", options.Align)
+ }
+ if options.Aggregator != 0 {
+ args = append(args, "AGGREGATION", options.Aggregator.String())
+ }
+ if options.BucketDuration != 0 {
+ args = append(args, options.BucketDuration)
+ }
+ if options.BucketTimestamp != nil {
+ args = append(args, "BUCKETTIMESTAMP", options.BucketTimestamp)
+ }
+ if options.Empty {
+ args = append(args, "EMPTY")
+ }
+ }
+ cmd := newTSTimestampValueSliceCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+// TSRange - Returns a range of samples from a time-series key.
+// For more information - https://redis.io/commands/ts.range/
+func (c cmdable) TSRange(ctx context.Context, key string, fromTimestamp int, toTimestamp int) *TSTimestampValueSliceCmd {
+ args := []interface{}{"TS.RANGE", key, fromTimestamp, toTimestamp}
+ cmd := newTSTimestampValueSliceCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+// TSRangeWithArgs - Returns a range of samples from a time-series key with additional options.
+// This function allows for specifying additional options such as:
+// Latest, FilterByTS, FilterByValue, Count, Align, Aggregator,
+// BucketDuration, BucketTimestamp and Empty.
+// For more information - https://redis.io/commands/ts.range/
+func (c cmdable) TSRangeWithArgs(ctx context.Context, key string, fromTimestamp int, toTimestamp int, options *TSRangeOptions) *TSTimestampValueSliceCmd {
+ args := []interface{}{"TS.RANGE", key, fromTimestamp, toTimestamp}
+ if options != nil {
+ if options.Latest {
+ args = append(args, "LATEST")
+ }
+ if options.FilterByTS != nil {
+ args = append(args, "FILTER_BY_TS")
+ for _, f := range options.FilterByTS {
+ args = append(args, f)
+ }
+ }
+ if options.FilterByValue != nil {
+ args = append(args, "FILTER_BY_VALUE")
+ for _, f := range options.FilterByValue {
+ args = append(args, f)
+ }
+ }
+ if options.Count != 0 {
+ args = append(args, "COUNT", options.Count)
+ }
+ if options.Align != nil {
+ args = append(args, "ALIGN", options.Align)
+ }
+ if options.Aggregator != 0 {
+ args = append(args, "AGGREGATION", options.Aggregator.String())
+ }
+ if options.BucketDuration != 0 {
+ args = append(args, options.BucketDuration)
+ }
+ if options.BucketTimestamp != nil {
+ args = append(args, "BUCKETTIMESTAMP", options.BucketTimestamp)
+ }
+ if options.Empty {
+ args = append(args, "EMPTY")
+ }
+ }
+ cmd := newTSTimestampValueSliceCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+type TSTimestampValueSliceCmd struct {
+ baseCmd
+ val []TSTimestampValue
+}
+
+func newTSTimestampValueSliceCmd(ctx context.Context, args ...interface{}) *TSTimestampValueSliceCmd {
+ return &TSTimestampValueSliceCmd{
+ baseCmd: baseCmd{
+ ctx: ctx,
+ args: args,
+ },
+ }
+}
+
+func (cmd *TSTimestampValueSliceCmd) String() string {
+ return cmdString(cmd, cmd.val)
+}
+
+func (cmd *TSTimestampValueSliceCmd) SetVal(val []TSTimestampValue) {
+ cmd.val = val
+}
+
+func (cmd *TSTimestampValueSliceCmd) Result() ([]TSTimestampValue, error) {
+ return cmd.val, cmd.err
+}
+
+func (cmd *TSTimestampValueSliceCmd) Val() []TSTimestampValue {
+ return cmd.val
+}
+
+func (cmd *TSTimestampValueSliceCmd) readReply(rd *proto.Reader) (err error) {
+ n, err := rd.ReadArrayLen()
+ if err != nil {
+ return err
+ }
+ cmd.val = make([]TSTimestampValue, n)
+ for i := 0; i < n; i++ {
+ _, _ = rd.ReadArrayLen()
+ timestamp, err := rd.ReadInt()
+ if err != nil {
+ return err
+ }
+ value, err := rd.ReadString()
+ if err != nil {
+ return err
+ }
+ cmd.val[i].Timestamp = timestamp
+ cmd.val[i].Value, err = strconv.ParseFloat(value, 64)
+ if err != nil {
+ return err
+ }
+ }
+
+ return nil
+}
+
+// TSMRange - Returns a range of samples from multiple time-series keys.
+// For more information - https://redis.io/commands/ts.mrange/
+func (c cmdable) TSMRange(ctx context.Context, fromTimestamp int, toTimestamp int, filterExpr []string) *MapStringSliceInterfaceCmd {
+ args := []interface{}{"TS.MRANGE", fromTimestamp, toTimestamp, "FILTER"}
+ for _, f := range filterExpr {
+ args = append(args, f)
+ }
+ cmd := NewMapStringSliceInterfaceCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+// TSMRangeWithArgs - Returns a range of samples from multiple time-series keys with additional options.
+// This function allows for specifying additional options such as:
+// Latest, FilterByTS, FilterByValue, WithLabels, SelectedLabels,
+// Count, Align, Aggregator, BucketDuration, BucketTimestamp,
+// Empty, GroupByLabel and Reducer.
+// For more information - https://redis.io/commands/ts.mrange/
+func (c cmdable) TSMRangeWithArgs(ctx context.Context, fromTimestamp int, toTimestamp int, filterExpr []string, options *TSMRangeOptions) *MapStringSliceInterfaceCmd {
+ args := []interface{}{"TS.MRANGE", fromTimestamp, toTimestamp}
+ if options != nil {
+ if options.Latest {
+ args = append(args, "LATEST")
+ }
+ if options.FilterByTS != nil {
+ args = append(args, "FILTER_BY_TS")
+ for _, f := range options.FilterByTS {
+ args = append(args, f)
+ }
+ }
+ if options.FilterByValue != nil {
+ args = append(args, "FILTER_BY_VALUE")
+ for _, f := range options.FilterByValue {
+ args = append(args, f)
+ }
+ }
+ if options.WithLabels {
+ args = append(args, "WITHLABELS")
+ }
+ if options.SelectedLabels != nil {
+ args = append(args, "SELECTED_LABELS")
+ args = append(args, options.SelectedLabels...)
+ }
+ if options.Count != 0 {
+ args = append(args, "COUNT", options.Count)
+ }
+ if options.Align != nil {
+ args = append(args, "ALIGN", options.Align)
+ }
+ if options.Aggregator != 0 {
+ args = append(args, "AGGREGATION", options.Aggregator.String())
+ }
+ if options.BucketDuration != 0 {
+ args = append(args, options.BucketDuration)
+ }
+ if options.BucketTimestamp != nil {
+ args = append(args, "BUCKETTIMESTAMP", options.BucketTimestamp)
+ }
+ if options.Empty {
+ args = append(args, "EMPTY")
+ }
+ }
+ args = append(args, "FILTER")
+ for _, f := range filterExpr {
+ args = append(args, f)
+ }
+ if options != nil {
+ if options.GroupByLabel != nil {
+ args = append(args, "GROUPBY", options.GroupByLabel)
+ }
+ if options.Reducer != nil {
+ args = append(args, "REDUCE", options.Reducer)
+ }
+ }
+ cmd := NewMapStringSliceInterfaceCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+// TSMRevRange - Returns a range of samples from multiple time-series keys in reverse order.
+// For more information - https://redis.io/commands/ts.mrevrange/
+func (c cmdable) TSMRevRange(ctx context.Context, fromTimestamp int, toTimestamp int, filterExpr []string) *MapStringSliceInterfaceCmd {
+ args := []interface{}{"TS.MREVRANGE", fromTimestamp, toTimestamp, "FILTER"}
+ for _, f := range filterExpr {
+ args = append(args, f)
+ }
+ cmd := NewMapStringSliceInterfaceCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+// TSMRevRangeWithArgs - Returns a range of samples from multiple time-series keys in reverse order with additional options.
+// This function allows for specifying additional options such as:
+// Latest, FilterByTS, FilterByValue, WithLabels, SelectedLabels,
+// Count, Align, Aggregator, BucketDuration, BucketTimestamp,
+// Empty, GroupByLabel and Reducer.
+// For more information - https://redis.io/commands/ts.mrevrange/
+func (c cmdable) TSMRevRangeWithArgs(ctx context.Context, fromTimestamp int, toTimestamp int, filterExpr []string, options *TSMRevRangeOptions) *MapStringSliceInterfaceCmd {
+ args := []interface{}{"TS.MREVRANGE", fromTimestamp, toTimestamp}
+ if options != nil {
+ if options.Latest {
+ args = append(args, "LATEST")
+ }
+ if options.FilterByTS != nil {
+ args = append(args, "FILTER_BY_TS")
+ for _, f := range options.FilterByTS {
+ args = append(args, f)
+ }
+ }
+ if options.FilterByValue != nil {
+ args = append(args, "FILTER_BY_VALUE")
+ for _, f := range options.FilterByValue {
+ args = append(args, f)
+ }
+ }
+ if options.WithLabels {
+ args = append(args, "WITHLABELS")
+ }
+ if options.SelectedLabels != nil {
+ args = append(args, "SELECTED_LABELS")
+ args = append(args, options.SelectedLabels...)
+ }
+ if options.Count != 0 {
+ args = append(args, "COUNT", options.Count)
+ }
+ if options.Align != nil {
+ args = append(args, "ALIGN", options.Align)
+ }
+ if options.Aggregator != 0 {
+ args = append(args, "AGGREGATION", options.Aggregator.String())
+ }
+ if options.BucketDuration != 0 {
+ args = append(args, options.BucketDuration)
+ }
+ if options.BucketTimestamp != nil {
+ args = append(args, "BUCKETTIMESTAMP", options.BucketTimestamp)
+ }
+ if options.Empty {
+ args = append(args, "EMPTY")
+ }
+ }
+ args = append(args, "FILTER")
+ for _, f := range filterExpr {
+ args = append(args, f)
+ }
+ if options != nil {
+ if options.GroupByLabel != nil {
+ args = append(args, "GROUPBY", options.GroupByLabel)
+ }
+ if options.Reducer != nil {
+ args = append(args, "REDUCE", options.Reducer)
+ }
+ }
+ cmd := NewMapStringSliceInterfaceCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+// TSMGet - Returns the last sample of multiple time-series keys.
+// For more information - https://redis.io/commands/ts.mget/
+func (c cmdable) TSMGet(ctx context.Context, filters []string) *MapStringSliceInterfaceCmd {
+ args := []interface{}{"TS.MGET", "FILTER"}
+ for _, f := range filters {
+ args = append(args, f)
+ }
+ cmd := NewMapStringSliceInterfaceCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+// TSMGetWithArgs - Returns the last sample of multiple time-series keys with additional options.
+// This function allows for specifying additional options such as:
+// Latest, WithLabels and SelectedLabels.
+// For more information - https://redis.io/commands/ts.mget/
+func (c cmdable) TSMGetWithArgs(ctx context.Context, filters []string, options *TSMGetOptions) *MapStringSliceInterfaceCmd {
+ args := []interface{}{"TS.MGET"}
+ if options != nil {
+ if options.Latest {
+ args = append(args, "LATEST")
+ }
+ if options.WithLabels {
+ args = append(args, "WITHLABELS")
+ }
+ if options.SelectedLabels != nil {
+ args = append(args, "SELECTED_LABELS")
+ args = append(args, options.SelectedLabels...)
+ }
+ }
+ args = append(args, "FILTER")
+ for _, f := range filters {
+ args = append(args, f)
+ }
+ cmd := NewMapStringSliceInterfaceCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
diff --git a/dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.5.1/timeseries_commands_test.go b/dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.5.1/timeseries_commands_test.go
new file mode 100644
index 0000000..563f24e
--- /dev/null
+++ b/dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.5.1/timeseries_commands_test.go
@@ -0,0 +1,940 @@
+package redis_test
+
+import (
+ "context"
+ "strings"
+
+ . "github.com/bsm/ginkgo/v2"
+ . "github.com/bsm/gomega"
+
+ "github.com/redis/go-redis/v9"
+)
+
+var _ = Describe("RedisTimeseries commands", Label("timeseries"), func() {
+ ctx := context.TODO()
+ var client *redis.Client
+
+ BeforeEach(func() {
+ client = redis.NewClient(&redis.Options{Addr: rediStackAddr})
+ Expect(client.FlushDB(ctx).Err()).NotTo(HaveOccurred())
+ })
+
+ AfterEach(func() {
+ Expect(client.Close()).NotTo(HaveOccurred())
+ })
+
+ It("should TSCreate and TSCreateWithArgs", Label("timeseries", "tscreate", "tscreateWithArgs"), func() {
+ result, err := client.TSCreate(ctx, "1").Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(result).To(BeEquivalentTo("OK"))
+ // Test TSCreateWithArgs
+ opt := &redis.TSOptions{Retention: 5}
+ result, err = client.TSCreateWithArgs(ctx, "2", opt).Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(result).To(BeEquivalentTo("OK"))
+ opt = &redis.TSOptions{Labels: map[string]string{"Redis": "Labs"}}
+ result, err = client.TSCreateWithArgs(ctx, "3", opt).Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(result).To(BeEquivalentTo("OK"))
+ opt = &redis.TSOptions{Labels: map[string]string{"Time": "Series"}, Retention: 20}
+ result, err = client.TSCreateWithArgs(ctx, "4", opt).Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(result).To(BeEquivalentTo("OK"))
+ resultInfo, err := client.TSInfo(ctx, "4").Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(resultInfo["labels"].(map[interface{}]interface{})["Time"]).To(BeEquivalentTo("Series"))
+ // Test chunk size
+ opt = &redis.TSOptions{ChunkSize: 128}
+ result, err = client.TSCreateWithArgs(ctx, "ts-cs-1", opt).Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(result).To(BeEquivalentTo("OK"))
+ resultInfo, err = client.TSInfo(ctx, "ts-cs-1").Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(resultInfo["chunkSize"]).To(BeEquivalentTo(128))
+ // Test duplicate policy
+ duplicate_policies := []string{"BLOCK", "LAST", "FIRST", "MIN", "MAX"}
+ for _, dup := range duplicate_policies {
+ keyName := "ts-dup-" + dup
+ opt = &redis.TSOptions{DuplicatePolicy: dup}
+ result, err = client.TSCreateWithArgs(ctx, keyName, opt).Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(result).To(BeEquivalentTo("OK"))
+ resultInfo, err = client.TSInfo(ctx, keyName).Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(strings.ToUpper(resultInfo["duplicatePolicy"].(string))).To(BeEquivalentTo(dup))
+
+ }
+ })
+ It("should TSAdd and TSAddWithArgs", Label("timeseries", "tsadd", "tsaddWithArgs"), func() {
+ result, err := client.TSAdd(ctx, "1", 1, 1).Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(result).To(BeEquivalentTo(1))
+ // Test TSAddWithArgs
+ opt := &redis.TSOptions{Retention: 10}
+ result, err = client.TSAddWithArgs(ctx, "2", 2, 3, opt).Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(result).To(BeEquivalentTo(2))
+ opt = &redis.TSOptions{Labels: map[string]string{"Redis": "Labs"}}
+ result, err = client.TSAddWithArgs(ctx, "3", 3, 2, opt).Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(result).To(BeEquivalentTo(3))
+ opt = &redis.TSOptions{Labels: map[string]string{"Redis": "Labs", "Time": "Series"}, Retention: 10}
+ result, err = client.TSAddWithArgs(ctx, "4", 4, 2, opt).Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(result).To(BeEquivalentTo(4))
+ resultInfo, err := client.TSInfo(ctx, "4").Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(resultInfo["labels"].(map[interface{}]interface{})["Time"]).To(BeEquivalentTo("Series"))
+ // Test chunk size
+ opt = &redis.TSOptions{ChunkSize: 128}
+ result, err = client.TSAddWithArgs(ctx, "ts-cs-1", 1, 10, opt).Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(result).To(BeEquivalentTo(1))
+ resultInfo, err = client.TSInfo(ctx, "ts-cs-1").Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(resultInfo["chunkSize"]).To(BeEquivalentTo(128))
+ // Test duplicate policy
+ // LAST
+ opt = &redis.TSOptions{DuplicatePolicy: "LAST"}
+ result, err = client.TSAddWithArgs(ctx, "tsal-1", 1, 5, opt).Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(result).To(BeEquivalentTo(1))
+ result, err = client.TSAddWithArgs(ctx, "tsal-1", 1, 10, opt).Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(result).To(BeEquivalentTo(1))
+ resultGet, err := client.TSGet(ctx, "tsal-1").Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(resultGet.Value).To(BeEquivalentTo(10))
+ // FIRST
+ opt = &redis.TSOptions{DuplicatePolicy: "FIRST"}
+ result, err = client.TSAddWithArgs(ctx, "tsaf-1", 1, 5, opt).Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(result).To(BeEquivalentTo(1))
+ result, err = client.TSAddWithArgs(ctx, "tsaf-1", 1, 10, opt).Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(result).To(BeEquivalentTo(1))
+ resultGet, err = client.TSGet(ctx, "tsaf-1").Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(resultGet.Value).To(BeEquivalentTo(5))
+ // MAX
+ opt = &redis.TSOptions{DuplicatePolicy: "MAX"}
+ result, err = client.TSAddWithArgs(ctx, "tsam-1", 1, 5, opt).Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(result).To(BeEquivalentTo(1))
+ result, err = client.TSAddWithArgs(ctx, "tsam-1", 1, 10, opt).Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(result).To(BeEquivalentTo(1))
+ resultGet, err = client.TSGet(ctx, "tsam-1").Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(resultGet.Value).To(BeEquivalentTo(10))
+ // MIN
+ opt = &redis.TSOptions{DuplicatePolicy: "MIN"}
+ result, err = client.TSAddWithArgs(ctx, "tsami-1", 1, 5, opt).Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(result).To(BeEquivalentTo(1))
+ result, err = client.TSAddWithArgs(ctx, "tsami-1", 1, 10, opt).Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(result).To(BeEquivalentTo(1))
+ resultGet, err = client.TSGet(ctx, "tsami-1").Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(resultGet.Value).To(BeEquivalentTo(5))
+ })
+
+ It("should TSAlter", Label("timeseries", "tsalter"), func() {
+ result, err := client.TSCreate(ctx, "1").Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(result).To(BeEquivalentTo("OK"))
+ resultInfo, err := client.TSInfo(ctx, "1").Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(resultInfo["retentionTime"]).To(BeEquivalentTo(0))
+
+ opt := &redis.TSAlterOptions{Retention: 10}
+ resultAlter, err := client.TSAlter(ctx, "1", opt).Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(resultAlter).To(BeEquivalentTo("OK"))
+
+ resultInfo, err = client.TSInfo(ctx, "1").Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(resultInfo["retentionTime"]).To(BeEquivalentTo(10))
+
+ resultInfo, err = client.TSInfo(ctx, "1").Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(resultInfo["labels"]).To(BeEquivalentTo(map[interface{}]interface{}{}))
+
+ opt = &redis.TSAlterOptions{Labels: map[string]string{"Time": "Series"}}
+ resultAlter, err = client.TSAlter(ctx, "1", opt).Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(resultAlter).To(BeEquivalentTo("OK"))
+
+ resultInfo, err = client.TSInfo(ctx, "1").Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(resultInfo["labels"].(map[interface{}]interface{})["Time"]).To(BeEquivalentTo("Series"))
+ Expect(resultInfo["retentionTime"]).To(BeEquivalentTo(10))
+ Expect(resultInfo["duplicatePolicy"]).To(BeEquivalentTo(redis.Nil))
+ opt = &redis.TSAlterOptions{DuplicatePolicy: "min"}
+ resultAlter, err = client.TSAlter(ctx, "1", opt).Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(resultAlter).To(BeEquivalentTo("OK"))
+
+ resultInfo, err = client.TSInfo(ctx, "1").Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(resultInfo["duplicatePolicy"]).To(BeEquivalentTo("min"))
+ })
+
+ It("should TSCreateRule and TSDeleteRule", Label("timeseries", "tscreaterule", "tsdeleterule"), func() {
+ result, err := client.TSCreate(ctx, "1").Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(result).To(BeEquivalentTo("OK"))
+ result, err = client.TSCreate(ctx, "2").Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(result).To(BeEquivalentTo("OK"))
+ result, err = client.TSCreateRule(ctx, "1", "2", redis.Avg, 100).Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(result).To(BeEquivalentTo("OK"))
+ for i := 0; i < 50; i++ {
+ resultAdd, err := client.TSAdd(ctx, "1", 100+i*2, 1).Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(resultAdd).To(BeEquivalentTo(100 + i*2))
+ resultAdd, err = client.TSAdd(ctx, "1", 100+i*2+1, 2).Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(resultAdd).To(BeEquivalentTo(100 + i*2 + 1))
+
+ }
+ resultAdd, err := client.TSAdd(ctx, "1", 100*2, 1.5).Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(resultAdd).To(BeEquivalentTo(100 * 2))
+ resultGet, err := client.TSGet(ctx, "2").Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(resultGet.Value).To(BeEquivalentTo(1.5))
+ Expect(resultGet.Timestamp).To(BeEquivalentTo(100))
+
+ resultDeleteRule, err := client.TSDeleteRule(ctx, "1", "2").Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(resultDeleteRule).To(BeEquivalentTo("OK"))
+ resultInfo, err := client.TSInfo(ctx, "1").Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(resultInfo["rules"]).To(BeEquivalentTo(map[interface{}]interface{}{}))
+ })
+
+ It("should TSIncrBy, TSIncrByWithArgs, TSDecrBy and TSDecrByWithArgs", Label("timeseries", "tsincrby", "tsdecrby", "tsincrbyWithArgs", "tsdecrbyWithArgs"), func() {
+ for i := 0; i < 100; i++ {
+ _, err := client.TSIncrBy(ctx, "1", 1).Result()
+ Expect(err).NotTo(HaveOccurred())
+ }
+ result, err := client.TSGet(ctx, "1").Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(result.Value).To(BeEquivalentTo(100))
+
+ for i := 0; i < 100; i++ {
+ _, err := client.TSDecrBy(ctx, "1", 1).Result()
+ Expect(err).NotTo(HaveOccurred())
+ }
+ result, err = client.TSGet(ctx, "1").Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(result.Value).To(BeEquivalentTo(0))
+
+ opt := &redis.TSIncrDecrOptions{Timestamp: 5}
+ _, err = client.TSIncrByWithArgs(ctx, "2", 1.5, opt).Result()
+ Expect(err).NotTo(HaveOccurred())
+
+ result, err = client.TSGet(ctx, "2").Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(result.Timestamp).To(BeEquivalentTo(5))
+ Expect(result.Value).To(BeEquivalentTo(1.5))
+
+ opt = &redis.TSIncrDecrOptions{Timestamp: 7}
+ _, err = client.TSIncrByWithArgs(ctx, "2", 2.25, opt).Result()
+ Expect(err).NotTo(HaveOccurred())
+
+ result, err = client.TSGet(ctx, "2").Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(result.Timestamp).To(BeEquivalentTo(7))
+ Expect(result.Value).To(BeEquivalentTo(3.75))
+
+ opt = &redis.TSIncrDecrOptions{Timestamp: 15}
+ _, err = client.TSDecrByWithArgs(ctx, "2", 1.5, opt).Result()
+ Expect(err).NotTo(HaveOccurred())
+
+ result, err = client.TSGet(ctx, "2").Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(result.Timestamp).To(BeEquivalentTo(15))
+ Expect(result.Value).To(BeEquivalentTo(2.25))
+
+ // Test chunk size INCRBY
+ opt = &redis.TSIncrDecrOptions{ChunkSize: 128}
+ _, err = client.TSIncrByWithArgs(ctx, "3", 10, opt).Result()
+ Expect(err).NotTo(HaveOccurred())
+
+ resultInfo, err := client.TSInfo(ctx, "3").Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(resultInfo["chunkSize"]).To(BeEquivalentTo(128))
+
+ // Test chunk size DECRBY
+ opt = &redis.TSIncrDecrOptions{ChunkSize: 128}
+ _, err = client.TSDecrByWithArgs(ctx, "4", 10, opt).Result()
+ Expect(err).NotTo(HaveOccurred())
+
+ resultInfo, err = client.TSInfo(ctx, "4").Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(resultInfo["chunkSize"]).To(BeEquivalentTo(128))
+ })
+
+ It("should TSGet", Label("timeseries", "tsget"), func() {
+ opt := &redis.TSOptions{DuplicatePolicy: "max"}
+ resultGet, err := client.TSAddWithArgs(ctx, "foo", 2265985, 151, opt).Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(resultGet).To(BeEquivalentTo(2265985))
+ result, err := client.TSGet(ctx, "foo").Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(result.Timestamp).To(BeEquivalentTo(2265985))
+ Expect(result.Value).To(BeEquivalentTo(151))
+ })
+
+ It("should TSGet Latest", Label("timeseries", "tsgetlatest", "NonRedisEnterprise"), func() {
+ resultGet, err := client.TSCreate(ctx, "tsgl-1").Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(resultGet).To(BeEquivalentTo("OK"))
+ resultGet, err = client.TSCreate(ctx, "tsgl-2").Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(resultGet).To(BeEquivalentTo("OK"))
+
+ resultGet, err = client.TSCreateRule(ctx, "tsgl-1", "tsgl-2", redis.Sum, 10).Result()
+ Expect(err).NotTo(HaveOccurred())
+
+ Expect(resultGet).To(BeEquivalentTo("OK"))
+ _, err = client.TSAdd(ctx, "tsgl-1", 1, 1).Result()
+ Expect(err).NotTo(HaveOccurred())
+ _, err = client.TSAdd(ctx, "tsgl-1", 2, 3).Result()
+ Expect(err).NotTo(HaveOccurred())
+ _, err = client.TSAdd(ctx, "tsgl-1", 11, 7).Result()
+ Expect(err).NotTo(HaveOccurred())
+ _, err = client.TSAdd(ctx, "tsgl-1", 13, 1).Result()
+ Expect(err).NotTo(HaveOccurred())
+ result, errGet := client.TSGet(ctx, "tsgl-2").Result()
+ Expect(errGet).NotTo(HaveOccurred())
+ Expect(result.Timestamp).To(BeEquivalentTo(0))
+ Expect(result.Value).To(BeEquivalentTo(4))
+ result, errGet = client.TSGetWithArgs(ctx, "tsgl-2", &redis.TSGetOptions{Latest: true}).Result()
+ Expect(errGet).NotTo(HaveOccurred())
+ Expect(result.Timestamp).To(BeEquivalentTo(10))
+ Expect(result.Value).To(BeEquivalentTo(8))
+ })
+
+ It("should TSInfo", Label("timeseries", "tsinfo"), func() {
+ resultGet, err := client.TSAdd(ctx, "foo", 2265985, 151).Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(resultGet).To(BeEquivalentTo(2265985))
+ result, err := client.TSInfo(ctx, "foo").Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(result["firstTimestamp"]).To(BeEquivalentTo(2265985))
+ })
+
+ It("should TSMAdd", Label("timeseries", "tsmadd"), func() {
+ resultGet, err := client.TSCreate(ctx, "a").Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(resultGet).To(BeEquivalentTo("OK"))
+ ktvSlices := make([][]interface{}, 3)
+ for i := 0; i < 3; i++ {
+ ktvSlices[i] = make([]interface{}, 3)
+ ktvSlices[i][0] = "a"
+ for j := 1; j < 3; j++ {
+ ktvSlices[i][j] = (i + j) * j
+ }
+ }
+ result, err := client.TSMAdd(ctx, ktvSlices).Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(result).To(BeEquivalentTo([]int64{1, 2, 3}))
+ })
+
+ It("should TSMGet and TSMGetWithArgs", Label("timeseries", "tsmget", "tsmgetWithArgs", "NonRedisEnterprise"), func() {
+ opt := &redis.TSOptions{Labels: map[string]string{"Test": "This"}}
+ resultCreate, err := client.TSCreateWithArgs(ctx, "a", opt).Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(resultCreate).To(BeEquivalentTo("OK"))
+ opt = &redis.TSOptions{Labels: map[string]string{"Test": "This", "Taste": "That"}}
+ resultCreate, err = client.TSCreateWithArgs(ctx, "b", opt).Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(resultCreate).To(BeEquivalentTo("OK"))
+ _, err = client.TSAdd(ctx, "a", "*", 15).Result()
+ Expect(err).NotTo(HaveOccurred())
+ _, err = client.TSAdd(ctx, "b", "*", 25).Result()
+ Expect(err).NotTo(HaveOccurred())
+
+ result, err := client.TSMGet(ctx, []string{"Test=This"}).Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(result["a"][1].([]interface{})[1]).To(BeEquivalentTo(15))
+ Expect(result["b"][1].([]interface{})[1]).To(BeEquivalentTo(25))
+ mgetOpt := &redis.TSMGetOptions{WithLabels: true}
+ result, err = client.TSMGetWithArgs(ctx, []string{"Test=This"}, mgetOpt).Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(result["b"][0]).To(BeEquivalentTo(map[interface{}]interface{}{"Test": "This", "Taste": "That"}))
+
+ resultCreate, err = client.TSCreate(ctx, "c").Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(resultCreate).To(BeEquivalentTo("OK"))
+ opt = &redis.TSOptions{Labels: map[string]string{"is_compaction": "true"}}
+ resultCreate, err = client.TSCreateWithArgs(ctx, "d", opt).Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(resultCreate).To(BeEquivalentTo("OK"))
+ resultCreateRule, err := client.TSCreateRule(ctx, "c", "d", redis.Sum, 10).Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(resultCreateRule).To(BeEquivalentTo("OK"))
+ _, err = client.TSAdd(ctx, "c", 1, 1).Result()
+ Expect(err).NotTo(HaveOccurred())
+ _, err = client.TSAdd(ctx, "c", 2, 3).Result()
+ Expect(err).NotTo(HaveOccurred())
+ _, err = client.TSAdd(ctx, "c", 11, 7).Result()
+ Expect(err).NotTo(HaveOccurred())
+ _, err = client.TSAdd(ctx, "c", 13, 1).Result()
+ Expect(err).NotTo(HaveOccurred())
+ result, err = client.TSMGet(ctx, []string{"is_compaction=true"}).Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(result["d"][1]).To(BeEquivalentTo([]interface{}{int64(0), 4.0}))
+ mgetOpt = &redis.TSMGetOptions{Latest: true}
+ result, err = client.TSMGetWithArgs(ctx, []string{"is_compaction=true"}, mgetOpt).Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(result["d"][1]).To(BeEquivalentTo([]interface{}{int64(10), 8.0}))
+ })
+
+ It("should TSQueryIndex", Label("timeseries", "tsqueryindex"), func() {
+ opt := &redis.TSOptions{Labels: map[string]string{"Test": "This"}}
+ resultCreate, err := client.TSCreateWithArgs(ctx, "a", opt).Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(resultCreate).To(BeEquivalentTo("OK"))
+ opt = &redis.TSOptions{Labels: map[string]string{"Test": "This", "Taste": "That"}}
+ resultCreate, err = client.TSCreateWithArgs(ctx, "b", opt).Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(resultCreate).To(BeEquivalentTo("OK"))
+ result, err := client.TSQueryIndex(ctx, []string{"Test=This"}).Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(len(result)).To(BeEquivalentTo(2))
+ result, err = client.TSQueryIndex(ctx, []string{"Taste=That"}).Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(len(result)).To(BeEquivalentTo(1))
+ })
+
+ It("should TSDel and TSRange", Label("timeseries", "tsdel", "tsrange"), func() {
+ for i := 0; i < 100; i++ {
+ _, err := client.TSAdd(ctx, "a", i, float64(i%7)).Result()
+ Expect(err).NotTo(HaveOccurred())
+ }
+ resultDelete, err := client.TSDel(ctx, "a", 0, 21).Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(resultDelete).To(BeEquivalentTo(22))
+
+ resultRange, err := client.TSRange(ctx, "a", 0, 21).Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(resultRange).To(BeEquivalentTo([]redis.TSTimestampValue{}))
+
+ resultRange, err = client.TSRange(ctx, "a", 22, 22).Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(resultRange[0]).To(BeEquivalentTo(redis.TSTimestampValue{Timestamp: 22, Value: 1}))
+ })
+
+ It("should TSRange, TSRangeWithArgs", Label("timeseries", "tsrange", "tsrangeWithArgs", "NonRedisEnterprise"), func() {
+ for i := 0; i < 100; i++ {
+ _, err := client.TSAdd(ctx, "a", i, float64(i%7)).Result()
+ Expect(err).NotTo(HaveOccurred())
+
+ }
+ result, err := client.TSRange(ctx, "a", 0, 200).Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(len(result)).To(BeEquivalentTo(100))
+ for i := 0; i < 100; i++ {
+ client.TSAdd(ctx, "a", i+200, float64(i%7))
+ }
+ result, err = client.TSRange(ctx, "a", 0, 500).Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(len(result)).To(BeEquivalentTo(200))
+ fts := make([]int, 0)
+ for i := 10; i < 20; i++ {
+ fts = append(fts, i)
+ }
+ opt := &redis.TSRangeOptions{FilterByTS: fts, FilterByValue: []int{1, 2}}
+ result, err = client.TSRangeWithArgs(ctx, "a", 0, 500, opt).Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(len(result)).To(BeEquivalentTo(2))
+ opt = &redis.TSRangeOptions{Aggregator: redis.Count, BucketDuration: 10, Align: "+"}
+ result, err = client.TSRangeWithArgs(ctx, "a", 0, 10, opt).Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(result).To(BeEquivalentTo([]redis.TSTimestampValue{{Timestamp: 0, Value: 10}, {Timestamp: 10, Value: 1}}))
+ opt = &redis.TSRangeOptions{Aggregator: redis.Count, BucketDuration: 10, Align: "5"}
+ result, err = client.TSRangeWithArgs(ctx, "a", 0, 10, opt).Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(result).To(BeEquivalentTo([]redis.TSTimestampValue{{Timestamp: 0, Value: 5}, {Timestamp: 5, Value: 6}}))
+ opt = &redis.TSRangeOptions{Aggregator: redis.Twa, BucketDuration: 10}
+ result, err = client.TSRangeWithArgs(ctx, "a", 0, 10, opt).Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(result).To(BeEquivalentTo([]redis.TSTimestampValue{{Timestamp: 0, Value: 2.55}, {Timestamp: 10, Value: 3}}))
+ // Test Range Latest
+ resultCreate, err := client.TSCreate(ctx, "t1").Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(resultCreate).To(BeEquivalentTo("OK"))
+ resultCreate, err = client.TSCreate(ctx, "t2").Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(resultCreate).To(BeEquivalentTo("OK"))
+ resultRule, err := client.TSCreateRule(ctx, "t1", "t2", redis.Sum, 10).Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(resultRule).To(BeEquivalentTo("OK"))
+ _, errAdd := client.TSAdd(ctx, "t1", 1, 1).Result()
+ Expect(errAdd).NotTo(HaveOccurred())
+ _, errAdd = client.TSAdd(ctx, "t1", 2, 3).Result()
+ Expect(errAdd).NotTo(HaveOccurred())
+ _, errAdd = client.TSAdd(ctx, "t1", 11, 7).Result()
+ Expect(errAdd).NotTo(HaveOccurred())
+ _, errAdd = client.TSAdd(ctx, "t1", 13, 1).Result()
+ Expect(errAdd).NotTo(HaveOccurred())
+ resultRange, err := client.TSRange(ctx, "t1", 0, 20).Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(resultRange[0]).To(BeEquivalentTo(redis.TSTimestampValue{Timestamp: 1, Value: 1}))
+
+ opt = &redis.TSRangeOptions{Latest: true}
+ resultRange, err = client.TSRangeWithArgs(ctx, "t2", 0, 10, opt).Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(resultRange[0]).To(BeEquivalentTo(redis.TSTimestampValue{Timestamp: 0, Value: 4}))
+ // Test Bucket Timestamp
+ resultCreate, err = client.TSCreate(ctx, "t3").Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(resultCreate).To(BeEquivalentTo("OK"))
+ _, errAdd = client.TSAdd(ctx, "t3", 15, 1).Result()
+ Expect(errAdd).NotTo(HaveOccurred())
+ _, errAdd = client.TSAdd(ctx, "t3", 17, 4).Result()
+ Expect(errAdd).NotTo(HaveOccurred())
+ _, errAdd = client.TSAdd(ctx, "t3", 51, 3).Result()
+ Expect(errAdd).NotTo(HaveOccurred())
+ _, errAdd = client.TSAdd(ctx, "t3", 73, 5).Result()
+ Expect(errAdd).NotTo(HaveOccurred())
+ _, errAdd = client.TSAdd(ctx, "t3", 75, 3).Result()
+ Expect(errAdd).NotTo(HaveOccurred())
+
+ opt = &redis.TSRangeOptions{Aggregator: redis.Max, Align: 0, BucketDuration: 10}
+ resultRange, err = client.TSRangeWithArgs(ctx, "t3", 0, 100, opt).Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(resultRange[0]).To(BeEquivalentTo(redis.TSTimestampValue{Timestamp: 10, Value: 4}))
+ Expect(len(resultRange)).To(BeEquivalentTo(3))
+
+ opt = &redis.TSRangeOptions{Aggregator: redis.Max, Align: 0, BucketDuration: 10, BucketTimestamp: "+"}
+ resultRange, err = client.TSRangeWithArgs(ctx, "t3", 0, 100, opt).Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(resultRange[0]).To(BeEquivalentTo(redis.TSTimestampValue{Timestamp: 20, Value: 4}))
+ Expect(len(resultRange)).To(BeEquivalentTo(3))
+ // Test Empty
+ _, errAdd = client.TSAdd(ctx, "t4", 15, 1).Result()
+ Expect(errAdd).NotTo(HaveOccurred())
+ _, errAdd = client.TSAdd(ctx, "t4", 17, 4).Result()
+ Expect(errAdd).NotTo(HaveOccurred())
+ _, errAdd = client.TSAdd(ctx, "t4", 51, 3).Result()
+ Expect(errAdd).NotTo(HaveOccurred())
+ _, errAdd = client.TSAdd(ctx, "t4", 73, 5).Result()
+ Expect(errAdd).NotTo(HaveOccurred())
+ _, errAdd = client.TSAdd(ctx, "t4", 75, 3).Result()
+ Expect(errAdd).NotTo(HaveOccurred())
+
+ opt = &redis.TSRangeOptions{Aggregator: redis.Max, Align: 0, BucketDuration: 10}
+ resultRange, err = client.TSRangeWithArgs(ctx, "t4", 0, 100, opt).Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(resultRange[0]).To(BeEquivalentTo(redis.TSTimestampValue{Timestamp: 10, Value: 4}))
+ Expect(len(resultRange)).To(BeEquivalentTo(3))
+
+ opt = &redis.TSRangeOptions{Aggregator: redis.Max, Align: 0, BucketDuration: 10, Empty: true}
+ resultRange, err = client.TSRangeWithArgs(ctx, "t4", 0, 100, opt).Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(resultRange[0]).To(BeEquivalentTo(redis.TSTimestampValue{Timestamp: 10, Value: 4}))
+ Expect(len(resultRange)).To(BeEquivalentTo(7))
+ })
+
+ It("should TSRevRange, TSRevRangeWithArgs", Label("timeseries", "tsrevrange", "tsrevrangeWithArgs", "NonRedisEnterprise"), func() {
+ for i := 0; i < 100; i++ {
+ _, err := client.TSAdd(ctx, "a", i, float64(i%7)).Result()
+ Expect(err).NotTo(HaveOccurred())
+
+ }
+ result, err := client.TSRange(ctx, "a", 0, 200).Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(len(result)).To(BeEquivalentTo(100))
+ for i := 0; i < 100; i++ {
+ client.TSAdd(ctx, "a", i+200, float64(i%7))
+ }
+ result, err = client.TSRange(ctx, "a", 0, 500).Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(len(result)).To(BeEquivalentTo(200))
+
+ opt := &redis.TSRevRangeOptions{Aggregator: redis.Avg, BucketDuration: 10}
+ result, err = client.TSRevRangeWithArgs(ctx, "a", 0, 500, opt).Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(len(result)).To(BeEquivalentTo(20))
+
+ opt = &redis.TSRevRangeOptions{Count: 10}
+ result, err = client.TSRevRangeWithArgs(ctx, "a", 0, 500, opt).Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(len(result)).To(BeEquivalentTo(10))
+
+ fts := make([]int, 0)
+ for i := 10; i < 20; i++ {
+ fts = append(fts, i)
+ }
+ opt = &redis.TSRevRangeOptions{FilterByTS: fts, FilterByValue: []int{1, 2}}
+ result, err = client.TSRevRangeWithArgs(ctx, "a", 0, 500, opt).Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(len(result)).To(BeEquivalentTo(2))
+
+ opt = &redis.TSRevRangeOptions{Aggregator: redis.Count, BucketDuration: 10, Align: "+"}
+ result, err = client.TSRevRangeWithArgs(ctx, "a", 0, 10, opt).Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(result).To(BeEquivalentTo([]redis.TSTimestampValue{{Timestamp: 10, Value: 1}, {Timestamp: 0, Value: 10}}))
+
+ opt = &redis.TSRevRangeOptions{Aggregator: redis.Count, BucketDuration: 10, Align: "1"}
+ result, err = client.TSRevRangeWithArgs(ctx, "a", 0, 10, opt).Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(result).To(BeEquivalentTo([]redis.TSTimestampValue{{Timestamp: 1, Value: 10}, {Timestamp: 0, Value: 1}}))
+
+ opt = &redis.TSRevRangeOptions{Aggregator: redis.Twa, BucketDuration: 10}
+ result, err = client.TSRevRangeWithArgs(ctx, "a", 0, 10, opt).Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(result).To(BeEquivalentTo([]redis.TSTimestampValue{{Timestamp: 10, Value: 3}, {Timestamp: 0, Value: 2.55}}))
+ // Test Range Latest
+ resultCreate, err := client.TSCreate(ctx, "t1").Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(resultCreate).To(BeEquivalentTo("OK"))
+ resultCreate, err = client.TSCreate(ctx, "t2").Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(resultCreate).To(BeEquivalentTo("OK"))
+ resultRule, err := client.TSCreateRule(ctx, "t1", "t2", redis.Sum, 10).Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(resultRule).To(BeEquivalentTo("OK"))
+ _, errAdd := client.TSAdd(ctx, "t1", 1, 1).Result()
+ Expect(errAdd).NotTo(HaveOccurred())
+ _, errAdd = client.TSAdd(ctx, "t1", 2, 3).Result()
+ Expect(errAdd).NotTo(HaveOccurred())
+ _, errAdd = client.TSAdd(ctx, "t1", 11, 7).Result()
+ Expect(errAdd).NotTo(HaveOccurred())
+ _, errAdd = client.TSAdd(ctx, "t1", 13, 1).Result()
+ Expect(errAdd).NotTo(HaveOccurred())
+ resultRange, err := client.TSRange(ctx, "t2", 0, 10).Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(resultRange[0]).To(BeEquivalentTo(redis.TSTimestampValue{Timestamp: 0, Value: 4}))
+ opt = &redis.TSRevRangeOptions{Latest: true}
+ resultRange, err = client.TSRevRangeWithArgs(ctx, "t2", 0, 10, opt).Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(resultRange[0]).To(BeEquivalentTo(redis.TSTimestampValue{Timestamp: 10, Value: 8}))
+ resultRange, err = client.TSRevRangeWithArgs(ctx, "t2", 0, 9, opt).Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(resultRange[0]).To(BeEquivalentTo(redis.TSTimestampValue{Timestamp: 0, Value: 4}))
+ // Test Bucket Timestamp
+ resultCreate, err = client.TSCreate(ctx, "t3").Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(resultCreate).To(BeEquivalentTo("OK"))
+ _, errAdd = client.TSAdd(ctx, "t3", 15, 1).Result()
+ Expect(errAdd).NotTo(HaveOccurred())
+ _, errAdd = client.TSAdd(ctx, "t3", 17, 4).Result()
+ Expect(errAdd).NotTo(HaveOccurred())
+ _, errAdd = client.TSAdd(ctx, "t3", 51, 3).Result()
+ Expect(errAdd).NotTo(HaveOccurred())
+ _, errAdd = client.TSAdd(ctx, "t3", 73, 5).Result()
+ Expect(errAdd).NotTo(HaveOccurred())
+ _, errAdd = client.TSAdd(ctx, "t3", 75, 3).Result()
+ Expect(errAdd).NotTo(HaveOccurred())
+
+ opt = &redis.TSRevRangeOptions{Aggregator: redis.Max, Align: 0, BucketDuration: 10}
+ resultRange, err = client.TSRevRangeWithArgs(ctx, "t3", 0, 100, opt).Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(resultRange[0]).To(BeEquivalentTo(redis.TSTimestampValue{Timestamp: 70, Value: 5}))
+ Expect(len(resultRange)).To(BeEquivalentTo(3))
+
+ opt = &redis.TSRevRangeOptions{Aggregator: redis.Max, Align: 0, BucketDuration: 10, BucketTimestamp: "+"}
+ resultRange, err = client.TSRevRangeWithArgs(ctx, "t3", 0, 100, opt).Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(resultRange[0]).To(BeEquivalentTo(redis.TSTimestampValue{Timestamp: 80, Value: 5}))
+ Expect(len(resultRange)).To(BeEquivalentTo(3))
+ // Test Empty
+ _, errAdd = client.TSAdd(ctx, "t4", 15, 1).Result()
+ Expect(errAdd).NotTo(HaveOccurred())
+ _, errAdd = client.TSAdd(ctx, "t4", 17, 4).Result()
+ Expect(errAdd).NotTo(HaveOccurred())
+ _, errAdd = client.TSAdd(ctx, "t4", 51, 3).Result()
+ Expect(errAdd).NotTo(HaveOccurred())
+ _, errAdd = client.TSAdd(ctx, "t4", 73, 5).Result()
+ Expect(errAdd).NotTo(HaveOccurred())
+ _, errAdd = client.TSAdd(ctx, "t4", 75, 3).Result()
+ Expect(errAdd).NotTo(HaveOccurred())
+
+ opt = &redis.TSRevRangeOptions{Aggregator: redis.Max, Align: 0, BucketDuration: 10}
+ resultRange, err = client.TSRevRangeWithArgs(ctx, "t4", 0, 100, opt).Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(resultRange[0]).To(BeEquivalentTo(redis.TSTimestampValue{Timestamp: 70, Value: 5}))
+ Expect(len(resultRange)).To(BeEquivalentTo(3))
+
+ opt = &redis.TSRevRangeOptions{Aggregator: redis.Max, Align: 0, BucketDuration: 10, Empty: true}
+ resultRange, err = client.TSRevRangeWithArgs(ctx, "t4", 0, 100, opt).Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(resultRange[0]).To(BeEquivalentTo(redis.TSTimestampValue{Timestamp: 70, Value: 5}))
+ Expect(len(resultRange)).To(BeEquivalentTo(7))
+ })
+
+ It("should TSMRange and TSMRangeWithArgs", Label("timeseries", "tsmrange", "tsmrangeWithArgs"), func() {
+ createOpt := &redis.TSOptions{Labels: map[string]string{"Test": "This", "team": "ny"}}
+ resultCreate, err := client.TSCreateWithArgs(ctx, "a", createOpt).Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(resultCreate).To(BeEquivalentTo("OK"))
+ createOpt = &redis.TSOptions{Labels: map[string]string{"Test": "This", "Taste": "That", "team": "sf"}}
+ resultCreate, err = client.TSCreateWithArgs(ctx, "b", createOpt).Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(resultCreate).To(BeEquivalentTo("OK"))
+
+ for i := 0; i < 100; i++ {
+ _, err := client.TSAdd(ctx, "a", i, float64(i%7)).Result()
+ Expect(err).NotTo(HaveOccurred())
+ _, err = client.TSAdd(ctx, "b", i, float64(i%11)).Result()
+ Expect(err).NotTo(HaveOccurred())
+ }
+
+ result, err := client.TSMRange(ctx, 0, 200, []string{"Test=This"}).Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(len(result)).To(BeEquivalentTo(2))
+ Expect(len(result["a"][2].([]interface{}))).To(BeEquivalentTo(100))
+ // Test Count
+ mrangeOpt := &redis.TSMRangeOptions{Count: 10}
+ result, err = client.TSMRangeWithArgs(ctx, 0, 200, []string{"Test=This"}, mrangeOpt).Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(len(result["a"][2].([]interface{}))).To(BeEquivalentTo(10))
+ // Test Aggregation and BucketDuration
+ for i := 0; i < 100; i++ {
+ _, err := client.TSAdd(ctx, "a", i+200, float64(i%7)).Result()
+ Expect(err).NotTo(HaveOccurred())
+ }
+ mrangeOpt = &redis.TSMRangeOptions{Aggregator: redis.Avg, BucketDuration: 10}
+ result, err = client.TSMRangeWithArgs(ctx, 0, 500, []string{"Test=This"}, mrangeOpt).Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(len(result)).To(BeEquivalentTo(2))
+ Expect(len(result["a"][2].([]interface{}))).To(BeEquivalentTo(20))
+ // Test WithLabels
+ Expect(result["a"][0]).To(BeEquivalentTo(map[interface{}]interface{}{}))
+ mrangeOpt = &redis.TSMRangeOptions{WithLabels: true}
+ result, err = client.TSMRangeWithArgs(ctx, 0, 200, []string{"Test=This"}, mrangeOpt).Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(result["a"][0]).To(BeEquivalentTo(map[interface{}]interface{}{"Test": "This", "team": "ny"}))
+ // Test SelectedLabels
+ mrangeOpt = &redis.TSMRangeOptions{SelectedLabels: []interface{}{"team"}}
+ result, err = client.TSMRangeWithArgs(ctx, 0, 200, []string{"Test=This"}, mrangeOpt).Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(result["a"][0]).To(BeEquivalentTo(map[interface{}]interface{}{"team": "ny"}))
+ Expect(result["b"][0]).To(BeEquivalentTo(map[interface{}]interface{}{"team": "sf"}))
+ // Test FilterBy
+ fts := make([]int, 0)
+ for i := 10; i < 20; i++ {
+ fts = append(fts, i)
+ }
+ mrangeOpt = &redis.TSMRangeOptions{FilterByTS: fts, FilterByValue: []int{1, 2}}
+ result, err = client.TSMRangeWithArgs(ctx, 0, 200, []string{"Test=This"}, mrangeOpt).Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(result["a"][2]).To(BeEquivalentTo([]interface{}{[]interface{}{int64(15), 1.0}, []interface{}{int64(16), 2.0}}))
+ // Test GroupBy
+ mrangeOpt = &redis.TSMRangeOptions{GroupByLabel: "Test", Reducer: "sum"}
+ result, err = client.TSMRangeWithArgs(ctx, 0, 3, []string{"Test=This"}, mrangeOpt).Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(result["Test=This"][3]).To(BeEquivalentTo([]interface{}{[]interface{}{int64(0), 0.0}, []interface{}{int64(1), 2.0}, []interface{}{int64(2), 4.0}, []interface{}{int64(3), 6.0}}))
+
+ mrangeOpt = &redis.TSMRangeOptions{GroupByLabel: "Test", Reducer: "max"}
+ result, err = client.TSMRangeWithArgs(ctx, 0, 3, []string{"Test=This"}, mrangeOpt).Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(result["Test=This"][3]).To(BeEquivalentTo([]interface{}{[]interface{}{int64(0), 0.0}, []interface{}{int64(1), 1.0}, []interface{}{int64(2), 2.0}, []interface{}{int64(3), 3.0}}))
+
+ mrangeOpt = &redis.TSMRangeOptions{GroupByLabel: "team", Reducer: "min"}
+ result, err = client.TSMRangeWithArgs(ctx, 0, 3, []string{"Test=This"}, mrangeOpt).Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(len(result)).To(BeEquivalentTo(2))
+ Expect(result["team=ny"][3]).To(BeEquivalentTo([]interface{}{[]interface{}{int64(0), 0.0}, []interface{}{int64(1), 1.0}, []interface{}{int64(2), 2.0}, []interface{}{int64(3), 3.0}}))
+ Expect(result["team=sf"][3]).To(BeEquivalentTo([]interface{}{[]interface{}{int64(0), 0.0}, []interface{}{int64(1), 1.0}, []interface{}{int64(2), 2.0}, []interface{}{int64(3), 3.0}}))
+ // Test Align
+ mrangeOpt = &redis.TSMRangeOptions{Aggregator: redis.Count, BucketDuration: 10, Align: "-"}
+ result, err = client.TSMRangeWithArgs(ctx, 0, 10, []string{"team=ny"}, mrangeOpt).Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(result["a"][2]).To(BeEquivalentTo([]interface{}{[]interface{}{int64(0), 10.0}, []interface{}{int64(10), 1.0}}))
+
+ mrangeOpt = &redis.TSMRangeOptions{Aggregator: redis.Count, BucketDuration: 10, Align: 5}
+ result, err = client.TSMRangeWithArgs(ctx, 0, 10, []string{"team=ny"}, mrangeOpt).Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(result["a"][2]).To(BeEquivalentTo([]interface{}{[]interface{}{int64(0), 5.0}, []interface{}{int64(5), 6.0}}))
+ })
+
+ It("should TSMRangeWithArgs Latest", Label("timeseries", "tsmrangeWithArgs", "tsmrangelatest", "NonRedisEnterprise"), func() {
+ resultCreate, err := client.TSCreate(ctx, "a").Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(resultCreate).To(BeEquivalentTo("OK"))
+ opt := &redis.TSOptions{Labels: map[string]string{"is_compaction": "true"}}
+ resultCreate, err = client.TSCreateWithArgs(ctx, "b", opt).Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(resultCreate).To(BeEquivalentTo("OK"))
+
+ resultCreate, err = client.TSCreate(ctx, "c").Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(resultCreate).To(BeEquivalentTo("OK"))
+ opt = &redis.TSOptions{Labels: map[string]string{"is_compaction": "true"}}
+ resultCreate, err = client.TSCreateWithArgs(ctx, "d", opt).Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(resultCreate).To(BeEquivalentTo("OK"))
+
+ resultCreateRule, err := client.TSCreateRule(ctx, "a", "b", redis.Sum, 10).Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(resultCreateRule).To(BeEquivalentTo("OK"))
+ resultCreateRule, err = client.TSCreateRule(ctx, "c", "d", redis.Sum, 10).Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(resultCreateRule).To(BeEquivalentTo("OK"))
+
+ _, err = client.TSAdd(ctx, "a", 1, 1).Result()
+ Expect(err).NotTo(HaveOccurred())
+ _, err = client.TSAdd(ctx, "a", 2, 3).Result()
+ Expect(err).NotTo(HaveOccurred())
+ _, err = client.TSAdd(ctx, "a", 11, 7).Result()
+ Expect(err).NotTo(HaveOccurred())
+ _, err = client.TSAdd(ctx, "a", 13, 1).Result()
+ Expect(err).NotTo(HaveOccurred())
+
+ _, err = client.TSAdd(ctx, "c", 1, 1).Result()
+ Expect(err).NotTo(HaveOccurred())
+ _, err = client.TSAdd(ctx, "c", 2, 3).Result()
+ Expect(err).NotTo(HaveOccurred())
+ _, err = client.TSAdd(ctx, "c", 11, 7).Result()
+ Expect(err).NotTo(HaveOccurred())
+ _, err = client.TSAdd(ctx, "c", 13, 1).Result()
+ Expect(err).NotTo(HaveOccurred())
+ mrangeOpt := &redis.TSMRangeOptions{Latest: true}
+ result, err := client.TSMRangeWithArgs(ctx, 0, 10, []string{"is_compaction=true"}, mrangeOpt).Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(result["b"][2]).To(BeEquivalentTo([]interface{}{[]interface{}{int64(0), 4.0}, []interface{}{int64(10), 8.0}}))
+ Expect(result["d"][2]).To(BeEquivalentTo([]interface{}{[]interface{}{int64(0), 4.0}, []interface{}{int64(10), 8.0}}))
+ })
+ It("should TSMRevRange and TSMRevRangeWithArgs", Label("timeseries", "tsmrevrange", "tsmrevrangeWithArgs"), func() {
+ createOpt := &redis.TSOptions{Labels: map[string]string{"Test": "This", "team": "ny"}}
+ resultCreate, err := client.TSCreateWithArgs(ctx, "a", createOpt).Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(resultCreate).To(BeEquivalentTo("OK"))
+ createOpt = &redis.TSOptions{Labels: map[string]string{"Test": "This", "Taste": "That", "team": "sf"}}
+ resultCreate, err = client.TSCreateWithArgs(ctx, "b", createOpt).Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(resultCreate).To(BeEquivalentTo("OK"))
+
+ for i := 0; i < 100; i++ {
+ _, err := client.TSAdd(ctx, "a", i, float64(i%7)).Result()
+ Expect(err).NotTo(HaveOccurred())
+ _, err = client.TSAdd(ctx, "b", i, float64(i%11)).Result()
+ Expect(err).NotTo(HaveOccurred())
+ }
+ result, err := client.TSMRevRange(ctx, 0, 200, []string{"Test=This"}).Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(len(result)).To(BeEquivalentTo(2))
+ Expect(len(result["a"][2].([]interface{}))).To(BeEquivalentTo(100))
+ // Test Count
+ mrangeOpt := &redis.TSMRevRangeOptions{Count: 10}
+ result, err = client.TSMRevRangeWithArgs(ctx, 0, 200, []string{"Test=This"}, mrangeOpt).Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(len(result["a"][2].([]interface{}))).To(BeEquivalentTo(10))
+ // Test Aggregation and BucketDuration
+ for i := 0; i < 100; i++ {
+ _, err := client.TSAdd(ctx, "a", i+200, float64(i%7)).Result()
+ Expect(err).NotTo(HaveOccurred())
+ }
+ mrangeOpt = &redis.TSMRevRangeOptions{Aggregator: redis.Avg, BucketDuration: 10}
+ result, err = client.TSMRevRangeWithArgs(ctx, 0, 500, []string{"Test=This"}, mrangeOpt).Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(len(result)).To(BeEquivalentTo(2))
+ Expect(len(result["a"][2].([]interface{}))).To(BeEquivalentTo(20))
+ Expect(result["a"][0]).To(BeEquivalentTo(map[interface{}]interface{}{}))
+ // Test WithLabels
+ Expect(result["a"][0]).To(BeEquivalentTo(map[interface{}]interface{}{}))
+ mrangeOpt = &redis.TSMRevRangeOptions{WithLabels: true}
+ result, err = client.TSMRevRangeWithArgs(ctx, 0, 200, []string{"Test=This"}, mrangeOpt).Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(result["a"][0]).To(BeEquivalentTo(map[interface{}]interface{}{"Test": "This", "team": "ny"}))
+ // Test SelectedLabels
+ mrangeOpt = &redis.TSMRevRangeOptions{SelectedLabels: []interface{}{"team"}}
+ result, err = client.TSMRevRangeWithArgs(ctx, 0, 200, []string{"Test=This"}, mrangeOpt).Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(result["a"][0]).To(BeEquivalentTo(map[interface{}]interface{}{"team": "ny"}))
+ Expect(result["b"][0]).To(BeEquivalentTo(map[interface{}]interface{}{"team": "sf"}))
+ // Test FilterBy
+ fts := make([]int, 0)
+ for i := 10; i < 20; i++ {
+ fts = append(fts, i)
+ }
+ mrangeOpt = &redis.TSMRevRangeOptions{FilterByTS: fts, FilterByValue: []int{1, 2}}
+ result, err = client.TSMRevRangeWithArgs(ctx, 0, 200, []string{"Test=This"}, mrangeOpt).Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(result["a"][2]).To(BeEquivalentTo([]interface{}{[]interface{}{int64(16), 2.0}, []interface{}{int64(15), 1.0}}))
+ // Test GroupBy
+ mrangeOpt = &redis.TSMRevRangeOptions{GroupByLabel: "Test", Reducer: "sum"}
+ result, err = client.TSMRevRangeWithArgs(ctx, 0, 3, []string{"Test=This"}, mrangeOpt).Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(result["Test=This"][3]).To(BeEquivalentTo([]interface{}{[]interface{}{int64(3), 6.0}, []interface{}{int64(2), 4.0}, []interface{}{int64(1), 2.0}, []interface{}{int64(0), 0.0}}))
+
+ mrangeOpt = &redis.TSMRevRangeOptions{GroupByLabel: "Test", Reducer: "max"}
+ result, err = client.TSMRevRangeWithArgs(ctx, 0, 3, []string{"Test=This"}, mrangeOpt).Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(result["Test=This"][3]).To(BeEquivalentTo([]interface{}{[]interface{}{int64(3), 3.0}, []interface{}{int64(2), 2.0}, []interface{}{int64(1), 1.0}, []interface{}{int64(0), 0.0}}))
+
+ mrangeOpt = &redis.TSMRevRangeOptions{GroupByLabel: "team", Reducer: "min"}
+ result, err = client.TSMRevRangeWithArgs(ctx, 0, 3, []string{"Test=This"}, mrangeOpt).Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(len(result)).To(BeEquivalentTo(2))
+ Expect(result["team=ny"][3]).To(BeEquivalentTo([]interface{}{[]interface{}{int64(3), 3.0}, []interface{}{int64(2), 2.0}, []interface{}{int64(1), 1.0}, []interface{}{int64(0), 0.0}}))
+ Expect(result["team=sf"][3]).To(BeEquivalentTo([]interface{}{[]interface{}{int64(3), 3.0}, []interface{}{int64(2), 2.0}, []interface{}{int64(1), 1.0}, []interface{}{int64(0), 0.0}}))
+ // Test Align
+ mrangeOpt = &redis.TSMRevRangeOptions{Aggregator: redis.Count, BucketDuration: 10, Align: "-"}
+ result, err = client.TSMRevRangeWithArgs(ctx, 0, 10, []string{"team=ny"}, mrangeOpt).Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(result["a"][2]).To(BeEquivalentTo([]interface{}{[]interface{}{int64(10), 1.0}, []interface{}{int64(0), 10.0}}))
+
+ mrangeOpt = &redis.TSMRevRangeOptions{Aggregator: redis.Count, BucketDuration: 10, Align: 1}
+ result, err = client.TSMRevRangeWithArgs(ctx, 0, 10, []string{"team=ny"}, mrangeOpt).Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(result["a"][2]).To(BeEquivalentTo([]interface{}{[]interface{}{int64(1), 10.0}, []interface{}{int64(0), 1.0}}))
+ })
+
+ It("should TSMRevRangeWithArgs Latest", Label("timeseries", "tsmrevrangeWithArgs", "tsmrevrangelatest", "NonRedisEnterprise"), func() {
+ resultCreate, err := client.TSCreate(ctx, "a").Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(resultCreate).To(BeEquivalentTo("OK"))
+ opt := &redis.TSOptions{Labels: map[string]string{"is_compaction": "true"}}
+ resultCreate, err = client.TSCreateWithArgs(ctx, "b", opt).Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(resultCreate).To(BeEquivalentTo("OK"))
+
+ resultCreate, err = client.TSCreate(ctx, "c").Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(resultCreate).To(BeEquivalentTo("OK"))
+ opt = &redis.TSOptions{Labels: map[string]string{"is_compaction": "true"}}
+ resultCreate, err = client.TSCreateWithArgs(ctx, "d", opt).Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(resultCreate).To(BeEquivalentTo("OK"))
+
+ resultCreateRule, err := client.TSCreateRule(ctx, "a", "b", redis.Sum, 10).Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(resultCreateRule).To(BeEquivalentTo("OK"))
+ resultCreateRule, err = client.TSCreateRule(ctx, "c", "d", redis.Sum, 10).Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(resultCreateRule).To(BeEquivalentTo("OK"))
+
+ _, err = client.TSAdd(ctx, "a", 1, 1).Result()
+ Expect(err).NotTo(HaveOccurred())
+ _, err = client.TSAdd(ctx, "a", 2, 3).Result()
+ Expect(err).NotTo(HaveOccurred())
+ _, err = client.TSAdd(ctx, "a", 11, 7).Result()
+ Expect(err).NotTo(HaveOccurred())
+ _, err = client.TSAdd(ctx, "a", 13, 1).Result()
+ Expect(err).NotTo(HaveOccurred())
+
+ _, err = client.TSAdd(ctx, "c", 1, 1).Result()
+ Expect(err).NotTo(HaveOccurred())
+ _, err = client.TSAdd(ctx, "c", 2, 3).Result()
+ Expect(err).NotTo(HaveOccurred())
+ _, err = client.TSAdd(ctx, "c", 11, 7).Result()
+ Expect(err).NotTo(HaveOccurred())
+ _, err = client.TSAdd(ctx, "c", 13, 1).Result()
+ Expect(err).NotTo(HaveOccurred())
+ mrangeOpt := &redis.TSMRevRangeOptions{Latest: true}
+ result, err := client.TSMRevRangeWithArgs(ctx, 0, 10, []string{"is_compaction=true"}, mrangeOpt).Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(result["b"][2]).To(BeEquivalentTo([]interface{}{[]interface{}{int64(10), 8.0}, []interface{}{int64(0), 4.0}}))
+ Expect(result["d"][2]).To(BeEquivalentTo([]interface{}{[]interface{}{int64(10), 8.0}, []interface{}{int64(0), 4.0}}))
+ })
+})
diff --git a/dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/tx.go b/dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.5.1/tx.go
index 8c9d872..039eaf3 100644
--- a/dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/tx.go
+++ b/dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.5.1/tx.go
@@ -3,8 +3,8 @@ package redis
import (
"context"
- "github.com/go-redis/redis/v8/internal/pool"
- "github.com/go-redis/redis/v8/internal/proto"
+ "github.com/redis/go-redis/v9/internal/pool"
+ "github.com/redis/go-redis/v9/internal/proto"
)
// TxFailedErr transaction redis failed.
@@ -19,18 +19,16 @@ type Tx struct {
baseClient
cmdable
statefulCmdable
- hooks
- ctx context.Context
+ hooksMixin
}
-func (c *Client) newTx(ctx context.Context) *Tx {
+func (c *Client) newTx() *Tx {
tx := Tx{
baseClient: baseClient{
opt: c.opt,
connPool: pool.NewStickyConnPool(c.connPool),
},
- hooks: c.hooks.clone(),
- ctx: ctx,
+ hooksMixin: c.hooksMixin.clone(),
}
tx.init()
return &tx
@@ -39,25 +37,19 @@ func (c *Client) newTx(ctx context.Context) *Tx {
func (c *Tx) init() {
c.cmdable = c.Process
c.statefulCmdable = c.Process
-}
-
-func (c *Tx) Context() context.Context {
- return c.ctx
-}
-func (c *Tx) WithContext(ctx context.Context) *Tx {
- if ctx == nil {
- panic("nil context")
- }
- clone := *c
- clone.init()
- clone.hooks.lock()
- clone.ctx = ctx
- return &clone
+ c.initHooks(hooks{
+ dial: c.baseClient.dial,
+ process: c.baseClient.process,
+ pipeline: c.baseClient.processPipeline,
+ txPipeline: c.baseClient.processTxPipeline,
+ })
}
func (c *Tx) Process(ctx context.Context, cmd Cmder) error {
- return c.hooks.process(ctx, cmd, c.baseClient.process)
+ err := c.processHook(ctx, cmd)
+ cmd.SetErr(err)
+ return err
}
// Watch prepares a transaction and marks the keys to be watched
@@ -65,7 +57,7 @@ func (c *Tx) Process(ctx context.Context, cmd Cmder) error {
//
// The transaction is automatically closed when fn exits.
func (c *Client) Watch(ctx context.Context, fn func(*Tx) error, keys ...string) error {
- tx := c.newTx(ctx)
+ tx := c.newTx()
defer tx.Close(ctx)
if len(keys) > 0 {
if err := tx.Watch(ctx, keys...).Err(); err != nil {
@@ -109,9 +101,8 @@ func (c *Tx) Unwatch(ctx context.Context, keys ...string) *StatusCmd {
// Pipeline creates a pipeline. Usually it is more convenient to use Pipelined.
func (c *Tx) Pipeline() Pipeliner {
pipe := Pipeline{
- ctx: c.ctx,
exec: func(ctx context.Context, cmds []Cmder) error {
- return c.hooks.processPipeline(ctx, cmds, c.baseClient.processPipeline)
+ return c.processPipelineHook(ctx, cmds)
},
}
pipe.init()
@@ -139,11 +130,22 @@ func (c *Tx) TxPipelined(ctx context.Context, fn func(Pipeliner) error) ([]Cmder
// TxPipeline creates a pipeline. Usually it is more convenient to use TxPipelined.
func (c *Tx) TxPipeline() Pipeliner {
pipe := Pipeline{
- ctx: c.ctx,
exec: func(ctx context.Context, cmds []Cmder) error {
- return c.hooks.processTxPipeline(ctx, cmds, c.baseClient.processTxPipeline)
+ cmds = wrapMultiExec(ctx, cmds)
+ return c.processTxPipelineHook(ctx, cmds)
},
}
pipe.init()
return &pipe
}
+
+func wrapMultiExec(ctx context.Context, cmds []Cmder) []Cmder {
+ if len(cmds) == 0 {
+ panic("not reached")
+ }
+ cmdsCopy := make([]Cmder, len(cmds)+2)
+ cmdsCopy[0] = NewStatusCmd(ctx, "multi")
+ copy(cmdsCopy[1:], cmds)
+ cmdsCopy[len(cmdsCopy)-1] = NewSliceCmd(ctx, "exec")
+ return cmdsCopy
+}
diff --git a/dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/tx_test.go b/dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.5.1/tx_test.go
index 7deb2df..1146d46 100644
--- a/dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/tx_test.go
+++ b/dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.5.1/tx_test.go
@@ -5,10 +5,10 @@ import (
"strconv"
"sync"
- . "github.com/onsi/ginkgo"
- . "github.com/onsi/gomega"
+ . "github.com/bsm/ginkgo/v2"
+ . "github.com/bsm/gomega"
- "github.com/go-redis/redis/v8"
+ "github.com/redis/go-redis/v9"
)
var _ = Describe("Tx", func() {
@@ -64,7 +64,7 @@ var _ = Describe("Tx", func() {
Expect(n).To(Equal(int64(100)))
})
- It("should discard", func() {
+ It("should discard", Label("NonRedisEnterprise"), func() {
err := client.Watch(ctx, func(tx *redis.Tx) error {
cmds, err := tx.TxPipelined(ctx, func(pipe redis.Pipeliner) error {
pipe.Set(ctx, "key1", "hello1", 0)
@@ -143,9 +143,6 @@ var _ = Describe("Tx", func() {
}
err = do()
- Expect(err).To(MatchError("bad connection"))
-
- err = do()
Expect(err).NotTo(HaveOccurred())
})
})
diff --git a/dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/universal.go b/dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.5.1/universal.go
index c89b3e5..275bef3 100644
--- a/dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/universal.go
+++ b/dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.5.1/universal.go
@@ -14,6 +14,9 @@ type UniversalOptions struct {
// of cluster/sentinel nodes.
Addrs []string
+ // ClientName will execute the `CLIENT SETNAME ClientName` command for each conn.
+ ClientName string
+
// Database to be selected after connecting to the server.
// Only single-node and failover clients.
DB int
@@ -23,6 +26,7 @@ type UniversalOptions struct {
Dialer func(ctx context.Context, network, addr string) (net.Conn, error)
OnConnect func(ctx context.Context, cn *Conn) error
+ Protocol int
Username string
Password string
SentinelUsername string
@@ -32,19 +36,21 @@ type UniversalOptions struct {
MinRetryBackoff time.Duration
MaxRetryBackoff time.Duration
- DialTimeout time.Duration
- ReadTimeout time.Duration
- WriteTimeout time.Duration
+ DialTimeout time.Duration
+ ReadTimeout time.Duration
+ WriteTimeout time.Duration
+ ContextTimeoutEnabled bool
// PoolFIFO uses FIFO mode for each node connection pool GET/PUT (default LIFO).
PoolFIFO bool
- PoolSize int
- MinIdleConns int
- MaxConnAge time.Duration
- PoolTimeout time.Duration
- IdleTimeout time.Duration
- IdleCheckFrequency time.Duration
+ PoolSize int
+ PoolTimeout time.Duration
+ MinIdleConns int
+ MaxIdleConns int
+ MaxActiveConns int
+ ConnMaxIdleTime time.Duration
+ ConnMaxLifetime time.Duration
TLSConfig *tls.Config
@@ -59,6 +65,9 @@ type UniversalOptions struct {
// Only failover clients.
MasterName string
+
+ DisableIndentity bool
+ IdentitySuffix string
}
// Cluster returns cluster options created from the universal options.
@@ -68,10 +77,12 @@ func (o *UniversalOptions) Cluster() *ClusterOptions {
}
return &ClusterOptions{
- Addrs: o.Addrs,
- Dialer: o.Dialer,
- OnConnect: o.OnConnect,
+ Addrs: o.Addrs,
+ ClientName: o.ClientName,
+ Dialer: o.Dialer,
+ OnConnect: o.OnConnect,
+ Protocol: o.Protocol,
Username: o.Username,
Password: o.Password,
@@ -84,18 +95,25 @@ func (o *UniversalOptions) Cluster() *ClusterOptions {
MinRetryBackoff: o.MinRetryBackoff,
MaxRetryBackoff: o.MaxRetryBackoff,
- DialTimeout: o.DialTimeout,
- ReadTimeout: o.ReadTimeout,
- WriteTimeout: o.WriteTimeout,
- PoolFIFO: o.PoolFIFO,
- PoolSize: o.PoolSize,
- MinIdleConns: o.MinIdleConns,
- MaxConnAge: o.MaxConnAge,
- PoolTimeout: o.PoolTimeout,
- IdleTimeout: o.IdleTimeout,
- IdleCheckFrequency: o.IdleCheckFrequency,
+ DialTimeout: o.DialTimeout,
+ ReadTimeout: o.ReadTimeout,
+ WriteTimeout: o.WriteTimeout,
+ ContextTimeoutEnabled: o.ContextTimeoutEnabled,
+
+ PoolFIFO: o.PoolFIFO,
+
+ PoolSize: o.PoolSize,
+ PoolTimeout: o.PoolTimeout,
+ MinIdleConns: o.MinIdleConns,
+ MaxIdleConns: o.MaxIdleConns,
+ MaxActiveConns: o.MaxActiveConns,
+ ConnMaxIdleTime: o.ConnMaxIdleTime,
+ ConnMaxLifetime: o.ConnMaxLifetime,
TLSConfig: o.TLSConfig,
+
+ DisableIndentity: o.DisableIndentity,
+ IdentitySuffix: o.IdentitySuffix,
}
}
@@ -108,11 +126,13 @@ func (o *UniversalOptions) Failover() *FailoverOptions {
return &FailoverOptions{
SentinelAddrs: o.Addrs,
MasterName: o.MasterName,
+ ClientName: o.ClientName,
Dialer: o.Dialer,
OnConnect: o.OnConnect,
DB: o.DB,
+ Protocol: o.Protocol,
Username: o.Username,
Password: o.Password,
SentinelUsername: o.SentinelUsername,
@@ -122,19 +142,24 @@ func (o *UniversalOptions) Failover() *FailoverOptions {
MinRetryBackoff: o.MinRetryBackoff,
MaxRetryBackoff: o.MaxRetryBackoff,
- DialTimeout: o.DialTimeout,
- ReadTimeout: o.ReadTimeout,
- WriteTimeout: o.WriteTimeout,
+ DialTimeout: o.DialTimeout,
+ ReadTimeout: o.ReadTimeout,
+ WriteTimeout: o.WriteTimeout,
+ ContextTimeoutEnabled: o.ContextTimeoutEnabled,
- PoolFIFO: o.PoolFIFO,
- PoolSize: o.PoolSize,
- MinIdleConns: o.MinIdleConns,
- MaxConnAge: o.MaxConnAge,
- PoolTimeout: o.PoolTimeout,
- IdleTimeout: o.IdleTimeout,
- IdleCheckFrequency: o.IdleCheckFrequency,
+ PoolFIFO: o.PoolFIFO,
+ PoolSize: o.PoolSize,
+ PoolTimeout: o.PoolTimeout,
+ MinIdleConns: o.MinIdleConns,
+ MaxIdleConns: o.MaxIdleConns,
+ MaxActiveConns: o.MaxActiveConns,
+ ConnMaxIdleTime: o.ConnMaxIdleTime,
+ ConnMaxLifetime: o.ConnMaxLifetime,
TLSConfig: o.TLSConfig,
+
+ DisableIndentity: o.DisableIndentity,
+ IdentitySuffix: o.IdentitySuffix,
}
}
@@ -146,11 +171,13 @@ func (o *UniversalOptions) Simple() *Options {
}
return &Options{
- Addr: addr,
- Dialer: o.Dialer,
- OnConnect: o.OnConnect,
+ Addr: addr,
+ ClientName: o.ClientName,
+ Dialer: o.Dialer,
+ OnConnect: o.OnConnect,
DB: o.DB,
+ Protocol: o.Protocol,
Username: o.Username,
Password: o.Password,
@@ -158,19 +185,24 @@ func (o *UniversalOptions) Simple() *Options {
MinRetryBackoff: o.MinRetryBackoff,
MaxRetryBackoff: o.MaxRetryBackoff,
- DialTimeout: o.DialTimeout,
- ReadTimeout: o.ReadTimeout,
- WriteTimeout: o.WriteTimeout,
+ DialTimeout: o.DialTimeout,
+ ReadTimeout: o.ReadTimeout,
+ WriteTimeout: o.WriteTimeout,
+ ContextTimeoutEnabled: o.ContextTimeoutEnabled,
- PoolFIFO: o.PoolFIFO,
- PoolSize: o.PoolSize,
- MinIdleConns: o.MinIdleConns,
- MaxConnAge: o.MaxConnAge,
- PoolTimeout: o.PoolTimeout,
- IdleTimeout: o.IdleTimeout,
- IdleCheckFrequency: o.IdleCheckFrequency,
+ PoolFIFO: o.PoolFIFO,
+ PoolSize: o.PoolSize,
+ PoolTimeout: o.PoolTimeout,
+ MinIdleConns: o.MinIdleConns,
+ MaxIdleConns: o.MaxIdleConns,
+ MaxActiveConns: o.MaxActiveConns,
+ ConnMaxIdleTime: o.ConnMaxIdleTime,
+ ConnMaxLifetime: o.ConnMaxLifetime,
TLSConfig: o.TLSConfig,
+
+ DisableIndentity: o.DisableIndentity,
+ IdentitySuffix: o.IdentitySuffix,
}
}
@@ -182,13 +214,13 @@ func (o *UniversalOptions) Simple() *Options {
// clients in different environments.
type UniversalClient interface {
Cmdable
- Context() context.Context
AddHook(Hook)
Watch(ctx context.Context, fn func(*Tx) error, keys ...string) error
Do(ctx context.Context, args ...interface{}) *Cmd
Process(ctx context.Context, cmd Cmder) error
Subscribe(ctx context.Context, channels ...string) *PubSub
PSubscribe(ctx context.Context, channels ...string) *PubSub
+ SSubscribe(ctx context.Context, channels ...string) *PubSub
Close() error
PoolStats() *PoolStats
}
diff --git a/dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/universal_test.go b/dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.5.1/universal_test.go
index 7491a1d..747c68a 100644
--- a/dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/universal_test.go
+++ b/dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.5.1/universal_test.go
@@ -1,10 +1,10 @@
package redis_test
import (
- . "github.com/onsi/ginkgo"
- . "github.com/onsi/gomega"
+ . "github.com/bsm/ginkgo/v2"
+ . "github.com/bsm/gomega"
- "github.com/go-redis/redis/v8"
+ "github.com/redis/go-redis/v9"
)
var _ = Describe("UniversalClient", func() {
@@ -17,6 +17,7 @@ var _ = Describe("UniversalClient", func() {
})
It("should connect to failover servers", func() {
+ Skip("Flaky Test")
client = redis.NewUniversalClient(&redis.UniversalOptions{
MasterName: sentinelName,
Addrs: sentinelAddrs,
@@ -31,7 +32,7 @@ var _ = Describe("UniversalClient", func() {
Expect(client.Ping(ctx).Err()).NotTo(HaveOccurred())
})
- It("should connect to clusters", func() {
+ It("should connect to clusters", Label("NonRedisEnterprise"), func() {
client = redis.NewUniversalClient(&redis.UniversalOptions{
Addrs: cluster.addrs(),
})
diff --git a/dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/version.go b/dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.5.1/version.go
index 112c9a2..e2c7f3e 100644
--- a/dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/version.go
+++ b/dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.5.1/version.go
@@ -2,5 +2,5 @@ package redis
// Version is the current release version.
func Version() string {
- return "8.11.5"
+ return "9.5.1"
}
diff --git a/dependencies/pkg/mod/go.uber.org/zap@v1.25.0/Makefile b/dependencies/pkg/mod/go.uber.org/zap@v1.25.0/Makefile
deleted file mode 100644
index 518c3fa..0000000
--- a/dependencies/pkg/mod/go.uber.org/zap@v1.25.0/Makefile
+++ /dev/null
@@ -1,82 +0,0 @@
-export GOBIN ?= $(shell pwd)/bin
-
-REVIVE = $(GOBIN)/revive
-STATICCHECK = $(GOBIN)/staticcheck
-GOVULNCHECK = $(GOBIN)/govulncheck
-BENCH_FLAGS ?= -cpuprofile=cpu.pprof -memprofile=mem.pprof -benchmem
-
-# Directories containing independent Go modules.
-#
-# We track coverage only for the main module.
-MODULE_DIRS = . ./exp ./benchmarks ./zapgrpc/internal/test
-
-# Many Go tools take file globs or directories as arguments instead of packages.
-GO_FILES := $(shell \
- find . '(' -path '*/.*' -o -path './vendor' ')' -prune \
- -o -name '*.go' -print | cut -b3-)
-
-.PHONY: all
-all: lint test
-
-.PHONY: lint
-lint: $(REVIVE) $(STATICCHECK)
- @rm -rf lint.log
- @echo "Checking formatting..."
- @gofmt -d -s $(GO_FILES) 2>&1 | tee lint.log
- @echo "Checking vet..."
- @$(foreach dir,$(MODULE_DIRS),(cd $(dir) && go vet ./... 2>&1) &&) true | tee -a lint.log
- @echo "Checking lint..."
- @$(foreach dir,$(MODULE_DIRS),(cd $(dir) && \
- $(REVIVE) -set_exit_status ./... 2>&1) &&) true | tee -a lint.log
- @echo "Checking staticcheck..."
- @$(foreach dir,$(MODULE_DIRS),(cd $(dir) && $(STATICCHECK) ./... 2>&1) &&) true | tee -a lint.log
- @echo "Checking for unresolved FIXMEs..."
- @git grep -i fixme | grep -v -e Makefile | tee -a lint.log
- @echo "Checking for license headers..."
- @./checklicense.sh | tee -a lint.log
- @[ ! -s lint.log ]
- @echo "Checking 'go mod tidy'..."
- @make tidy
- @if ! git diff --quiet; then \
- echo "'go mod tidy' resulted in changes or working tree is dirty:"; \
- git --no-pager diff; \
- fi
-
-$(REVIVE):
- cd tools && go install github.com/mgechev/revive
-
-$(GOVULNCHECK):
- cd tools && go install golang.org/x/vuln/cmd/govulncheck
-
-$(STATICCHECK):
- cd tools && go install honnef.co/go/tools/cmd/staticcheck
-
-.PHONY: test
-test:
- @$(foreach dir,$(MODULE_DIRS),(cd $(dir) && go test -race ./...) &&) true
-
-.PHONY: cover
-cover:
- go test -race -coverprofile=cover.out -coverpkg=./... ./...
- go tool cover -html=cover.out -o cover.html
-
-.PHONY: bench
-BENCH ?= .
-bench:
- @$(foreach dir,$(MODULE_DIRS), ( \
- cd $(dir) && \
- go list ./... | xargs -n1 go test -bench=$(BENCH) -run="^$$" $(BENCH_FLAGS) \
- ) &&) true
-
-.PHONY: updatereadme
-updatereadme:
- rm -f README.md
- cat .readme.tmpl | go run internal/readme/readme.go > README.md
-
-.PHONY: tidy
-tidy:
- @$(foreach dir,$(MODULE_DIRS),(cd $(dir) && go mod tidy) &&) true
-
-.PHONY: vulncheck
-vulncheck: $(GOVULNCHECK)
- $(GOVULNCHECK) ./... \ No newline at end of file
diff --git a/dependencies/pkg/mod/go.uber.org/zap@v1.25.0/array_go118.go b/dependencies/pkg/mod/go.uber.org/zap@v1.25.0/array_go118.go
deleted file mode 100644
index d0d2c49..0000000
--- a/dependencies/pkg/mod/go.uber.org/zap@v1.25.0/array_go118.go
+++ /dev/null
@@ -1,156 +0,0 @@
-// Copyright (c) 2022 Uber Technologies, Inc.
-//
-// Permission is hereby granted, free of charge, to any person obtaining a copy
-// of this software and associated documentation files (the "Software"), to deal
-// in the Software without restriction, including without limitation the rights
-// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-// copies of the Software, and to permit persons to whom the Software is
-// furnished to do so, subject to the following conditions:
-//
-// The above copyright notice and this permission notice shall be included in
-// all copies or substantial portions of the Software.
-//
-// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
-// THE SOFTWARE.
-
-//go:build go1.18
-// +build go1.18
-
-package zap
-
-import (
- "fmt"
-
- "go.uber.org/zap/zapcore"
-)
-
-// Objects constructs a field with the given key, holding a list of the
-// provided objects that can be marshaled by Zap.
-//
-// Note that these objects must implement zapcore.ObjectMarshaler directly.
-// That is, if you're trying to marshal a []Request, the MarshalLogObject
-// method must be declared on the Request type, not its pointer (*Request).
-// If it's on the pointer, use ObjectValues.
-//
-// Given an object that implements MarshalLogObject on the value receiver, you
-// can log a slice of those objects with Objects like so:
-//
-// type Author struct{ ... }
-// func (a Author) MarshalLogObject(enc zapcore.ObjectEncoder) error
-//
-// var authors []Author = ...
-// logger.Info("loading article", zap.Objects("authors", authors))
-//
-// Similarly, given a type that implements MarshalLogObject on its pointer
-// receiver, you can log a slice of pointers to that object with Objects like
-// so:
-//
-// type Request struct{ ... }
-// func (r *Request) MarshalLogObject(enc zapcore.ObjectEncoder) error
-//
-// var requests []*Request = ...
-// logger.Info("sending requests", zap.Objects("requests", requests))
-//
-// If instead, you have a slice of values of such an object, use the
-// ObjectValues constructor.
-//
-// var requests []Request = ...
-// logger.Info("sending requests", zap.ObjectValues("requests", requests))
-func Objects[T zapcore.ObjectMarshaler](key string, values []T) Field {
- return Array(key, objects[T](values))
-}
-
-type objects[T zapcore.ObjectMarshaler] []T
-
-func (os objects[T]) MarshalLogArray(arr zapcore.ArrayEncoder) error {
- for _, o := range os {
- if err := arr.AppendObject(o); err != nil {
- return err
- }
- }
- return nil
-}
-
-// ObjectMarshalerPtr is a constraint that specifies that the given type
-// implements zapcore.ObjectMarshaler on a pointer receiver.
-type ObjectMarshalerPtr[T any] interface {
- *T
- zapcore.ObjectMarshaler
-}
-
-// ObjectValues constructs a field with the given key, holding a list of the
-// provided objects, where pointers to these objects can be marshaled by Zap.
-//
-// Note that pointers to these objects must implement zapcore.ObjectMarshaler.
-// That is, if you're trying to marshal a []Request, the MarshalLogObject
-// method must be declared on the *Request type, not the value (Request).
-// If it's on the value, use Objects.
-//
-// Given an object that implements MarshalLogObject on the pointer receiver,
-// you can log a slice of those objects with ObjectValues like so:
-//
-// type Request struct{ ... }
-// func (r *Request) MarshalLogObject(enc zapcore.ObjectEncoder) error
-//
-// var requests []Request = ...
-// logger.Info("sending requests", zap.ObjectValues("requests", requests))
-//
-// If instead, you have a slice of pointers of such an object, use the Objects
-// field constructor.
-//
-// var requests []*Request = ...
-// logger.Info("sending requests", zap.Objects("requests", requests))
-func ObjectValues[T any, P ObjectMarshalerPtr[T]](key string, values []T) Field {
- return Array(key, objectValues[T, P](values))
-}
-
-type objectValues[T any, P ObjectMarshalerPtr[T]] []T
-
-func (os objectValues[T, P]) MarshalLogArray(arr zapcore.ArrayEncoder) error {
- for i := range os {
- // It is necessary for us to explicitly reference the "P" type.
- // We cannot simply pass "&os[i]" to AppendObject because its type
- // is "*T", which the type system does not consider as
- // implementing ObjectMarshaler.
- // Only the type "P" satisfies ObjectMarshaler, which we have
- // to convert "*T" to explicitly.
- var p P = &os[i]
- if err := arr.AppendObject(p); err != nil {
- return err
- }
- }
- return nil
-}
-
-// Stringers constructs a field with the given key, holding a list of the
-// output provided by the value's String method
-//
-// Given an object that implements String on the value receiver, you
-// can log a slice of those objects with Objects like so:
-//
-// type Request struct{ ... }
-// func (a Request) String() string
-//
-// var requests []Request = ...
-// logger.Info("sending requests", zap.Stringers("requests", requests))
-//
-// Note that these objects must implement fmt.Stringer directly.
-// That is, if you're trying to marshal a []Request, the String method
-// must be declared on the Request type, not its pointer (*Request).
-func Stringers[T fmt.Stringer](key string, values []T) Field {
- return Array(key, stringers[T](values))
-}
-
-type stringers[T fmt.Stringer] []T
-
-func (os stringers[T]) MarshalLogArray(arr zapcore.ArrayEncoder) error {
- for _, o := range os {
- arr.AppendString(o.String())
- }
- return nil
-}
diff --git a/dependencies/pkg/mod/go.uber.org/zap@v1.25.0/array_go118_test.go b/dependencies/pkg/mod/go.uber.org/zap@v1.25.0/array_go118_test.go
deleted file mode 100644
index e4c6274..0000000
--- a/dependencies/pkg/mod/go.uber.org/zap@v1.25.0/array_go118_test.go
+++ /dev/null
@@ -1,240 +0,0 @@
-// Copyright (c) 2022 Uber Technologies, Inc.
-//
-// Permission is hereby granted, free of charge, to any person obtaining a copy
-// of this software and associated documentation files (the "Software"), to deal
-// in the Software without restriction, including without limitation the rights
-// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-// copies of the Software, and to permit persons to whom the Software is
-// furnished to do so, subject to the following conditions:
-//
-// The above copyright notice and this permission notice shall be included in
-// all copies or substantial portions of the Software.
-//
-// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
-// THE SOFTWARE.
-
-//go:build go1.18
-// +build go1.18
-
-package zap
-
-import (
- "errors"
- "fmt"
- "testing"
-
- "github.com/stretchr/testify/assert"
- "github.com/stretchr/testify/require"
- "go.uber.org/zap/zapcore"
-)
-
-func TestObjectsAndObjectValues(t *testing.T) {
- t.Parallel()
-
- tests := []struct {
- desc string
- give Field
- want []any
- }{
- {
- desc: "Objects/nil slice",
- give: Objects[*emptyObject]("", nil),
- want: []any{},
- },
- {
- desc: "ObjectValues/nil slice",
- give: ObjectValues[emptyObject]("", nil),
- want: []any{},
- },
- {
- desc: "ObjectValues/empty slice",
- give: ObjectValues("", []emptyObject{}),
- want: []any{},
- },
- {
- desc: "ObjectValues/single item",
- give: ObjectValues("", []emptyObject{
- {},
- }),
- want: []any{
- map[string]any{},
- },
- },
- {
- desc: "Objects/multiple different objects",
- give: Objects("", []*fakeObject{
- {value: "foo"},
- {value: "bar"},
- {value: "baz"},
- }),
- want: []any{
- map[string]any{"value": "foo"},
- map[string]any{"value": "bar"},
- map[string]any{"value": "baz"},
- },
- },
- {
- desc: "ObjectValues/multiple different objects",
- give: ObjectValues("", []fakeObject{
- {value: "foo"},
- {value: "bar"},
- {value: "baz"},
- }),
- want: []any{
- map[string]any{"value": "foo"},
- map[string]any{"value": "bar"},
- map[string]any{"value": "baz"},
- },
- },
- }
-
- for _, tt := range tests {
- tt := tt
- t.Run(tt.desc, func(t *testing.T) {
- t.Parallel()
-
- tt.give.Key = "k"
-
- enc := zapcore.NewMapObjectEncoder()
- tt.give.AddTo(enc)
- assert.Equal(t, tt.want, enc.Fields["k"])
- })
- }
-}
-
-type emptyObject struct{}
-
-func (*emptyObject) MarshalLogObject(zapcore.ObjectEncoder) error {
- return nil
-}
-
-type fakeObject struct {
- value string
- err error // marshaling error, if any
-}
-
-func (o *fakeObject) MarshalLogObject(enc zapcore.ObjectEncoder) error {
- enc.AddString("value", o.value)
- return o.err
-}
-
-func TestObjectsAndObjectValues_marshalError(t *testing.T) {
- t.Parallel()
-
- tests := []struct {
- desc string
- give Field
- want []any
- wantErr string
- }{
- {
- desc: "Objects",
- give: Objects("", []*fakeObject{
- {value: "foo"},
- {value: "bar", err: errors.New("great sadness")},
- {value: "baz"}, // does not get marshaled
- }),
- want: []any{
- map[string]any{"value": "foo"},
- map[string]any{"value": "bar"},
- },
- wantErr: "great sadness",
- },
- {
- desc: "ObjectValues",
- give: ObjectValues("", []fakeObject{
- {value: "foo"},
- {value: "bar", err: errors.New("stuff failed")},
- {value: "baz"}, // does not get marshaled
- }),
- want: []any{
- map[string]any{"value": "foo"},
- map[string]any{"value": "bar"},
- },
- wantErr: "stuff failed",
- },
- }
-
- for _, tt := range tests {
- tt := tt
- t.Run(tt.desc, func(t *testing.T) {
- t.Parallel()
-
- tt.give.Key = "k"
-
- enc := zapcore.NewMapObjectEncoder()
- tt.give.AddTo(enc)
-
- require.Contains(t, enc.Fields, "k")
- assert.Equal(t, tt.want, enc.Fields["k"])
-
- // AddTo puts the error in a "%vError" field based on the name of the
- // original field.
- require.Contains(t, enc.Fields, "kError")
- assert.Equal(t, tt.wantErr, enc.Fields["kError"])
- })
- }
-}
-
-type stringerObject struct {
- value string
-}
-
-func (s stringerObject) String() string {
- return s.value
-}
-
-func TestStringers(t *testing.T) {
- t.Parallel()
-
- tests := []struct {
- desc string
- give Field
- want []any
- }{
- {
- desc: "Stringers",
- give: Stringers("", []stringerObject{
- {value: "foo"},
- {value: "bar"},
- {value: "baz"},
- }),
- want: []any{
- "foo",
- "bar",
- "baz",
- },
- },
- {
- desc: "Stringers with []fmt.Stringer",
- give: Stringers("", []fmt.Stringer{
- stringerObject{value: "foo"},
- stringerObject{value: "bar"},
- stringerObject{value: "baz"},
- }),
- want: []any{
- "foo",
- "bar",
- "baz",
- },
- },
- }
-
- for _, tt := range tests {
- tt := tt
- t.Run(tt.desc, func(t *testing.T) {
- t.Parallel()
-
- tt.give.Key = "k"
-
- enc := zapcore.NewMapObjectEncoder()
- tt.give.AddTo(enc)
- assert.Equal(t, tt.want, enc.Fields["k"])
- })
- }
-}
diff --git a/dependencies/pkg/mod/go.uber.org/zap@v1.25.0/example_go118_test.go b/dependencies/pkg/mod/go.uber.org/zap@v1.25.0/example_go118_test.go
deleted file mode 100644
index dc25370..0000000
--- a/dependencies/pkg/mod/go.uber.org/zap@v1.25.0/example_go118_test.go
+++ /dev/null
@@ -1,66 +0,0 @@
-// Copyright (c) 2022 Uber Technologies, Inc.
-//
-// Permission is hereby granted, free of charge, to any person obtaining a copy
-// of this software and associated documentation files (the "Software"), to deal
-// in the Software without restriction, including without limitation the rights
-// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-// copies of the Software, and to permit persons to whom the Software is
-// furnished to do so, subject to the following conditions:
-//
-// The above copyright notice and this permission notice shall be included in
-// all copies or substantial portions of the Software.
-//
-// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
-// THE SOFTWARE.
-
-//go:build go1.18
-// +build go1.18
-
-package zap_test
-
-import "go.uber.org/zap"
-
-func ExampleObjects() {
- logger := zap.NewExample()
- defer logger.Sync()
-
- // Use the Objects field constructor when you have a list of objects,
- // all of which implement zapcore.ObjectMarshaler.
- logger.Debug("opening connections",
- zap.Objects("addrs", []addr{
- {IP: "123.45.67.89", Port: 4040},
- {IP: "127.0.0.1", Port: 4041},
- {IP: "192.168.0.1", Port: 4042},
- }))
- // Output:
- // {"level":"debug","msg":"opening connections","addrs":[{"ip":"123.45.67.89","port":4040},{"ip":"127.0.0.1","port":4041},{"ip":"192.168.0.1","port":4042}]}
-}
-
-func ExampleObjectValues() {
- logger := zap.NewExample()
- defer logger.Sync()
-
- // Use the ObjectValues field constructor when you have a list of
- // objects that do not implement zapcore.ObjectMarshaler directly,
- // but on their pointer receivers.
- logger.Debug("starting tunnels",
- zap.ObjectValues("addrs", []request{
- {
- URL: "/foo",
- Listen: addr{"127.0.0.1", 8080},
- Remote: addr{"123.45.67.89", 4040},
- },
- {
- URL: "/bar",
- Listen: addr{"127.0.0.1", 8080},
- Remote: addr{"127.0.0.1", 31200},
- },
- }))
- // Output:
- // {"level":"debug","msg":"starting tunnels","addrs":[{"url":"/foo","ip":"127.0.0.1","port":8080,"remote":{"ip":"123.45.67.89","port":4040}},{"url":"/bar","ip":"127.0.0.1","port":8080,"remote":{"ip":"127.0.0.1","port":31200}}]}
-}
diff --git a/dependencies/pkg/mod/go.uber.org/zap@v1.25.0/.codecov.yml b/dependencies/pkg/mod/go.uber.org/zap@v1.27.0/.codecov.yml
index 8e5ca7d..8e5ca7d 100644
--- a/dependencies/pkg/mod/go.uber.org/zap@v1.25.0/.codecov.yml
+++ b/dependencies/pkg/mod/go.uber.org/zap@v1.27.0/.codecov.yml
diff --git a/dependencies/pkg/mod/go.uber.org/zap@v1.25.0/.github/ISSUE_TEMPLATE/bug_report.md b/dependencies/pkg/mod/go.uber.org/zap@v1.27.0/.github/ISSUE_TEMPLATE/bug_report.md
index 96fe902..96fe902 100644
--- a/dependencies/pkg/mod/go.uber.org/zap@v1.25.0/.github/ISSUE_TEMPLATE/bug_report.md
+++ b/dependencies/pkg/mod/go.uber.org/zap@v1.27.0/.github/ISSUE_TEMPLATE/bug_report.md
diff --git a/dependencies/pkg/mod/go.uber.org/zap@v1.25.0/.github/ISSUE_TEMPLATE/config.yml b/dependencies/pkg/mod/go.uber.org/zap@v1.27.0/.github/ISSUE_TEMPLATE/config.yml
index 917a641..917a641 100644
--- a/dependencies/pkg/mod/go.uber.org/zap@v1.25.0/.github/ISSUE_TEMPLATE/config.yml
+++ b/dependencies/pkg/mod/go.uber.org/zap@v1.27.0/.github/ISSUE_TEMPLATE/config.yml
diff --git a/dependencies/pkg/mod/go.uber.org/zap@v1.25.0/.github/ISSUE_TEMPLATE/feature_request.md b/dependencies/pkg/mod/go.uber.org/zap@v1.27.0/.github/ISSUE_TEMPLATE/feature_request.md
index 8a1ef5c..8a1ef5c 100644
--- a/dependencies/pkg/mod/go.uber.org/zap@v1.25.0/.github/ISSUE_TEMPLATE/feature_request.md
+++ b/dependencies/pkg/mod/go.uber.org/zap@v1.27.0/.github/ISSUE_TEMPLATE/feature_request.md
diff --git a/dependencies/pkg/mod/go.uber.org/zap@v1.25.0/.github/dependabot.yml b/dependencies/pkg/mod/go.uber.org/zap@v1.27.0/.github/dependabot.yml
index 33ac821..33ac821 100644
--- a/dependencies/pkg/mod/go.uber.org/zap@v1.25.0/.github/dependabot.yml
+++ b/dependencies/pkg/mod/go.uber.org/zap@v1.27.0/.github/dependabot.yml
diff --git a/dependencies/pkg/mod/go.uber.org/zap@v1.25.0/.github/workflows/fossa.yaml b/dependencies/pkg/mod/go.uber.org/zap@v1.27.0/.github/workflows/fossa.yaml
index 6c91435..3da3e0d 100644
--- a/dependencies/pkg/mod/go.uber.org/zap@v1.25.0/.github/workflows/fossa.yaml
+++ b/dependencies/pkg/mod/go.uber.org/zap@v1.27.0/.github/workflows/fossa.yaml
@@ -11,7 +11,7 @@ jobs:
if: github.repository_owner == 'uber-go'
steps:
- name: Checkout code
- uses: actions/checkout@v3
+ uses: actions/checkout@v4
- name: FOSSA analysis
uses: fossas/fossa-action@v1
diff --git a/dependencies/pkg/mod/go.uber.org/zap@v1.25.0/.github/workflows/go.yml b/dependencies/pkg/mod/go.uber.org/zap@v1.27.0/.github/workflows/go.yml
index c4fb393..2fa9a69 100644
--- a/dependencies/pkg/mod/go.uber.org/zap@v1.25.0/.github/workflows/go.yml
+++ b/dependencies/pkg/mod/go.uber.org/zap@v1.27.0/.github/workflows/go.yml
@@ -2,7 +2,7 @@ name: Go
on:
push:
- branches: ['*']
+ branches: [master]
tags: ['v*']
pull_request:
branches: ['*']
@@ -16,20 +16,18 @@ jobs:
runs-on: ubuntu-latest
strategy:
matrix:
- go: ["1.19.x", "1.20.x"]
+ go: ["1.21.x", "1.22.x"]
include:
- - go: 1.20.x
- latest: true
+ - go: 1.22.x
steps:
- name: Checkout code
- uses: actions/checkout@v3
+ uses: actions/checkout@v4
- name: Setup Go
- uses: actions/setup-go@v3
+ uses: actions/setup-go@v4
with:
go-version: ${{ matrix.go }}
- cache: true
cache-dependency-path: '**/go.sum'
- name: Download Dependencies
@@ -39,16 +37,33 @@ jobs:
(cd benchmarks && go mod download)
(cd zapgrpc/internal/test && go mod download)
- - name: Lint
- if: matrix.latest
- run: make lint
-
- name: Test
run: make cover
- name: Upload coverage to codecov.io
uses: codecov/codecov-action@v3
+ lint:
+ name: Lint
+ runs-on: ubuntu-latest
+
+ steps:
+ - uses: actions/checkout@v4
+ name: Check out repository
+ - uses: actions/setup-go@v4
+ name: Set up Go
+ with:
+ go-version: 1.22.x
+ cache: false # managed by golangci-lint
+
+ - uses: golangci/golangci-lint-action@v4
+ name: Install golangci-lint
+ with:
+ version: latest
+ args: --version # make lint will run the linter
+
+ - run: make lint
+ name: Lint
+
- name: vulncheck
- if: matrix.latest
run: make vulncheck
diff --git a/dependencies/pkg/mod/go.uber.org/zap@v1.25.0/.gitignore b/dependencies/pkg/mod/go.uber.org/zap@v1.27.0/.gitignore
index da9d9d0..da9d9d0 100644
--- a/dependencies/pkg/mod/go.uber.org/zap@v1.25.0/.gitignore
+++ b/dependencies/pkg/mod/go.uber.org/zap@v1.27.0/.gitignore
diff --git a/dependencies/pkg/mod/go.uber.org/zap@v1.27.0/.golangci.yml b/dependencies/pkg/mod/go.uber.org/zap@v1.27.0/.golangci.yml
new file mode 100644
index 0000000..2346df1
--- /dev/null
+++ b/dependencies/pkg/mod/go.uber.org/zap@v1.27.0/.golangci.yml
@@ -0,0 +1,77 @@
+output:
+ # Make output more digestible with quickfix in vim/emacs/etc.
+ sort-results: true
+ print-issued-lines: false
+
+linters:
+ # We'll track the golangci-lint default linters manually
+ # instead of letting them change without our control.
+ disable-all: true
+ enable:
+ # golangci-lint defaults:
+ - errcheck
+ - gosimple
+ - govet
+ - ineffassign
+ - staticcheck
+ - unused
+
+ # Our own extras:
+ - gofumpt
+ - nolintlint # lints nolint directives
+ - revive
+
+linters-settings:
+ govet:
+ # These govet checks are disabled by default, but they're useful.
+ enable:
+ - niliness
+ - reflectvaluecompare
+ - sortslice
+ - unusedwrite
+
+ errcheck:
+ exclude-functions:
+ # These methods can not fail.
+ # They operate on an in-memory buffer.
+ - (*go.uber.org/zap/buffer.Buffer).Write
+ - (*go.uber.org/zap/buffer.Buffer).WriteByte
+ - (*go.uber.org/zap/buffer.Buffer).WriteString
+
+ - (*go.uber.org/zap/zapio.Writer).Close
+ - (*go.uber.org/zap/zapio.Writer).Sync
+ - (*go.uber.org/zap/zapio.Writer).Write
+ # Write to zapio.Writer cannot fail,
+ # so io.WriteString on it cannot fail.
+ - io.WriteString(*go.uber.org/zap/zapio.Writer)
+
+ # Writing a plain string to a fmt.State cannot fail.
+ - io.WriteString(fmt.State)
+
+issues:
+ # Print all issues reported by all linters.
+ max-issues-per-linter: 0
+ max-same-issues: 0
+
+ # Don't ignore some of the issues that golangci-lint considers okay.
+ # This includes documenting all exported entities.
+ exclude-use-default: false
+
+ exclude-rules:
+ # Don't warn on unused parameters.
+ # Parameter names are useful; replacing them with '_' is undesirable.
+ - linters: [revive]
+ text: 'unused-parameter: parameter \S+ seems to be unused, consider removing or renaming it as _'
+
+ # staticcheck already has smarter checks for empty blocks.
+ # revive's empty-block linter has false positives.
+ # For example, as of writing this, the following is not allowed.
+ # for foo() { }
+ - linters: [revive]
+ text: 'empty-block: this block is empty, you can remove it'
+
+ # Ignore logger.Sync() errcheck failures in example_test.go
+ # since those are intended to be uncomplicated examples.
+ - linters: [errcheck]
+ path: example_test.go
+ text: 'Error return value of `logger.Sync` is not checked'
diff --git a/dependencies/pkg/mod/go.uber.org/zap@v1.25.0/.readme.tmpl b/dependencies/pkg/mod/go.uber.org/zap@v1.27.0/.readme.tmpl
index 92aa65d..4fea302 100644
--- a/dependencies/pkg/mod/go.uber.org/zap@v1.25.0/.readme.tmpl
+++ b/dependencies/pkg/mod/go.uber.org/zap@v1.27.0/.readme.tmpl
@@ -1,7 +1,15 @@
# :zap: zap [![GoDoc][doc-img]][doc] [![Build Status][ci-img]][ci] [![Coverage Status][cov-img]][cov]
+<div align="center">
+
Blazing fast, structured, leveled logging in Go.
+![Zap logo](assets/logo.png)
+
+[![GoDoc][doc-img]][doc] [![Build Status][ci-img]][ci] [![Coverage Status][cov-img]][cov]
+
+</div>
+
## Installation
`go get -u go.uber.org/zap`
@@ -92,7 +100,7 @@ standard.
<hr>
-Released under the [MIT License](LICENSE.txt).
+Released under the [MIT License](LICENSE).
<sup id="footnote-versions">1</sup> In particular, keep in mind that we may be
benchmarking against slightly older versions of other packages. Versions are
diff --git a/dependencies/pkg/mod/go.uber.org/zap@v1.25.0/CHANGELOG.md b/dependencies/pkg/mod/go.uber.org/zap@v1.27.0/CHANGELOG.md
index fe57bc0..6d6cd5f 100644
--- a/dependencies/pkg/mod/go.uber.org/zap@v1.25.0/CHANGELOG.md
+++ b/dependencies/pkg/mod/go.uber.org/zap@v1.27.0/CHANGELOG.md
@@ -1,7 +1,34 @@
# Changelog
All notable changes to this project will be documented in this file.
-This project adheres to [Semantic Versioning](http://semver.org/spec/v2.0.0.html).
+This project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html).
+
+## 1.27.0 (20 Feb 2024)
+Enhancements:
+* [#1378][]: Add `WithLazy` method for `SugaredLogger`.
+* [#1399][]: zaptest: Add `NewTestingWriter` for customizing TestingWriter with more flexibility than `NewLogger`.
+* [#1406][]: Add `Log`, `Logw`, `Logln` methods for `SugaredLogger`.
+* [#1416][]: Add `WithPanicHook` option for testing panic logs.
+
+Thanks to @defval, @dimmo, @arxeiss, and @MKrupauskas for their contributions to this release.
+
+[#1378]: https://github.com/uber-go/zap/pull/1378
+[#1399]: https://github.com/uber-go/zap/pull/1399
+[#1406]: https://github.com/uber-go/zap/pull/1406
+[#1416]: https://github.com/uber-go/zap/pull/1416
+
+## 1.26.0 (14 Sep 2023)
+Enhancements:
+* [#1297][]: Add Dict as a Field.
+* [#1319][]: Add `WithLazy` method to `Logger` which lazily evaluates the structured
+context.
+* [#1350][]: String encoding is much (~50%) faster now.
+
+Thanks to @hhk7734, @jquirke, and @cdvr1993 for their contributions to this release.
+
+[#1297]: https://github.com/uber-go/zap/pull/1297
+[#1319]: https://github.com/uber-go/zap/pull/1319
+[#1350]: https://github.com/uber-go/zap/pull/1350
## 1.25.0 (1 Aug 2023)
@@ -14,7 +41,7 @@ Enhancements:
* [#1273][]: Add `Name` to `Logger` which returns the Logger's name if one is set.
* [#1281][]: Add `zap/exp/expfield` package which contains helper methods
`Str` and `Strs` for constructing String-like zap.Fields.
-* [#1310][]: Reduce stack size on `Any`.
+* [#1310][]: Reduce stack size on `Any`.
Thanks to @knight42, @dzakaammar, @bcspragu, and @rexywork for their contributions
to this release.
@@ -48,7 +75,6 @@ Enhancements:
[#1147]: https://github.com/uber-go/zap/pull/1147
[#1155]: https://github.com/uber-go/zap/pull/1155
-
## 1.22.0 (8 Aug 2022)
Enhancements:
@@ -197,6 +223,16 @@ Enhancements:
Thanks to @ash2k, @FMLS, @jimmystewpot, @Oncilla, @tsoslow, @tylitianrui, @withshubh, and @wziww for their contributions to this release.
+[#865]: https://github.com/uber-go/zap/pull/865
+[#867]: https://github.com/uber-go/zap/pull/867
+[#881]: https://github.com/uber-go/zap/pull/881
+[#903]: https://github.com/uber-go/zap/pull/903
+[#912]: https://github.com/uber-go/zap/pull/912
+[#913]: https://github.com/uber-go/zap/pull/913
+[#928]: https://github.com/uber-go/zap/pull/928
+[#931]: https://github.com/uber-go/zap/pull/931
+[#936]: https://github.com/uber-go/zap/pull/936
+
## 1.16.0 (1 Sep 2020)
Bugfixes:
@@ -218,6 +254,17 @@ Enhancements:
Thanks to @SteelPhase, @tmshn, @lixingwang, @wyxloading, @moul, @segevfiner, @andy-retailnext and @jcorbin for their contributions to this release.
+[#629]: https://github.com/uber-go/zap/pull/629
+[#697]: https://github.com/uber-go/zap/pull/697
+[#828]: https://github.com/uber-go/zap/pull/828
+[#835]: https://github.com/uber-go/zap/pull/835
+[#843]: https://github.com/uber-go/zap/pull/843
+[#844]: https://github.com/uber-go/zap/pull/844
+[#852]: https://github.com/uber-go/zap/pull/852
+[#854]: https://github.com/uber-go/zap/pull/854
+[#861]: https://github.com/uber-go/zap/pull/861
+[#862]: https://github.com/uber-go/zap/pull/862
+
## 1.15.0 (23 Apr 2020)
Bugfixes:
@@ -234,6 +281,11 @@ Enhancements:
Thanks to @danielbprice for their contributions to this release.
+[#804]: https://github.com/uber-go/zap/pull/804
+[#812]: https://github.com/uber-go/zap/pull/812
+[#806]: https://github.com/uber-go/zap/pull/806
+[#813]: https://github.com/uber-go/zap/pull/813
+
## 1.14.1 (14 Mar 2020)
Bugfixes:
@@ -246,6 +298,10 @@ Bugfixes:
Thanks to @YashishDua for their contributions to this release.
+[#791]: https://github.com/uber-go/zap/pull/791
+[#795]: https://github.com/uber-go/zap/pull/795
+[#799]: https://github.com/uber-go/zap/pull/799
+
## 1.14.0 (20 Feb 2020)
Enhancements:
@@ -256,6 +312,11 @@ Enhancements:
Thanks to @caibirdme for their contributions to this release.
+[#771]: https://github.com/uber-go/zap/pull/771
+[#773]: https://github.com/uber-go/zap/pull/773
+[#775]: https://github.com/uber-go/zap/pull/775
+[#786]: https://github.com/uber-go/zap/pull/786
+
## 1.13.0 (13 Nov 2019)
Enhancements:
@@ -264,11 +325,15 @@ Enhancements:
Thanks to @jbizzle for their contributions to this release.
+[#758]: https://github.com/uber-go/zap/pull/758
+
## 1.12.0 (29 Oct 2019)
Enhancements:
* [#751][]: Migrate to Go modules.
+[#751]: https://github.com/uber-go/zap/pull/751
+
## 1.11.0 (21 Oct 2019)
Enhancements:
@@ -277,6 +342,9 @@ Enhancements:
Thanks to @juicemia, @uhthomas for their contributions to this release.
+[#725]: https://github.com/uber-go/zap/pull/725
+[#736]: https://github.com/uber-go/zap/pull/736
+
## 1.10.0 (29 Apr 2019)
Bugfixes:
@@ -294,13 +362,21 @@ Enhancements:
Thanks to @iaroslav-ciupin, @lelenanam, @joa, @NWilson for their contributions
to this release.
-## v1.9.1 (06 Aug 2018)
+[#657]: https://github.com/uber-go/zap/pull/657
+[#706]: https://github.com/uber-go/zap/pull/706
+[#610]: https://github.com/uber-go/zap/pull/610
+[#675]: https://github.com/uber-go/zap/pull/675
+[#704]: https://github.com/uber-go/zap/pull/704
+
+## 1.9.1 (06 Aug 2018)
Bugfixes:
* [#614][]: MapObjectEncoder should not ignore empty slices.
-## v1.9.0 (19 Jul 2018)
+[#614]: https://github.com/uber-go/zap/pull/614
+
+## 1.9.0 (19 Jul 2018)
Enhancements:
* [#602][]: Reduce number of allocations when logging with reflection.
@@ -309,7 +385,11 @@ Enhancements:
Thanks to @nfarah86, @AlekSi, @JeanMertz, @philippgille, @etsangsplk, and
@dimroc for their contributions to this release.
-## v1.8.0 (13 Apr 2018)
+[#602]: https://github.com/uber-go/zap/pull/602
+[#572]: https://github.com/uber-go/zap/pull/572
+[#606]: https://github.com/uber-go/zap/pull/606
+
+## 1.8.0 (13 Apr 2018)
Enhancements:
* [#508][]: Make log level configurable when redirecting the standard
@@ -322,19 +402,28 @@ Bugfixes:
Thanks to @DiSiqueira and @djui for their contributions to this release.
-## v1.7.1 (25 Sep 2017)
+[#508]: https://github.com/uber-go/zap/pull/508
+[#518]: https://github.com/uber-go/zap/pull/518
+[#577]: https://github.com/uber-go/zap/pull/577
+[#574]: https://github.com/uber-go/zap/pull/574
+
+## 1.7.1 (25 Sep 2017)
Bugfixes:
* [#504][]: Store strings when using AddByteString with the map encoder.
-## v1.7.0 (21 Sep 2017)
+[#504]: https://github.com/uber-go/zap/pull/504
+
+## 1.7.0 (21 Sep 2017)
Enhancements:
* [#487][]: Add `NewStdLogAt`, which extends `NewStdLog` by allowing the user
to specify the level of the logged messages.
-## v1.6.0 (30 Aug 2017)
+[#487]: https://github.com/uber-go/zap/pull/487
+
+## 1.6.0 (30 Aug 2017)
Enhancements:
@@ -342,7 +431,10 @@ Enhancements:
* [#490][]: Add a `ContextMap` method to observer logs for simpler
field validation in tests.
-## v1.5.0 (22 Jul 2017)
+[#490]: https://github.com/uber-go/zap/pull/490
+[#491]: https://github.com/uber-go/zap/pull/491
+
+## 1.5.0 (22 Jul 2017)
Enhancements:
@@ -355,7 +447,12 @@ Bugfixes:
Thanks to @richard-tunein and @pavius for their contributions to this release.
-## v1.4.1 (08 Jun 2017)
+[#477]: https://github.com/uber-go/zap/pull/477
+[#465]: https://github.com/uber-go/zap/pull/465
+[#460]: https://github.com/uber-go/zap/pull/460
+[#470]: https://github.com/uber-go/zap/pull/470
+
+## 1.4.1 (08 Jun 2017)
This release fixes two bugs.
@@ -364,7 +461,10 @@ Bugfixes:
* [#435][]: Support a variety of case conventions when unmarshaling levels.
* [#444][]: Fix a panic in the observer.
-## v1.4.0 (12 May 2017)
+[#435]: https://github.com/uber-go/zap/pull/435
+[#444]: https://github.com/uber-go/zap/pull/444
+
+## 1.4.0 (12 May 2017)
This release adds a few small features and is fully backward-compatible.
@@ -376,7 +476,11 @@ Enhancements:
* [#431][]: Make `zap.AtomicLevel` implement `fmt.Stringer`, which makes a
variety of operations a bit simpler.
-## v1.3.0 (25 Apr 2017)
+[#424]: https://github.com/uber-go/zap/pull/424
+[#425]: https://github.com/uber-go/zap/pull/425
+[#431]: https://github.com/uber-go/zap/pull/431
+
+## 1.3.0 (25 Apr 2017)
This release adds an enhancement to zap's testing helpers as well as the
ability to marshal an AtomicLevel. It is fully backward-compatible.
@@ -387,7 +491,10 @@ Enhancements:
particularly useful when testing the `SugaredLogger`.
* [#416][]: Make `AtomicLevel` implement `encoding.TextMarshaler`.
-## v1.2.0 (13 Apr 2017)
+[#415]: https://github.com/uber-go/zap/pull/415
+[#416]: https://github.com/uber-go/zap/pull/416
+
+## 1.2.0 (13 Apr 2017)
This release adds a gRPC compatibility wrapper. It is fully backward-compatible.
@@ -396,7 +503,9 @@ Enhancements:
* [#402][]: Add a `zapgrpc` package that wraps zap's Logger and implements
`grpclog.Logger`.
-## v1.1.0 (31 Mar 2017)
+[#402]: https://github.com/uber-go/zap/pull/402
+
+## 1.1.0 (31 Mar 2017)
This release fixes two bugs and adds some enhancements to zap's testing helpers.
It is fully backward-compatible.
@@ -413,7 +522,11 @@ Enhancements:
Thanks to @moitias for contributing to this release.
-## v1.0.0 (14 Mar 2017)
+[#385]: https://github.com/uber-go/zap/pull/385
+[#396]: https://github.com/uber-go/zap/pull/396
+[#386]: https://github.com/uber-go/zap/pull/386
+
+## 1.0.0 (14 Mar 2017)
This is zap's first stable release. All exported APIs are now final, and no
further breaking changes will be made in the 1.x release series. Anyone using a
@@ -458,7 +571,21 @@ Enhancements:
Thanks to @suyash, @htrendev, @flisky, @Ulexus, and @skipor for their
contributions to this release.
-## v1.0.0-rc.3 (7 Mar 2017)
+[#366]: https://github.com/uber-go/zap/pull/366
+[#364]: https://github.com/uber-go/zap/pull/364
+[#371]: https://github.com/uber-go/zap/pull/371
+[#362]: https://github.com/uber-go/zap/pull/362
+[#369]: https://github.com/uber-go/zap/pull/369
+[#347]: https://github.com/uber-go/zap/pull/347
+[#373]: https://github.com/uber-go/zap/pull/373
+[#348]: https://github.com/uber-go/zap/pull/348
+[#327]: https://github.com/uber-go/zap/pull/327
+[#376]: https://github.com/uber-go/zap/pull/376
+[#346]: https://github.com/uber-go/zap/pull/346
+[#365]: https://github.com/uber-go/zap/pull/365
+[#372]: https://github.com/uber-go/zap/pull/372
+
+## 1.0.0-rc.3 (7 Mar 2017)
This is the third release candidate for zap's stable release. There are no
breaking changes.
@@ -479,7 +606,12 @@ Enhancements:
Thanks to @ansel1 and @suyash for their contributions to this release.
-## v1.0.0-rc.2 (21 Feb 2017)
+[#339]: https://github.com/uber-go/zap/pull/339
+[#307]: https://github.com/uber-go/zap/pull/307
+[#353]: https://github.com/uber-go/zap/pull/353
+[#311]: https://github.com/uber-go/zap/pull/311
+
+## 1.0.0-rc.2 (21 Feb 2017)
This is the second release candidate for zap's stable release. It includes two
breaking changes.
@@ -516,7 +648,16 @@ Enhancements:
Thanks to @skipor and @chapsuk for their contributions to this release.
-## v1.0.0-rc.1 (14 Feb 2017)
+[#316]: https://github.com/uber-go/zap/pull/316
+[#309]: https://github.com/uber-go/zap/pull/309
+[#317]: https://github.com/uber-go/zap/pull/317
+[#321]: https://github.com/uber-go/zap/pull/321
+[#325]: https://github.com/uber-go/zap/pull/325
+[#333]: https://github.com/uber-go/zap/pull/333
+[#326]: https://github.com/uber-go/zap/pull/326
+[#300]: https://github.com/uber-go/zap/pull/300
+
+## 1.0.0-rc.1 (14 Feb 2017)
This is the first release candidate for zap's stable release. There are multiple
breaking changes and improvements from the pre-release version. Most notably:
@@ -536,7 +677,7 @@ breaking changes and improvements from the pre-release version. Most notably:
* Sampling is more accurate, and doesn't depend on the standard library's shared
timer heap.
-## v0.1.0-beta.1 (6 Feb 2017)
+## 0.1.0-beta.1 (6 Feb 2017)
This is a minor version, tagged to allow users to pin to the pre-1.0 APIs and
upgrade at their leisure. Since this is the first tagged release, there are no
@@ -544,95 +685,3 @@ backward compatibility concerns and all functionality is new.
Early zap adopters should pin to the 0.1.x minor version until they're ready to
upgrade to the upcoming stable release.
-
-[#316]: https://github.com/uber-go/zap/pull/316
-[#309]: https://github.com/uber-go/zap/pull/309
-[#317]: https://github.com/uber-go/zap/pull/317
-[#321]: https://github.com/uber-go/zap/pull/321
-[#325]: https://github.com/uber-go/zap/pull/325
-[#333]: https://github.com/uber-go/zap/pull/333
-[#326]: https://github.com/uber-go/zap/pull/326
-[#300]: https://github.com/uber-go/zap/pull/300
-[#339]: https://github.com/uber-go/zap/pull/339
-[#307]: https://github.com/uber-go/zap/pull/307
-[#353]: https://github.com/uber-go/zap/pull/353
-[#311]: https://github.com/uber-go/zap/pull/311
-[#366]: https://github.com/uber-go/zap/pull/366
-[#364]: https://github.com/uber-go/zap/pull/364
-[#371]: https://github.com/uber-go/zap/pull/371
-[#362]: https://github.com/uber-go/zap/pull/362
-[#369]: https://github.com/uber-go/zap/pull/369
-[#347]: https://github.com/uber-go/zap/pull/347
-[#373]: https://github.com/uber-go/zap/pull/373
-[#348]: https://github.com/uber-go/zap/pull/348
-[#327]: https://github.com/uber-go/zap/pull/327
-[#376]: https://github.com/uber-go/zap/pull/376
-[#346]: https://github.com/uber-go/zap/pull/346
-[#365]: https://github.com/uber-go/zap/pull/365
-[#372]: https://github.com/uber-go/zap/pull/372
-[#385]: https://github.com/uber-go/zap/pull/385
-[#396]: https://github.com/uber-go/zap/pull/396
-[#386]: https://github.com/uber-go/zap/pull/386
-[#402]: https://github.com/uber-go/zap/pull/402
-[#415]: https://github.com/uber-go/zap/pull/415
-[#416]: https://github.com/uber-go/zap/pull/416
-[#424]: https://github.com/uber-go/zap/pull/424
-[#425]: https://github.com/uber-go/zap/pull/425
-[#431]: https://github.com/uber-go/zap/pull/431
-[#435]: https://github.com/uber-go/zap/pull/435
-[#444]: https://github.com/uber-go/zap/pull/444
-[#477]: https://github.com/uber-go/zap/pull/477
-[#465]: https://github.com/uber-go/zap/pull/465
-[#460]: https://github.com/uber-go/zap/pull/460
-[#470]: https://github.com/uber-go/zap/pull/470
-[#487]: https://github.com/uber-go/zap/pull/487
-[#490]: https://github.com/uber-go/zap/pull/490
-[#491]: https://github.com/uber-go/zap/pull/491
-[#504]: https://github.com/uber-go/zap/pull/504
-[#508]: https://github.com/uber-go/zap/pull/508
-[#518]: https://github.com/uber-go/zap/pull/518
-[#577]: https://github.com/uber-go/zap/pull/577
-[#574]: https://github.com/uber-go/zap/pull/574
-[#602]: https://github.com/uber-go/zap/pull/602
-[#572]: https://github.com/uber-go/zap/pull/572
-[#606]: https://github.com/uber-go/zap/pull/606
-[#614]: https://github.com/uber-go/zap/pull/614
-[#657]: https://github.com/uber-go/zap/pull/657
-[#706]: https://github.com/uber-go/zap/pull/706
-[#610]: https://github.com/uber-go/zap/pull/610
-[#675]: https://github.com/uber-go/zap/pull/675
-[#704]: https://github.com/uber-go/zap/pull/704
-[#725]: https://github.com/uber-go/zap/pull/725
-[#736]: https://github.com/uber-go/zap/pull/736
-[#751]: https://github.com/uber-go/zap/pull/751
-[#758]: https://github.com/uber-go/zap/pull/758
-[#771]: https://github.com/uber-go/zap/pull/771
-[#773]: https://github.com/uber-go/zap/pull/773
-[#775]: https://github.com/uber-go/zap/pull/775
-[#786]: https://github.com/uber-go/zap/pull/786
-[#791]: https://github.com/uber-go/zap/pull/791
-[#795]: https://github.com/uber-go/zap/pull/795
-[#799]: https://github.com/uber-go/zap/pull/799
-[#804]: https://github.com/uber-go/zap/pull/804
-[#812]: https://github.com/uber-go/zap/pull/812
-[#806]: https://github.com/uber-go/zap/pull/806
-[#813]: https://github.com/uber-go/zap/pull/813
-[#629]: https://github.com/uber-go/zap/pull/629
-[#697]: https://github.com/uber-go/zap/pull/697
-[#828]: https://github.com/uber-go/zap/pull/828
-[#835]: https://github.com/uber-go/zap/pull/835
-[#843]: https://github.com/uber-go/zap/pull/843
-[#844]: https://github.com/uber-go/zap/pull/844
-[#852]: https://github.com/uber-go/zap/pull/852
-[#854]: https://github.com/uber-go/zap/pull/854
-[#861]: https://github.com/uber-go/zap/pull/861
-[#862]: https://github.com/uber-go/zap/pull/862
-[#865]: https://github.com/uber-go/zap/pull/865
-[#867]: https://github.com/uber-go/zap/pull/867
-[#881]: https://github.com/uber-go/zap/pull/881
-[#903]: https://github.com/uber-go/zap/pull/903
-[#912]: https://github.com/uber-go/zap/pull/912
-[#913]: https://github.com/uber-go/zap/pull/913
-[#928]: https://github.com/uber-go/zap/pull/928
-[#931]: https://github.com/uber-go/zap/pull/931
-[#936]: https://github.com/uber-go/zap/pull/936
diff --git a/dependencies/pkg/mod/go.uber.org/zap@v1.25.0/CODE_OF_CONDUCT.md b/dependencies/pkg/mod/go.uber.org/zap@v1.27.0/CODE_OF_CONDUCT.md
index e327d9a..e327d9a 100644
--- a/dependencies/pkg/mod/go.uber.org/zap@v1.25.0/CODE_OF_CONDUCT.md
+++ b/dependencies/pkg/mod/go.uber.org/zap@v1.27.0/CODE_OF_CONDUCT.md
diff --git a/dependencies/pkg/mod/go.uber.org/zap@v1.25.0/CONTRIBUTING.md b/dependencies/pkg/mod/go.uber.org/zap@v1.27.0/CONTRIBUTING.md
index ea02f3c..ea02f3c 100644
--- a/dependencies/pkg/mod/go.uber.org/zap@v1.25.0/CONTRIBUTING.md
+++ b/dependencies/pkg/mod/go.uber.org/zap@v1.27.0/CONTRIBUTING.md
diff --git a/dependencies/pkg/mod/go.uber.org/zap@v1.25.0/FAQ.md b/dependencies/pkg/mod/go.uber.org/zap@v1.27.0/FAQ.md
index b183b20..b183b20 100644
--- a/dependencies/pkg/mod/go.uber.org/zap@v1.25.0/FAQ.md
+++ b/dependencies/pkg/mod/go.uber.org/zap@v1.27.0/FAQ.md
diff --git a/dependencies/pkg/mod/go.uber.org/zap@v1.25.0/LICENSE.txt b/dependencies/pkg/mod/go.uber.org/zap@v1.27.0/LICENSE
index 6652bed..6652bed 100644
--- a/dependencies/pkg/mod/go.uber.org/zap@v1.25.0/LICENSE.txt
+++ b/dependencies/pkg/mod/go.uber.org/zap@v1.27.0/LICENSE
diff --git a/dependencies/pkg/mod/go.uber.org/zap@v1.27.0/Makefile b/dependencies/pkg/mod/go.uber.org/zap@v1.27.0/Makefile
new file mode 100644
index 0000000..eb1cee5
--- /dev/null
+++ b/dependencies/pkg/mod/go.uber.org/zap@v1.27.0/Makefile
@@ -0,0 +1,76 @@
+# Directory containing the Makefile.
+PROJECT_ROOT = $(dir $(abspath $(lastword $(MAKEFILE_LIST))))
+
+export GOBIN ?= $(PROJECT_ROOT)/bin
+export PATH := $(GOBIN):$(PATH)
+
+GOVULNCHECK = $(GOBIN)/govulncheck
+BENCH_FLAGS ?= -cpuprofile=cpu.pprof -memprofile=mem.pprof -benchmem
+
+# Directories containing independent Go modules.
+MODULE_DIRS = . ./exp ./benchmarks ./zapgrpc/internal/test
+
+# Directories that we want to track coverage for.
+COVER_DIRS = . ./exp
+
+.PHONY: all
+all: lint test
+
+.PHONY: lint
+lint: golangci-lint tidy-lint license-lint
+
+.PHONY: golangci-lint
+golangci-lint:
+ @$(foreach mod,$(MODULE_DIRS), \
+ (cd $(mod) && \
+ echo "[lint] golangci-lint: $(mod)" && \
+ golangci-lint run --path-prefix $(mod)) &&) true
+
+.PHONY: tidy
+tidy:
+ @$(foreach dir,$(MODULE_DIRS), \
+ (cd $(dir) && go mod tidy) &&) true
+
+.PHONY: tidy-lint
+tidy-lint:
+ @$(foreach mod,$(MODULE_DIRS), \
+ (cd $(mod) && \
+ echo "[lint] tidy: $(mod)" && \
+ go mod tidy && \
+ git diff --exit-code -- go.mod go.sum) &&) true
+
+
+.PHONY: license-lint
+license-lint:
+ ./checklicense.sh
+
+$(GOVULNCHECK):
+ cd tools && go install golang.org/x/vuln/cmd/govulncheck
+
+.PHONY: test
+test:
+ @$(foreach dir,$(MODULE_DIRS),(cd $(dir) && go test -race ./...) &&) true
+
+.PHONY: cover
+cover:
+ @$(foreach dir,$(COVER_DIRS), ( \
+ cd $(dir) && \
+ go test -race -coverprofile=cover.out -coverpkg=./... ./... \
+ && go tool cover -html=cover.out -o cover.html) &&) true
+
+.PHONY: bench
+BENCH ?= .
+bench:
+ @$(foreach dir,$(MODULE_DIRS), ( \
+ cd $(dir) && \
+ go list ./... | xargs -n1 go test -bench=$(BENCH) -run="^$$" $(BENCH_FLAGS) \
+ ) &&) true
+
+.PHONY: updatereadme
+updatereadme:
+ rm -f README.md
+ cat .readme.tmpl | go run internal/readme/readme.go > README.md
+
+.PHONY: vulncheck
+vulncheck: $(GOVULNCHECK)
+ $(GOVULNCHECK) ./...
diff --git a/dependencies/pkg/mod/go.uber.org/zap@v1.25.0/README.md b/dependencies/pkg/mod/go.uber.org/zap@v1.27.0/README.md
index 9de0892..a17035c 100644
--- a/dependencies/pkg/mod/go.uber.org/zap@v1.25.0/README.md
+++ b/dependencies/pkg/mod/go.uber.org/zap@v1.27.0/README.md
@@ -1,7 +1,16 @@
-# :zap: zap [![GoDoc][doc-img]][doc] [![Build Status][ci-img]][ci] [![Coverage Status][cov-img]][cov]
+# :zap: zap
+
+
+<div align="center">
Blazing fast, structured, leveled logging in Go.
+![Zap logo](assets/logo.png)
+
+[![GoDoc][doc-img]][doc] [![Build Status][ci-img]][ci] [![Coverage Status][cov-img]][cov]
+
+</div>
+
## Installation
`go get -u go.uber.org/zap`
@@ -66,41 +75,44 @@ Log a message and 10 fields:
| Package | Time | Time % to zap | Objects Allocated |
| :------ | :--: | :-----------: | :---------------: |
-| :zap: zap | 1744 ns/op | +0% | 5 allocs/op
-| :zap: zap (sugared) | 2483 ns/op | +42% | 10 allocs/op
-| zerolog | 918 ns/op | -47% | 1 allocs/op
-| go-kit | 5590 ns/op | +221% | 57 allocs/op
-| slog | 5640 ns/op | +223% | 40 allocs/op
-| apex/log | 21184 ns/op | +1115% | 63 allocs/op
-| logrus | 24338 ns/op | +1296% | 79 allocs/op
-| log15 | 26054 ns/op | +1394% | 74 allocs/op
+| :zap: zap | 656 ns/op | +0% | 5 allocs/op
+| :zap: zap (sugared) | 935 ns/op | +43% | 10 allocs/op
+| zerolog | 380 ns/op | -42% | 1 allocs/op
+| go-kit | 2249 ns/op | +243% | 57 allocs/op
+| slog (LogAttrs) | 2479 ns/op | +278% | 40 allocs/op
+| slog | 2481 ns/op | +278% | 42 allocs/op
+| apex/log | 9591 ns/op | +1362% | 63 allocs/op
+| log15 | 11393 ns/op | +1637% | 75 allocs/op
+| logrus | 11654 ns/op | +1677% | 79 allocs/op
Log a message with a logger that already has 10 fields of context:
| Package | Time | Time % to zap | Objects Allocated |
| :------ | :--: | :-----------: | :---------------: |
-| :zap: zap | 193 ns/op | +0% | 0 allocs/op
-| :zap: zap (sugared) | 227 ns/op | +18% | 1 allocs/op
-| zerolog | 81 ns/op | -58% | 0 allocs/op
-| slog | 322 ns/op | +67% | 0 allocs/op
-| go-kit | 5377 ns/op | +2686% | 56 allocs/op
-| apex/log | 19518 ns/op | +10013% | 53 allocs/op
-| log15 | 19812 ns/op | +10165% | 70 allocs/op
-| logrus | 21997 ns/op | +11297% | 68 allocs/op
+| :zap: zap | 67 ns/op | +0% | 0 allocs/op
+| :zap: zap (sugared) | 84 ns/op | +25% | 1 allocs/op
+| zerolog | 35 ns/op | -48% | 0 allocs/op
+| slog | 193 ns/op | +188% | 0 allocs/op
+| slog (LogAttrs) | 200 ns/op | +199% | 0 allocs/op
+| go-kit | 2460 ns/op | +3572% | 56 allocs/op
+| log15 | 9038 ns/op | +13390% | 70 allocs/op
+| apex/log | 9068 ns/op | +13434% | 53 allocs/op
+| logrus | 10521 ns/op | +15603% | 68 allocs/op
Log a static string, without any context or `printf`-style templating:
| Package | Time | Time % to zap | Objects Allocated |
| :------ | :--: | :-----------: | :---------------: |
-| :zap: zap | 165 ns/op | +0% | 0 allocs/op
-| :zap: zap (sugared) | 212 ns/op | +28% | 1 allocs/op
-| zerolog | 95 ns/op | -42% | 0 allocs/op
-| slog | 296 ns/op | +79% | 0 allocs/op
-| go-kit | 415 ns/op | +152% | 9 allocs/op
-| standard library | 422 ns/op | +156% | 2 allocs/op
-| apex/log | 1601 ns/op | +870% | 5 allocs/op
-| logrus | 3017 ns/op | +1728% | 23 allocs/op
-| log15 | 3469 ns/op | +2002% | 20 allocs/op
+| :zap: zap | 63 ns/op | +0% | 0 allocs/op
+| :zap: zap (sugared) | 81 ns/op | +29% | 1 allocs/op
+| zerolog | 32 ns/op | -49% | 0 allocs/op
+| standard library | 124 ns/op | +97% | 1 allocs/op
+| slog | 196 ns/op | +211% | 0 allocs/op
+| slog (LogAttrs) | 200 ns/op | +217% | 0 allocs/op
+| go-kit | 213 ns/op | +238% | 9 allocs/op
+| apex/log | 771 ns/op | +1124% | 5 allocs/op
+| logrus | 1439 ns/op | +2184% | 23 allocs/op
+| log15 | 2069 ns/op | +3184% | 20 allocs/op
## Development Status: Stable
@@ -120,7 +132,7 @@ standard.
<hr>
-Released under the [MIT License](LICENSE.txt).
+Released under the [MIT License](LICENSE).
<sup id="footnote-versions">1</sup> In particular, keep in mind that we may be
benchmarking against slightly older versions of other packages. Versions are
diff --git a/dependencies/pkg/mod/go.uber.org/zap@v1.25.0/array.go b/dependencies/pkg/mod/go.uber.org/zap@v1.27.0/array.go
index 5be3704..abfccb5 100644
--- a/dependencies/pkg/mod/go.uber.org/zap@v1.25.0/array.go
+++ b/dependencies/pkg/mod/go.uber.org/zap@v1.27.0/array.go
@@ -21,6 +21,7 @@
package zap
import (
+ "fmt"
"time"
"go.uber.org/zap/zapcore"
@@ -94,11 +95,137 @@ func Int8s(key string, nums []int8) Field {
return Array(key, int8s(nums))
}
+// Objects constructs a field with the given key, holding a list of the
+// provided objects that can be marshaled by Zap.
+//
+// Note that these objects must implement zapcore.ObjectMarshaler directly.
+// That is, if you're trying to marshal a []Request, the MarshalLogObject
+// method must be declared on the Request type, not its pointer (*Request).
+// If it's on the pointer, use ObjectValues.
+//
+// Given an object that implements MarshalLogObject on the value receiver, you
+// can log a slice of those objects with Objects like so:
+//
+// type Author struct{ ... }
+// func (a Author) MarshalLogObject(enc zapcore.ObjectEncoder) error
+//
+// var authors []Author = ...
+// logger.Info("loading article", zap.Objects("authors", authors))
+//
+// Similarly, given a type that implements MarshalLogObject on its pointer
+// receiver, you can log a slice of pointers to that object with Objects like
+// so:
+//
+// type Request struct{ ... }
+// func (r *Request) MarshalLogObject(enc zapcore.ObjectEncoder) error
+//
+// var requests []*Request = ...
+// logger.Info("sending requests", zap.Objects("requests", requests))
+//
+// If instead, you have a slice of values of such an object, use the
+// ObjectValues constructor.
+//
+// var requests []Request = ...
+// logger.Info("sending requests", zap.ObjectValues("requests", requests))
+func Objects[T zapcore.ObjectMarshaler](key string, values []T) Field {
+ return Array(key, objects[T](values))
+}
+
+type objects[T zapcore.ObjectMarshaler] []T
+
+func (os objects[T]) MarshalLogArray(arr zapcore.ArrayEncoder) error {
+ for _, o := range os {
+ if err := arr.AppendObject(o); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+// ObjectMarshalerPtr is a constraint that specifies that the given type
+// implements zapcore.ObjectMarshaler on a pointer receiver.
+type ObjectMarshalerPtr[T any] interface {
+ *T
+ zapcore.ObjectMarshaler
+}
+
+// ObjectValues constructs a field with the given key, holding a list of the
+// provided objects, where pointers to these objects can be marshaled by Zap.
+//
+// Note that pointers to these objects must implement zapcore.ObjectMarshaler.
+// That is, if you're trying to marshal a []Request, the MarshalLogObject
+// method must be declared on the *Request type, not the value (Request).
+// If it's on the value, use Objects.
+//
+// Given an object that implements MarshalLogObject on the pointer receiver,
+// you can log a slice of those objects with ObjectValues like so:
+//
+// type Request struct{ ... }
+// func (r *Request) MarshalLogObject(enc zapcore.ObjectEncoder) error
+//
+// var requests []Request = ...
+// logger.Info("sending requests", zap.ObjectValues("requests", requests))
+//
+// If instead, you have a slice of pointers of such an object, use the Objects
+// field constructor.
+//
+// var requests []*Request = ...
+// logger.Info("sending requests", zap.Objects("requests", requests))
+func ObjectValues[T any, P ObjectMarshalerPtr[T]](key string, values []T) Field {
+ return Array(key, objectValues[T, P](values))
+}
+
+type objectValues[T any, P ObjectMarshalerPtr[T]] []T
+
+func (os objectValues[T, P]) MarshalLogArray(arr zapcore.ArrayEncoder) error {
+ for i := range os {
+ // It is necessary for us to explicitly reference the "P" type.
+ // We cannot simply pass "&os[i]" to AppendObject because its type
+ // is "*T", which the type system does not consider as
+ // implementing ObjectMarshaler.
+ // Only the type "P" satisfies ObjectMarshaler, which we have
+ // to convert "*T" to explicitly.
+ var p P = &os[i]
+ if err := arr.AppendObject(p); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
// Strings constructs a field that carries a slice of strings.
func Strings(key string, ss []string) Field {
return Array(key, stringArray(ss))
}
+// Stringers constructs a field with the given key, holding a list of the
+// output provided by the value's String method
+//
+// Given an object that implements String on the value receiver, you
+// can log a slice of those objects with Objects like so:
+//
+// type Request struct{ ... }
+// func (a Request) String() string
+//
+// var requests []Request = ...
+// logger.Info("sending requests", zap.Stringers("requests", requests))
+//
+// Note that these objects must implement fmt.Stringer directly.
+// That is, if you're trying to marshal a []Request, the String method
+// must be declared on the Request type, not its pointer (*Request).
+func Stringers[T fmt.Stringer](key string, values []T) Field {
+ return Array(key, stringers[T](values))
+}
+
+type stringers[T fmt.Stringer] []T
+
+func (os stringers[T]) MarshalLogArray(arr zapcore.ArrayEncoder) error {
+ for _, o := range os {
+ arr.AppendString(o.String())
+ }
+ return nil
+}
+
// Times constructs a field that carries a slice of time.Times.
func Times(key string, ts []time.Time) Field {
return Array(key, times(ts))
diff --git a/dependencies/pkg/mod/go.uber.org/zap@v1.25.0/array_test.go b/dependencies/pkg/mod/go.uber.org/zap@v1.27.0/array_test.go
index 961cb1c..97738c9 100644
--- a/dependencies/pkg/mod/go.uber.org/zap@v1.25.0/array_test.go
+++ b/dependencies/pkg/mod/go.uber.org/zap@v1.27.0/array_test.go
@@ -21,12 +21,15 @@
package zap
import (
+ "errors"
+ "fmt"
"testing"
"time"
"go.uber.org/zap/zapcore"
"github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
)
func BenchmarkBoolsArrayMarshaler(b *testing.B) {
@@ -105,3 +108,209 @@ func TestArrayWrappers(t *testing.T) {
assert.Equal(t, 1, len(enc.Fields), "%s: found extra keys in map: %v", tt.desc, enc.Fields)
}
}
+
+func TestObjectsAndObjectValues(t *testing.T) {
+ t.Parallel()
+
+ tests := []struct {
+ desc string
+ give Field
+ want []any
+ }{
+ {
+ desc: "Objects/nil slice",
+ give: Objects[*emptyObject]("", nil),
+ want: []any{},
+ },
+ {
+ desc: "ObjectValues/nil slice",
+ give: ObjectValues[emptyObject]("", nil),
+ want: []any{},
+ },
+ {
+ desc: "ObjectValues/empty slice",
+ give: ObjectValues("", []emptyObject{}),
+ want: []any{},
+ },
+ {
+ desc: "ObjectValues/single item",
+ give: ObjectValues("", []emptyObject{
+ {},
+ }),
+ want: []any{
+ map[string]any{},
+ },
+ },
+ {
+ desc: "Objects/multiple different objects",
+ give: Objects("", []*fakeObject{
+ {value: "foo"},
+ {value: "bar"},
+ {value: "baz"},
+ }),
+ want: []any{
+ map[string]any{"value": "foo"},
+ map[string]any{"value": "bar"},
+ map[string]any{"value": "baz"},
+ },
+ },
+ {
+ desc: "ObjectValues/multiple different objects",
+ give: ObjectValues("", []fakeObject{
+ {value: "foo"},
+ {value: "bar"},
+ {value: "baz"},
+ }),
+ want: []any{
+ map[string]any{"value": "foo"},
+ map[string]any{"value": "bar"},
+ map[string]any{"value": "baz"},
+ },
+ },
+ }
+
+ for _, tt := range tests {
+ tt := tt
+ t.Run(tt.desc, func(t *testing.T) {
+ t.Parallel()
+
+ tt.give.Key = "k"
+
+ enc := zapcore.NewMapObjectEncoder()
+ tt.give.AddTo(enc)
+ assert.Equal(t, tt.want, enc.Fields["k"])
+ })
+ }
+}
+
+type emptyObject struct{}
+
+func (*emptyObject) MarshalLogObject(zapcore.ObjectEncoder) error {
+ return nil
+}
+
+type fakeObject struct {
+ value string
+ err error // marshaling error, if any
+}
+
+func (o *fakeObject) MarshalLogObject(enc zapcore.ObjectEncoder) error {
+ enc.AddString("value", o.value)
+ return o.err
+}
+
+func TestObjectsAndObjectValues_marshalError(t *testing.T) {
+ t.Parallel()
+
+ tests := []struct {
+ desc string
+ give Field
+ want []any
+ wantErr string
+ }{
+ {
+ desc: "Objects",
+ give: Objects("", []*fakeObject{
+ {value: "foo"},
+ {value: "bar", err: errors.New("great sadness")},
+ {value: "baz"}, // does not get marshaled
+ }),
+ want: []any{
+ map[string]any{"value": "foo"},
+ map[string]any{"value": "bar"},
+ },
+ wantErr: "great sadness",
+ },
+ {
+ desc: "ObjectValues",
+ give: ObjectValues("", []fakeObject{
+ {value: "foo"},
+ {value: "bar", err: errors.New("stuff failed")},
+ {value: "baz"}, // does not get marshaled
+ }),
+ want: []any{
+ map[string]any{"value": "foo"},
+ map[string]any{"value": "bar"},
+ },
+ wantErr: "stuff failed",
+ },
+ }
+
+ for _, tt := range tests {
+ tt := tt
+ t.Run(tt.desc, func(t *testing.T) {
+ t.Parallel()
+
+ tt.give.Key = "k"
+
+ enc := zapcore.NewMapObjectEncoder()
+ tt.give.AddTo(enc)
+
+ require.Contains(t, enc.Fields, "k")
+ assert.Equal(t, tt.want, enc.Fields["k"])
+
+ // AddTo puts the error in a "%vError" field based on the name of the
+ // original field.
+ require.Contains(t, enc.Fields, "kError")
+ assert.Equal(t, tt.wantErr, enc.Fields["kError"])
+ })
+ }
+}
+
+type stringerObject struct {
+ value string
+}
+
+func (s stringerObject) String() string {
+ return s.value
+}
+
+func TestStringers(t *testing.T) {
+ t.Parallel()
+
+ tests := []struct {
+ desc string
+ give Field
+ want []any
+ }{
+ {
+ desc: "Stringers",
+ give: Stringers("", []stringerObject{
+ {value: "foo"},
+ {value: "bar"},
+ {value: "baz"},
+ }),
+ want: []any{
+ "foo",
+ "bar",
+ "baz",
+ },
+ },
+ {
+ desc: "Stringers with []fmt.Stringer",
+ give: Stringers("", []fmt.Stringer{
+ stringerObject{value: "foo"},
+ stringerObject{value: "bar"},
+ stringerObject{value: "baz"},
+ }),
+ want: []any{
+ "foo",
+ "bar",
+ "baz",
+ },
+ },
+ }
+
+ for _, tt := range tests {
+ tt := tt
+ t.Run(tt.desc, func(t *testing.T) {
+ t.Parallel()
+
+ tt.give.Key = "k"
+
+ enc := zapcore.NewMapObjectEncoder()
+ tt.give.AddTo(enc)
+ assert.Equal(t, tt.want, enc.Fields["k"])
+ })
+ }
+}
diff --git a/dependencies/pkg/mod/go.uber.org/zap@v1.25.0/buffer/buffer.go b/dependencies/pkg/mod/go.uber.org/zap@v1.27.0/buffer/buffer.go
index 9e929cd..0b8540c 100644
--- a/dependencies/pkg/mod/go.uber.org/zap@v1.25.0/buffer/buffer.go
+++ b/dependencies/pkg/mod/go.uber.org/zap@v1.27.0/buffer/buffer.go
@@ -42,6 +42,11 @@ func (b *Buffer) AppendByte(v byte) {
b.bs = append(b.bs, v)
}
+// AppendBytes writes the given slice of bytes to the Buffer.
+func (b *Buffer) AppendBytes(v []byte) {
+ b.bs = append(b.bs, v...)
+}
+
// AppendString writes a string to the Buffer.
func (b *Buffer) AppendString(s string) {
b.bs = append(b.bs, s...)
diff --git a/dependencies/pkg/mod/go.uber.org/zap@v1.25.0/buffer/buffer_test.go b/dependencies/pkg/mod/go.uber.org/zap@v1.27.0/buffer/buffer_test.go
index 71ffac1..71ffac1 100644
--- a/dependencies/pkg/mod/go.uber.org/zap@v1.25.0/buffer/buffer_test.go
+++ b/dependencies/pkg/mod/go.uber.org/zap@v1.27.0/buffer/buffer_test.go
diff --git a/dependencies/pkg/mod/go.uber.org/zap@v1.25.0/buffer/pool.go b/dependencies/pkg/mod/go.uber.org/zap@v1.27.0/buffer/pool.go
index 8463233..8463233 100644
--- a/dependencies/pkg/mod/go.uber.org/zap@v1.25.0/buffer/pool.go
+++ b/dependencies/pkg/mod/go.uber.org/zap@v1.27.0/buffer/pool.go
diff --git a/dependencies/pkg/mod/go.uber.org/zap@v1.25.0/buffer/pool_test.go b/dependencies/pkg/mod/go.uber.org/zap@v1.27.0/buffer/pool_test.go
index a219815..a219815 100644
--- a/dependencies/pkg/mod/go.uber.org/zap@v1.25.0/buffer/pool_test.go
+++ b/dependencies/pkg/mod/go.uber.org/zap@v1.27.0/buffer/pool_test.go
diff --git a/dependencies/pkg/mod/go.uber.org/zap@v1.25.0/checklicense.sh b/dependencies/pkg/mod/go.uber.org/zap@v1.27.0/checklicense.sh
index 345ac8b..345ac8b 100644
--- a/dependencies/pkg/mod/go.uber.org/zap@v1.25.0/checklicense.sh
+++ b/dependencies/pkg/mod/go.uber.org/zap@v1.27.0/checklicense.sh
diff --git a/dependencies/pkg/mod/go.uber.org/zap@v1.25.0/clock_test.go b/dependencies/pkg/mod/go.uber.org/zap@v1.27.0/clock_test.go
index 29825fc..29825fc 100644
--- a/dependencies/pkg/mod/go.uber.org/zap@v1.25.0/clock_test.go
+++ b/dependencies/pkg/mod/go.uber.org/zap@v1.27.0/clock_test.go
diff --git a/dependencies/pkg/mod/go.uber.org/zap@v1.25.0/common_test.go b/dependencies/pkg/mod/go.uber.org/zap@v1.27.0/common_test.go
index b0a4a2e..b0a4a2e 100644
--- a/dependencies/pkg/mod/go.uber.org/zap@v1.25.0/common_test.go
+++ b/dependencies/pkg/mod/go.uber.org/zap@v1.27.0/common_test.go
diff --git a/dependencies/pkg/mod/go.uber.org/zap@v1.25.0/config.go b/dependencies/pkg/mod/go.uber.org/zap@v1.27.0/config.go
index e76e4e6..e76e4e6 100644
--- a/dependencies/pkg/mod/go.uber.org/zap@v1.25.0/config.go
+++ b/dependencies/pkg/mod/go.uber.org/zap@v1.27.0/config.go
diff --git a/dependencies/pkg/mod/go.uber.org/zap@v1.25.0/config_test.go b/dependencies/pkg/mod/go.uber.org/zap@v1.27.0/config_test.go
index 4badd1b..4badd1b 100644
--- a/dependencies/pkg/mod/go.uber.org/zap@v1.25.0/config_test.go
+++ b/dependencies/pkg/mod/go.uber.org/zap@v1.27.0/config_test.go
diff --git a/dependencies/pkg/mod/go.uber.org/zap@v1.25.0/doc.go b/dependencies/pkg/mod/go.uber.org/zap@v1.27.0/doc.go
index 3c50d7b..3c50d7b 100644
--- a/dependencies/pkg/mod/go.uber.org/zap@v1.25.0/doc.go
+++ b/dependencies/pkg/mod/go.uber.org/zap@v1.27.0/doc.go
diff --git a/dependencies/pkg/mod/go.uber.org/zap@v1.25.0/encoder.go b/dependencies/pkg/mod/go.uber.org/zap@v1.27.0/encoder.go
index caa04ce..caa04ce 100644
--- a/dependencies/pkg/mod/go.uber.org/zap@v1.25.0/encoder.go
+++ b/dependencies/pkg/mod/go.uber.org/zap@v1.27.0/encoder.go
diff --git a/dependencies/pkg/mod/go.uber.org/zap@v1.25.0/encoder_test.go b/dependencies/pkg/mod/go.uber.org/zap@v1.27.0/encoder_test.go
index f6be665..b71eb65 100644
--- a/dependencies/pkg/mod/go.uber.org/zap@v1.25.0/encoder_test.go
+++ b/dependencies/pkg/mod/go.uber.org/zap@v1.27.0/encoder_test.go
@@ -41,7 +41,7 @@ func TestRegisterEncoder(t *testing.T) {
func TestDuplicateRegisterEncoder(t *testing.T) {
testEncoders(func() {
- RegisterEncoder("foo", newNilEncoder)
+ assert.NoError(t, RegisterEncoder("foo", newNilEncoder), "expected to be able to register the encoder foo")
assert.Error(t, RegisterEncoder("foo", newNilEncoder), "expected an error when registering an encoder with the same name twice")
})
}
@@ -52,7 +52,7 @@ func TestRegisterEncoderNoName(t *testing.T) {
func TestNewEncoder(t *testing.T) {
testEncoders(func() {
- RegisterEncoder("foo", newNilEncoder)
+ assert.NoError(t, RegisterEncoder("foo", newNilEncoder), "expected to be able to register the encoder foo")
encoder, err := newEncoder("foo", zapcore.EncoderConfig{})
assert.NoError(t, err, "could not create an encoder for the registered name foo")
assert.Nil(t, encoder, "the encoder from newNilEncoder is not nil")
diff --git a/dependencies/pkg/mod/go.uber.org/zap@v1.25.0/error.go b/dependencies/pkg/mod/go.uber.org/zap@v1.27.0/error.go
index 38cb768..45f7b83 100644
--- a/dependencies/pkg/mod/go.uber.org/zap@v1.25.0/error.go
+++ b/dependencies/pkg/mod/go.uber.org/zap@v1.27.0/error.go
@@ -61,9 +61,12 @@ func (errs errArray) MarshalLogArray(arr zapcore.ArrayEncoder) error {
// allocating, pool the wrapper type.
elem := _errArrayElemPool.Get()
elem.error = errs[i]
- arr.AppendObject(elem)
+ err := arr.AppendObject(elem)
elem.error = nil
_errArrayElemPool.Put(elem)
+ if err != nil {
+ return err
+ }
}
return nil
}
diff --git a/dependencies/pkg/mod/go.uber.org/zap@v1.25.0/error_test.go b/dependencies/pkg/mod/go.uber.org/zap@v1.27.0/error_test.go
index 64dab19..4bfa370 100644
--- a/dependencies/pkg/mod/go.uber.org/zap@v1.25.0/error_test.go
+++ b/dependencies/pkg/mod/go.uber.org/zap@v1.27.0/error_test.go
@@ -95,3 +95,39 @@ func TestErrorsArraysHandleRichErrors(t *testing.T) {
require.True(t, ok, "Expected serialized error to be a map, got %T.", serialized)
assert.Equal(t, "egad", errMap["error"], "Unexpected standard error string.")
}
+
+func TestErrArrayBrokenEncoder(t *testing.T) {
+ t.Parallel()
+
+ failWith := errors.New("great sadness")
+ err := (brokenArrayObjectEncoder{
+ Err: failWith,
+ ObjectEncoder: zapcore.NewMapObjectEncoder(),
+ }).AddArray("errors", errArray{
+ errors.New("foo"),
+ errors.New("bar"),
+ })
+ require.Error(t, err, "Expected error from broken encoder.")
+ assert.ErrorIs(t, err, failWith, "Unexpected error.")
+}
+
+// brokenArrayObjectEncoder is an ObjectEncoder
+// that builds a broken ArrayEncoder.
+type brokenArrayObjectEncoder struct {
+ zapcore.ObjectEncoder
+ zapcore.ArrayEncoder
+
+ Err error // error to return
+}
+
+func (enc brokenArrayObjectEncoder) AddArray(key string, marshaler zapcore.ArrayMarshaler) error {
+ return enc.ObjectEncoder.AddArray(key,
+ zapcore.ArrayMarshalerFunc(func(ae zapcore.ArrayEncoder) error {
+ enc.ArrayEncoder = ae
+ return marshaler.MarshalLogArray(enc)
+ }))
+}
+
+func (enc brokenArrayObjectEncoder) AppendObject(zapcore.ObjectMarshaler) error {
+ return enc.Err
+}
diff --git a/dependencies/pkg/mod/go.uber.org/zap@v1.25.0/example_test.go b/dependencies/pkg/mod/go.uber.org/zap@v1.27.0/example_test.go
index 327e3ef..af7df0e 100644
--- a/dependencies/pkg/mod/go.uber.org/zap@v1.25.0/example_test.go
+++ b/dependencies/pkg/mod/go.uber.org/zap@v1.27.0/example_test.go
@@ -358,3 +358,56 @@ func ExampleWrapCore_wrap() {
// {"level":"info","msg":"doubled"}
// {"level":"info","msg":"doubled"}
}
+
+func ExampleDict() {
+ logger := zap.NewExample()
+ defer logger.Sync()
+
+ logger.Info("login event",
+ zap.Dict("event",
+ zap.Int("id", 123),
+ zap.String("name", "jane"),
+ zap.String("status", "pending")))
+ // Output:
+ // {"level":"info","msg":"login event","event":{"id":123,"name":"jane","status":"pending"}}
+}
+
+func ExampleObjects() {
+ logger := zap.NewExample()
+ defer logger.Sync()
+
+ // Use the Objects field constructor when you have a list of objects,
+ // all of which implement zapcore.ObjectMarshaler.
+ logger.Debug("opening connections",
+ zap.Objects("addrs", []addr{
+ {IP: "123.45.67.89", Port: 4040},
+ {IP: "127.0.0.1", Port: 4041},
+ {IP: "192.168.0.1", Port: 4042},
+ }))
+ // Output:
+ // {"level":"debug","msg":"opening connections","addrs":[{"ip":"123.45.67.89","port":4040},{"ip":"127.0.0.1","port":4041},{"ip":"192.168.0.1","port":4042}]}
+}
+
+func ExampleObjectValues() {
+ logger := zap.NewExample()
+ defer logger.Sync()
+
+ // Use the ObjectValues field constructor when you have a list of
+ // objects that do not implement zapcore.ObjectMarshaler directly,
+ // but on their pointer receivers.
+ logger.Debug("starting tunnels",
+ zap.ObjectValues("addrs", []request{
+ {
+ URL: "/foo",
+ Listen: addr{"127.0.0.1", 8080},
+ Remote: addr{"123.45.67.89", 4040},
+ },
+ {
+ URL: "/bar",
+ Listen: addr{"127.0.0.1", 8080},
+ Remote: addr{"127.0.0.1", 31200},
+ },
+ }))
+ // Output:
+ // {"level":"debug","msg":"starting tunnels","addrs":[{"url":"/foo","ip":"127.0.0.1","port":8080,"remote":{"ip":"123.45.67.89","port":4040}},{"url":"/bar","ip":"127.0.0.1","port":8080,"remote":{"ip":"127.0.0.1","port":31200}}]}
+}
diff --git a/dependencies/pkg/mod/go.uber.org/zap@v1.25.0/field.go b/dependencies/pkg/mod/go.uber.org/zap@v1.27.0/field.go
index 7f22c53..6743930 100644
--- a/dependencies/pkg/mod/go.uber.org/zap@v1.25.0/field.go
+++ b/dependencies/pkg/mod/go.uber.org/zap@v1.27.0/field.go
@@ -25,6 +25,7 @@ import (
"math"
"time"
+ "go.uber.org/zap/internal/stacktrace"
"go.uber.org/zap/zapcore"
)
@@ -374,7 +375,7 @@ func StackSkip(key string, skip int) Field {
// from expanding the zapcore.Field union struct to include a byte slice. Since
// taking a stacktrace is already so expensive (~10us), the extra allocation
// is okay.
- return String(key, takeStacktrace(skip+1)) // skip StackSkip
+ return String(key, stacktrace.Take(skip+1)) // skip StackSkip
}
// Duration constructs a field with the given key and value. The encoder
@@ -410,6 +411,26 @@ func Inline(val zapcore.ObjectMarshaler) Field {
}
}
+// Dict constructs a field containing the provided key-value pairs.
+// It acts similar to [Object], but with the fields specified as arguments.
+func Dict(key string, val ...Field) Field {
+ return dictField(key, val)
+}
+
+// We need a function with the signature (string, T) for zap.Any.
+func dictField(key string, val []Field) Field {
+ return Object(key, dictObject(val))
+}
+
+type dictObject []Field
+
+func (d dictObject) MarshalLogObject(enc zapcore.ObjectEncoder) error {
+ for _, f := range d {
+ f.AddTo(enc)
+ }
+ return nil
+}
+
// We discovered an issue where zap.Any can cause a performance degradation
// when used in new goroutines.
//
@@ -439,6 +460,8 @@ func Inline(val zapcore.ObjectMarshaler) Field {
// - https://github.com/uber-go/zap/pull/1304
// - https://github.com/uber-go/zap/pull/1305
// - https://github.com/uber-go/zap/pull/1308
+//
+// See https://github.com/golang/go/issues/62077 for upstream issue.
type anyFieldC[T any] func(string, T) Field
func (f anyFieldC[T]) Any(key string, val any) Field {
@@ -462,6 +485,8 @@ func Any(key string, value interface{}) Field {
c = anyFieldC[zapcore.ObjectMarshaler](Object)
case zapcore.ArrayMarshaler:
c = anyFieldC[zapcore.ArrayMarshaler](Array)
+ case []Field:
+ c = anyFieldC[[]Field](dictField)
case bool:
c = anyFieldC[bool](Bool)
case *bool:
diff --git a/dependencies/pkg/mod/go.uber.org/zap@v1.25.0/field_test.go b/dependencies/pkg/mod/go.uber.org/zap@v1.27.0/field_test.go
index 8f2f52d..f87f159 100644
--- a/dependencies/pkg/mod/go.uber.org/zap@v1.25.0/field_test.go
+++ b/dependencies/pkg/mod/go.uber.org/zap@v1.27.0/field_test.go
@@ -29,6 +29,7 @@ import (
"time"
"github.com/stretchr/testify/assert"
+ "go.uber.org/zap/internal/stacktrace"
"go.uber.org/zap/zapcore"
)
@@ -127,6 +128,7 @@ func TestFieldConstructors(t *testing.T) {
{"Inline", Field{Type: zapcore.InlineMarshalerType, Interface: name}, Inline(name)},
{"Any:ObjectMarshaler", Any("k", name), Object("k", name)},
{"Any:ArrayMarshaler", Any("k", bools([]bool{true})), Array("k", bools([]bool{true}))},
+ {"Any:Dict", Any("k", []Field{String("k", "v")}), Dict("k", String("k", "v"))},
{"Any:Stringer", Any("k", addr), Stringer("k", addr)},
{"Any:Bool", Any("k", true), Bool("k", true)},
{"Any:Bools", Any("k", []bool{true}), Bools("k", []bool{true})},
@@ -268,7 +270,7 @@ func TestStackField(t *testing.T) {
assert.Equal(t, "stacktrace", f.Key, "Unexpected field key.")
assert.Equal(t, zapcore.StringType, f.Type, "Unexpected field type.")
r := regexp.MustCompile(`field_test.go:(\d+)`)
- assert.Equal(t, r.ReplaceAllString(takeStacktrace(0), "field_test.go"), r.ReplaceAllString(f.String, "field_test.go"), "Unexpected stack trace")
+ assert.Equal(t, r.ReplaceAllString(stacktrace.Take(0), "field_test.go"), r.ReplaceAllString(f.String, "field_test.go"), "Unexpected stack trace")
assertCanBeReused(t, f)
}
@@ -277,7 +279,7 @@ func TestStackSkipField(t *testing.T) {
assert.Equal(t, "stacktrace", f.Key, "Unexpected field key.")
assert.Equal(t, zapcore.StringType, f.Type, "Unexpected field type.")
r := regexp.MustCompile(`field_test.go:(\d+)`)
- assert.Equal(t, r.ReplaceAllString(takeStacktrace(0), "field_test.go"), r.ReplaceAllString(f.String, "field_test.go"), f.String, "Unexpected stack trace")
+ assert.Equal(t, r.ReplaceAllString(stacktrace.Take(0), "field_test.go"), r.ReplaceAllString(f.String, "field_test.go"), f.String, "Unexpected stack trace")
assertCanBeReused(t, f)
}
@@ -285,6 +287,30 @@ func TestStackSkipFieldWithSkip(t *testing.T) {
f := StackSkip("stacktrace", 1)
assert.Equal(t, "stacktrace", f.Key, "Unexpected field key.")
assert.Equal(t, zapcore.StringType, f.Type, "Unexpected field type.")
- assert.Equal(t, takeStacktrace(1), f.String, "Unexpected stack trace")
+ assert.Equal(t, stacktrace.Take(1), f.String, "Unexpected stack trace")
assertCanBeReused(t, f)
}
+
+func TestDict(t *testing.T) {
+ tests := []struct {
+ desc string
+ field Field
+ expected any
+ }{
+ {"empty", Dict(""), map[string]any{}},
+ {"single", Dict("", String("k", "v")), map[string]any{"k": "v"}},
+ {"multiple", Dict("", String("k", "v"), String("k2", "v2")), map[string]any{"k": "v", "k2": "v2"}},
+ }
+
+ for _, tt := range tests {
+ t.Run(tt.desc, func(t *testing.T) {
+ enc := zapcore.NewMapObjectEncoder()
+ tt.field.Key = "k"
+ tt.field.AddTo(enc)
+ assert.Equal(t, tt.expected, enc.Fields["k"], "unexpected map contents")
+ assert.Len(t, enc.Fields, 1, "found extra keys in map: %v", enc.Fields)
+
+ assertCanBeReused(t, tt.field)
+ })
+ }
+}
diff --git a/dependencies/pkg/mod/go.uber.org/zap@v1.25.0/flag.go b/dependencies/pkg/mod/go.uber.org/zap@v1.27.0/flag.go
index 1312875..1312875 100644
--- a/dependencies/pkg/mod/go.uber.org/zap@v1.25.0/flag.go
+++ b/dependencies/pkg/mod/go.uber.org/zap@v1.27.0/flag.go
diff --git a/dependencies/pkg/mod/go.uber.org/zap@v1.25.0/flag_test.go b/dependencies/pkg/mod/go.uber.org/zap@v1.27.0/flag_test.go
index 9ff5444..9ff5444 100644
--- a/dependencies/pkg/mod/go.uber.org/zap@v1.25.0/flag_test.go
+++ b/dependencies/pkg/mod/go.uber.org/zap@v1.27.0/flag_test.go
diff --git a/dependencies/pkg/mod/go.uber.org/zap@v1.25.0/glide.yaml b/dependencies/pkg/mod/go.uber.org/zap@v1.27.0/glide.yaml
index 8e1d05e..8e1d05e 100644
--- a/dependencies/pkg/mod/go.uber.org/zap@v1.25.0/glide.yaml
+++ b/dependencies/pkg/mod/go.uber.org/zap@v1.27.0/glide.yaml
diff --git a/dependencies/pkg/mod/go.uber.org/zap@v1.25.0/global.go b/dependencies/pkg/mod/go.uber.org/zap@v1.27.0/global.go
index 3cb46c9..3cb46c9 100644
--- a/dependencies/pkg/mod/go.uber.org/zap@v1.25.0/global.go
+++ b/dependencies/pkg/mod/go.uber.org/zap@v1.27.0/global.go
diff --git a/dependencies/pkg/mod/go.uber.org/zap@v1.25.0/global_test.go b/dependencies/pkg/mod/go.uber.org/zap@v1.27.0/global_test.go
index 17fa225..17fa225 100644
--- a/dependencies/pkg/mod/go.uber.org/zap@v1.25.0/global_test.go
+++ b/dependencies/pkg/mod/go.uber.org/zap@v1.27.0/global_test.go
diff --git a/dependencies/pkg/mod/go.uber.org/zap@v1.25.0/go.mod b/dependencies/pkg/mod/go.uber.org/zap@v1.27.0/go.mod
index 455dae4..88575f4 100644
--- a/dependencies/pkg/mod/go.uber.org/zap@v1.25.0/go.mod
+++ b/dependencies/pkg/mod/go.uber.org/zap@v1.27.0/go.mod
@@ -3,9 +3,8 @@ module go.uber.org/zap
go 1.19
require (
- github.com/benbjohnson/clock v1.3.0
github.com/stretchr/testify v1.8.1
- go.uber.org/goleak v1.2.0
+ go.uber.org/goleak v1.3.0
go.uber.org/multierr v1.10.0
gopkg.in/yaml.v3 v3.0.1
)
diff --git a/dependencies/pkg/mod/go.uber.org/zap@v1.25.0/go.sum b/dependencies/pkg/mod/go.uber.org/zap@v1.27.0/go.sum
index ffa7955..a725757 100644
--- a/dependencies/pkg/mod/go.uber.org/zap@v1.25.0/go.sum
+++ b/dependencies/pkg/mod/go.uber.org/zap@v1.27.0/go.sum
@@ -1,5 +1,3 @@
-github.com/benbjohnson/clock v1.3.0 h1:ip6w0uFQkncKQ979AypyG0ER7mqUSBdKLOgAle/AT8A=
-github.com/benbjohnson/clock v1.3.0/go.mod h1:J11/hYXuz8f4ySSvYwY0FKfm+ezbsZBKZxNJlLklBHA=
github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E=
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
@@ -16,12 +14,10 @@ github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/
github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU=
github.com/stretchr/testify v1.8.1 h1:w7B6lhMri9wdJUVmEZPGGhZzrYTPvgJArz7wNPgYKsk=
github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4=
-go.uber.org/goleak v1.2.0 h1:xqgm/S+aQvhWFTtR0XK3Jvg7z8kGV8P4X14IzwN3Eqk=
-go.uber.org/goleak v1.2.0/go.mod h1:XJYK+MuIchqpmGmUSAzotztawfKvYLUIgg7guXrwVUo=
+go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto=
+go.uber.org/goleak v1.3.0/go.mod h1:CoHD4mav9JJNrW/WLlf7HGZPjdw8EucARQHekz1X6bE=
go.uber.org/multierr v1.10.0 h1:S0h4aNzvfcFsC3dRF1jLoaov7oRaKqRGC/pUEJ2yvPQ=
go.uber.org/multierr v1.10.0/go.mod h1:20+QtiLqy0Nd6FdQB9TLXag12DsQkrbs3htMFfDN80Y=
-golang.org/x/lint v0.0.0-20190930215403-16217165b5de h1:5hukYrvBGR8/eNkX5mdUezrA6JiaEZDtJb9Ei+1LlBs=
-golang.org/x/tools v0.1.5 h1:ouewzE6p+/VEB31YYnTbEJdi8pFqKp4P4n85vwo3DHA=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127 h1:qIbj1fsPNlZgppZ+VLlY7N33q108Sa+fhmuc+sWQYwY=
gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
diff --git a/dependencies/pkg/mod/go.uber.org/zap@v1.25.0/http_handler.go b/dependencies/pkg/mod/go.uber.org/zap@v1.27.0/http_handler.go
index 632b683..2be8f65 100644
--- a/dependencies/pkg/mod/go.uber.org/zap@v1.25.0/http_handler.go
+++ b/dependencies/pkg/mod/go.uber.org/zap@v1.27.0/http_handler.go
@@ -69,6 +69,13 @@ import (
//
// curl -X PUT localhost:8080/log/level -H "Content-Type: application/json" -d '{"level":"debug"}'
func (lvl AtomicLevel) ServeHTTP(w http.ResponseWriter, r *http.Request) {
+ if err := lvl.serveHTTP(w, r); err != nil {
+ w.WriteHeader(http.StatusInternalServerError)
+ fmt.Fprintf(w, "internal error: %v", err)
+ }
+}
+
+func (lvl AtomicLevel) serveHTTP(w http.ResponseWriter, r *http.Request) error {
type errorResponse struct {
Error string `json:"error"`
}
@@ -80,19 +87,20 @@ func (lvl AtomicLevel) ServeHTTP(w http.ResponseWriter, r *http.Request) {
switch r.Method {
case http.MethodGet:
- enc.Encode(payload{Level: lvl.Level()})
+ return enc.Encode(payload{Level: lvl.Level()})
+
case http.MethodPut:
requestedLvl, err := decodePutRequest(r.Header.Get("Content-Type"), r)
if err != nil {
w.WriteHeader(http.StatusBadRequest)
- enc.Encode(errorResponse{Error: err.Error()})
- return
+ return enc.Encode(errorResponse{Error: err.Error()})
}
lvl.SetLevel(requestedLvl)
- enc.Encode(payload{Level: lvl.Level()})
+ return enc.Encode(payload{Level: lvl.Level()})
+
default:
w.WriteHeader(http.StatusMethodNotAllowed)
- enc.Encode(errorResponse{
+ return enc.Encode(errorResponse{
Error: "Only GET and PUT are supported.",
})
}
@@ -129,5 +137,4 @@ func decodePutJSON(body io.Reader) (zapcore.Level, error) {
return 0, errors.New("must specify logging level")
}
return *pld.Level, nil
-
}
diff --git a/dependencies/pkg/mod/go.uber.org/zap@v1.25.0/http_handler_test.go b/dependencies/pkg/mod/go.uber.org/zap@v1.27.0/http_handler_test.go
index 9fa9c64..9da3dc7 100644
--- a/dependencies/pkg/mod/go.uber.org/zap@v1.25.0/http_handler_test.go
+++ b/dependencies/pkg/mod/go.uber.org/zap@v1.27.0/http_handler_test.go
@@ -22,6 +22,7 @@ package zap_test
import (
"encoding/json"
+ "errors"
"net/http"
"net/http/httptest"
"strings"
@@ -167,7 +168,9 @@ func TestAtomicLevelServeHTTP(t *testing.T) {
res, err := http.DefaultClient.Do(req)
require.NoError(t, err, "Error making %s request.", req.Method)
- defer res.Body.Close()
+ defer func() {
+ assert.NoError(t, res.Body.Close(), "Error closing response body.")
+ }()
require.Equal(t, tt.expectedCode, res.StatusCode, "Unexpected status code.")
if tt.expectedCode != http.StatusOK {
@@ -188,3 +191,27 @@ func TestAtomicLevelServeHTTP(t *testing.T) {
})
}
}
+
+func TestAtomicLevelServeHTTPBrokenWriter(t *testing.T) {
+ t.Parallel()
+
+ lvl := zap.NewAtomicLevel()
+
+ request, err := http.NewRequest(http.MethodGet, "http://localhost:1234/log/level", nil)
+ require.NoError(t, err, "Error constructing request.")
+
+ recorder := httptest.NewRecorder()
+ lvl.ServeHTTP(&brokenHTTPResponseWriter{
+ ResponseWriter: recorder,
+ }, request)
+
+ assert.Equal(t, http.StatusInternalServerError, recorder.Code, "Unexpected status code.")
+}
+
+type brokenHTTPResponseWriter struct {
+ http.ResponseWriter
+}
+
+func (w *brokenHTTPResponseWriter) Write([]byte) (int, error) {
+ return 0, errors.New("great sadness")
+}
diff --git a/dependencies/pkg/mod/go.uber.org/zap@v1.25.0/increase_level_test.go b/dependencies/pkg/mod/go.uber.org/zap@v1.27.0/increase_level_test.go
index 2d88380..2d88380 100644
--- a/dependencies/pkg/mod/go.uber.org/zap@v1.25.0/increase_level_test.go
+++ b/dependencies/pkg/mod/go.uber.org/zap@v1.27.0/increase_level_test.go
diff --git a/dependencies/pkg/mod/go.uber.org/zap@v1.25.0/internal/bufferpool/bufferpool.go b/dependencies/pkg/mod/go.uber.org/zap@v1.27.0/internal/bufferpool/bufferpool.go
index dad583a..dad583a 100644
--- a/dependencies/pkg/mod/go.uber.org/zap@v1.25.0/internal/bufferpool/bufferpool.go
+++ b/dependencies/pkg/mod/go.uber.org/zap@v1.27.0/internal/bufferpool/bufferpool.go
diff --git a/dependencies/pkg/mod/go.uber.org/zap@v1.25.0/internal/color/color.go b/dependencies/pkg/mod/go.uber.org/zap@v1.27.0/internal/color/color.go
index c4d5d02..c4d5d02 100644
--- a/dependencies/pkg/mod/go.uber.org/zap@v1.25.0/internal/color/color.go
+++ b/dependencies/pkg/mod/go.uber.org/zap@v1.27.0/internal/color/color.go
diff --git a/dependencies/pkg/mod/go.uber.org/zap@v1.25.0/internal/color/color_test.go b/dependencies/pkg/mod/go.uber.org/zap@v1.27.0/internal/color/color_test.go
index 4982903..4982903 100644
--- a/dependencies/pkg/mod/go.uber.org/zap@v1.25.0/internal/color/color_test.go
+++ b/dependencies/pkg/mod/go.uber.org/zap@v1.27.0/internal/color/color_test.go
diff --git a/dependencies/pkg/mod/go.uber.org/zap@v1.25.0/internal/exit/exit.go b/dependencies/pkg/mod/go.uber.org/zap@v1.27.0/internal/exit/exit.go
index f673f99..f673f99 100644
--- a/dependencies/pkg/mod/go.uber.org/zap@v1.25.0/internal/exit/exit.go
+++ b/dependencies/pkg/mod/go.uber.org/zap@v1.27.0/internal/exit/exit.go
diff --git a/dependencies/pkg/mod/go.uber.org/zap@v1.25.0/internal/exit/exit_test.go b/dependencies/pkg/mod/go.uber.org/zap@v1.27.0/internal/exit/exit_test.go
index 2299584..2299584 100644
--- a/dependencies/pkg/mod/go.uber.org/zap@v1.25.0/internal/exit/exit_test.go
+++ b/dependencies/pkg/mod/go.uber.org/zap@v1.27.0/internal/exit/exit_test.go
diff --git a/dependencies/pkg/mod/go.uber.org/zap@v1.25.0/internal/level_enabler.go b/dependencies/pkg/mod/go.uber.org/zap@v1.27.0/internal/level_enabler.go
index 40bfed8..40bfed8 100644
--- a/dependencies/pkg/mod/go.uber.org/zap@v1.25.0/internal/level_enabler.go
+++ b/dependencies/pkg/mod/go.uber.org/zap@v1.27.0/internal/level_enabler.go
diff --git a/dependencies/pkg/mod/go.uber.org/zap@v1.25.0/internal/pool/pool.go b/dependencies/pkg/mod/go.uber.org/zap@v1.27.0/internal/pool/pool.go
index 60e9d2c..60e9d2c 100644
--- a/dependencies/pkg/mod/go.uber.org/zap@v1.25.0/internal/pool/pool.go
+++ b/dependencies/pkg/mod/go.uber.org/zap@v1.27.0/internal/pool/pool.go
diff --git a/dependencies/pkg/mod/go.uber.org/zap@v1.25.0/internal/pool/pool_test.go b/dependencies/pkg/mod/go.uber.org/zap@v1.27.0/internal/pool/pool_test.go
index 094edf9..094edf9 100644
--- a/dependencies/pkg/mod/go.uber.org/zap@v1.25.0/internal/pool/pool_test.go
+++ b/dependencies/pkg/mod/go.uber.org/zap@v1.27.0/internal/pool/pool_test.go
diff --git a/dependencies/pkg/mod/go.uber.org/zap@v1.25.0/internal/readme/readme.go b/dependencies/pkg/mod/go.uber.org/zap@v1.27.0/internal/readme/readme.go
index 1487659..a9c8ad7 100644
--- a/dependencies/pkg/mod/go.uber.org/zap@v1.25.0/internal/readme/readme.go
+++ b/dependencies/pkg/mod/go.uber.org/zap@v1.27.0/internal/readme/readme.go
@@ -35,19 +35,18 @@ import (
"time"
)
-var (
- libraryNameToMarkdownName = map[string]string{
- "Zap": ":zap: zap",
- "Zap.Sugar": ":zap: zap (sugared)",
- "stdlib.Println": "standard library",
- "sirupsen/logrus": "logrus",
- "go-kit/kit/log": "go-kit",
- "inconshreveable/log15": "log15",
- "apex/log": "apex/log",
- "rs/zerolog": "zerolog",
- "slog": "slog",
- }
-)
+var libraryNameToMarkdownName = map[string]string{
+ "Zap": ":zap: zap",
+ "Zap.Sugar": ":zap: zap (sugared)",
+ "stdlib.Println": "standard library",
+ "sirupsen/logrus": "logrus",
+ "go-kit/kit/log": "go-kit",
+ "inconshreveable/log15": "log15",
+ "apex/log": "apex/log",
+ "rs/zerolog": "zerolog",
+ "slog": "slog",
+ "slog.LogAttrs": "slog (LogAttrs)",
+}
func main() {
flag.Parse()
diff --git a/dependencies/pkg/mod/go.uber.org/zap@v1.25.0/stacktrace.go b/dependencies/pkg/mod/go.uber.org/zap@v1.27.0/internal/stacktrace/stack.go
index 1f152eb..82af755 100644
--- a/dependencies/pkg/mod/go.uber.org/zap@v1.25.0/stacktrace.go
+++ b/dependencies/pkg/mod/go.uber.org/zap@v1.27.0/internal/stacktrace/stack.go
@@ -1,4 +1,4 @@
-// Copyright (c) 2016 Uber Technologies, Inc.
+// Copyright (c) 2023 Uber Technologies, Inc.
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
@@ -18,7 +18,9 @@
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
// THE SOFTWARE.
-package zap
+// Package stacktrace provides support for gathering stack traces
+// efficiently.
+package stacktrace
import (
"runtime"
@@ -28,13 +30,14 @@ import (
"go.uber.org/zap/internal/pool"
)
-var _stacktracePool = pool.New(func() *stacktrace {
- return &stacktrace{
+var _stackPool = pool.New(func() *Stack {
+ return &Stack{
storage: make([]uintptr, 64),
}
})
-type stacktrace struct {
+// Stack is a captured stack trace.
+type Stack struct {
pcs []uintptr // program counters; always a subslice of storage
frames *runtime.Frames
@@ -48,30 +51,30 @@ type stacktrace struct {
storage []uintptr
}
-// stacktraceDepth specifies how deep of a stack trace should be captured.
-type stacktraceDepth int
+// Depth specifies how deep of a stack trace should be captured.
+type Depth int
const (
- // stacktraceFirst captures only the first frame.
- stacktraceFirst stacktraceDepth = iota
+ // First captures only the first frame.
+ First Depth = iota
- // stacktraceFull captures the entire call stack, allocating more
+ // Full captures the entire call stack, allocating more
// storage for it if needed.
- stacktraceFull
+ Full
)
-// captureStacktrace captures a stack trace of the specified depth, skipping
+// Capture captures a stack trace of the specified depth, skipping
// the provided number of frames. skip=0 identifies the caller of
-// captureStacktrace.
+// Capture.
//
// The caller must call Free on the returned stacktrace after using it.
-func captureStacktrace(skip int, depth stacktraceDepth) *stacktrace {
- stack := _stacktracePool.Get()
+func Capture(skip int, depth Depth) *Stack {
+ stack := _stackPool.Get()
switch depth {
- case stacktraceFirst:
+ case First:
stack.pcs = stack.storage[:1]
- case stacktraceFull:
+ case Full:
stack.pcs = stack.storage
}
@@ -85,7 +88,7 @@ func captureStacktrace(skip int, depth stacktraceDepth) *stacktrace {
// runtime.Callers truncates the recorded stacktrace if there is no
// room in the provided slice. For the full stack trace, keep expanding
// storage until there are fewer frames than there is room.
- if depth == stacktraceFull {
+ if depth == Full {
pcs := stack.pcs
for numFrames == len(pcs) {
pcs = make([]uintptr, len(pcs)*2)
@@ -107,50 +110,54 @@ func captureStacktrace(skip int, depth stacktraceDepth) *stacktrace {
// Free releases resources associated with this stacktrace
// and returns it back to the pool.
-func (st *stacktrace) Free() {
+func (st *Stack) Free() {
st.frames = nil
st.pcs = nil
- _stacktracePool.Put(st)
+ _stackPool.Put(st)
}
// Count reports the total number of frames in this stacktrace.
// Count DOES NOT change as Next is called.
-func (st *stacktrace) Count() int {
+func (st *Stack) Count() int {
return len(st.pcs)
}
// Next returns the next frame in the stack trace,
// and a boolean indicating whether there are more after it.
-func (st *stacktrace) Next() (_ runtime.Frame, more bool) {
+func (st *Stack) Next() (_ runtime.Frame, more bool) {
return st.frames.Next()
}
-func takeStacktrace(skip int) string {
- stack := captureStacktrace(skip+1, stacktraceFull)
+// Take returns a string representation of the current stacktrace.
+//
+// skip is the number of frames to skip before recording the stack trace.
+// skip=0 identifies the caller of Take.
+func Take(skip int) string {
+ stack := Capture(skip+1, Full)
defer stack.Free()
buffer := bufferpool.Get()
defer buffer.Free()
- stackfmt := newStackFormatter(buffer)
+ stackfmt := NewFormatter(buffer)
stackfmt.FormatStack(stack)
return buffer.String()
}
-// stackFormatter formats a stack trace into a readable string representation.
-type stackFormatter struct {
+// Formatter formats a stack trace into a readable string representation.
+type Formatter struct {
b *buffer.Buffer
nonEmpty bool // whehther we've written at least one frame already
}
-// newStackFormatter builds a new stackFormatter.
-func newStackFormatter(b *buffer.Buffer) stackFormatter {
- return stackFormatter{b: b}
+// NewFormatter builds a new Formatter.
+func NewFormatter(b *buffer.Buffer) Formatter {
+ return Formatter{b: b}
}
// FormatStack formats all remaining frames in the provided stacktrace -- minus
// the final runtime.main/runtime.goexit frame.
-func (sf *stackFormatter) FormatStack(stack *stacktrace) {
+func (sf *Formatter) FormatStack(stack *Stack) {
// Note: On the last iteration, frames.Next() returns false, with a valid
// frame, but we ignore this frame. The last frame is a runtime frame which
// adds noise, since it's only either runtime.main or runtime.goexit.
@@ -160,7 +167,7 @@ func (sf *stackFormatter) FormatStack(stack *stacktrace) {
}
// FormatFrame formats the given frame.
-func (sf *stackFormatter) FormatFrame(frame runtime.Frame) {
+func (sf *Formatter) FormatFrame(frame runtime.Frame) {
if sf.nonEmpty {
sf.b.AppendByte('\n')
}
diff --git a/dependencies/pkg/mod/go.uber.org/zap@v1.25.0/stacktrace_test.go b/dependencies/pkg/mod/go.uber.org/zap@v1.27.0/internal/stacktrace/stack_test.go
index 82b6af3..195eeae 100644
--- a/dependencies/pkg/mod/go.uber.org/zap@v1.25.0/stacktrace_test.go
+++ b/dependencies/pkg/mod/go.uber.org/zap@v1.27.0/internal/stacktrace/stack_test.go
@@ -1,4 +1,4 @@
-// Copyright (c) 2016 Uber Technologies, Inc.
+// Copyright (c) 2023 Uber Technologies, Inc.
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
@@ -18,7 +18,7 @@
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
// THE SOFTWARE.
-package zap
+package stacktrace
import (
"bytes"
@@ -29,20 +29,20 @@ import (
"github.com/stretchr/testify/require"
)
-func TestTakeStacktrace(t *testing.T) {
- trace := takeStacktrace(0)
+func TestTake(t *testing.T) {
+ trace := Take(0)
lines := strings.Split(trace, "\n")
require.NotEmpty(t, lines, "Expected stacktrace to have at least one frame.")
assert.Contains(
t,
lines[0],
- "go.uber.org/zap.TestTakeStacktrace",
+ "go.uber.org/zap/internal/stacktrace.TestTake",
"Expected stacktrace to start with the test.",
)
}
-func TestTakeStacktraceWithSkip(t *testing.T) {
- trace := takeStacktrace(1)
+func TestTakeWithSkip(t *testing.T) {
+ trace := Take(1)
lines := strings.Split(trace, "\n")
require.NotEmpty(t, lines, "Expected stacktrace to have at least one frame.")
assert.Contains(
@@ -53,10 +53,10 @@ func TestTakeStacktraceWithSkip(t *testing.T) {
)
}
-func TestTakeStacktraceWithSkipInnerFunc(t *testing.T) {
+func TestTakeWithSkipInnerFunc(t *testing.T) {
var trace string
func() {
- trace = takeStacktrace(2)
+ trace = Take(2)
}()
lines := strings.Split(trace, "\n")
require.NotEmpty(t, lines, "Expected stacktrace to have at least one frame.")
@@ -68,13 +68,13 @@ func TestTakeStacktraceWithSkipInnerFunc(t *testing.T) {
)
}
-func TestTakeStacktraceDeepStack(t *testing.T) {
+func TestTakeDeepStack(t *testing.T) {
const (
N = 500
- withStackDepthName = "go.uber.org/zap.withStackDepth"
+ withStackDepthName = "go.uber.org/zap/internal/stacktrace.withStackDepth"
)
withStackDepth(N, func() {
- trace := takeStacktrace(0)
+ trace := Take(0)
for found := 0; found < N; found++ {
i := strings.Index(trace, withStackDepthName)
if i < 0 {
@@ -86,9 +86,9 @@ func TestTakeStacktraceDeepStack(t *testing.T) {
})
}
-func BenchmarkTakeStacktrace(b *testing.B) {
+func BenchmarkTake(b *testing.B) {
for i := 0; i < b.N; i++ {
- takeStacktrace(0)
+ Take(0)
}
}
diff --git a/dependencies/pkg/mod/go.uber.org/zap@v1.27.0/internal/ztest/clock.go b/dependencies/pkg/mod/go.uber.org/zap@v1.27.0/internal/ztest/clock.go
new file mode 100644
index 0000000..47b0b7f
--- /dev/null
+++ b/dependencies/pkg/mod/go.uber.org/zap@v1.27.0/internal/ztest/clock.go
@@ -0,0 +1,153 @@
+// Copyright (c) 2023 Uber Technologies, Inc.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+package ztest
+
+import (
+ "sort"
+ "sync"
+ "time"
+)
+
+// MockClock is a fake source of time.
+// It implements standard time operations,
+// but allows the user to control the passage of time.
+//
+// Use the [Add] method to progress time.
+type MockClock struct {
+ mu sync.RWMutex
+ now time.Time
+
+ // The MockClock works by maintaining a list of waiters.
+ // Each waiter knows the time at which it should be resolved.
+ // When the clock advances, all waiters that are in range are resolved
+ // in chronological order.
+ waiters []waiter
+}
+
+// NewMockClock builds a new mock clock
+// using the current actual time as the initial time.
+func NewMockClock() *MockClock {
+ return &MockClock{
+ now: time.Now(),
+ }
+}
+
+// Now reports the current time.
+func (c *MockClock) Now() time.Time {
+ c.mu.RLock()
+ defer c.mu.RUnlock()
+ return c.now
+}
+
+// NewTicker returns a time.Ticker that ticks at the specified frequency.
+//
+// As with [time.NewTicker],
+// the ticker will drop ticks if the receiver is slow,
+// and the channel is never closed.
+//
+// Calling Stop on the returned ticker is a no-op.
+// The ticker only runs when the clock is advanced.
+func (c *MockClock) NewTicker(d time.Duration) *time.Ticker {
+ ch := make(chan time.Time, 1)
+
+ var tick func(time.Time)
+ tick = func(now time.Time) {
+ next := now.Add(d)
+ c.runAt(next, func() {
+ defer tick(next)
+
+ select {
+ case ch <- next:
+ // ok
+ default:
+ // The receiver is slow.
+ // Drop the tick and continue.
+ }
+ })
+ }
+ tick(c.Now())
+
+ return &time.Ticker{C: ch}
+}
+
+// runAt schedules the given function to be run at the given time.
+// The function runs without a lock held, so it may schedule more work.
+func (c *MockClock) runAt(t time.Time, fn func()) {
+ c.mu.Lock()
+ defer c.mu.Unlock()
+ c.waiters = append(c.waiters, waiter{until: t, fn: fn})
+}
+
+type waiter struct {
+ until time.Time
+ fn func()
+}
+
+// Add progresses time by the given duration.
+// Other operations waiting for the time to advance
+// will be resolved if they are within range.
+//
+// Side effects of operations waiting for the time to advance
+// will take effect on a best-effort basis.
+// Avoid racing with operations that have side effects.
+//
+// Panics if the duration is negative.
+func (c *MockClock) Add(d time.Duration) {
+ if d < 0 {
+ panic("cannot add negative duration")
+ }
+
+ c.mu.Lock()
+ defer c.mu.Unlock()
+
+ sort.Slice(c.waiters, func(i, j int) bool {
+ return c.waiters[i].until.Before(c.waiters[j].until)
+ })
+
+ newTime := c.now.Add(d)
+ // newTime won't be recorded until the end of this method.
+ // This ensures that any waiters that are resolved
+ // are resolved at the time they were expecting.
+
+ for len(c.waiters) > 0 {
+ w := c.waiters[0]
+ if w.until.After(newTime) {
+ break
+ }
+ c.waiters[0] = waiter{} // avoid memory leak
+ c.waiters = c.waiters[1:]
+
+ // The waiter is within range.
+ // Travel to the time of the waiter and resolve it.
+ c.now = w.until
+
+ // The waiter may schedule more work
+ // so we must release the lock.
+ c.mu.Unlock()
+ w.fn()
+ // Sleeping here is necessary to let the side effects of waiters
+ // take effect before we continue.
+ time.Sleep(1 * time.Millisecond)
+ c.mu.Lock()
+ }
+
+ c.now = newTime
+}
diff --git a/dependencies/pkg/mod/go.uber.org/zap@v1.25.0/internal/ztest/clock_test.go b/dependencies/pkg/mod/go.uber.org/zap@v1.27.0/internal/ztest/clock_test.go
index 3808ed7..6db724b 100644
--- a/dependencies/pkg/mod/go.uber.org/zap@v1.25.0/internal/ztest/clock_test.go
+++ b/dependencies/pkg/mod/go.uber.org/zap@v1.27.0/internal/ztest/clock_test.go
@@ -1,4 +1,4 @@
-// Copyright (c) 2021 Uber Technologies, Inc.
+// Copyright (c) 2023 Uber Technologies, Inc.
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
@@ -55,3 +55,26 @@ func TestMockClock_NewTicker(t *testing.T) {
assert.Equal(t, int32(2), n.Load())
close(quit)
}
+
+func TestMockClock_NewTicker_slowConsumer(t *testing.T) {
+ clock := NewMockClock()
+
+ ticker := clock.NewTicker(time.Microsecond)
+ defer ticker.Stop()
+
+ // Two ticks, only one consumed.
+ clock.Add(2 * time.Microsecond)
+ <-ticker.C
+
+ select {
+ case <-ticker.C:
+ t.Fatal("unexpected tick")
+ default:
+ // ok
+ }
+}
+
+func TestMockClock_Add_negative(t *testing.T) {
+ clock := NewMockClock()
+ assert.Panics(t, func() { clock.Add(-1) })
+}
diff --git a/dependencies/pkg/mod/go.uber.org/zap@v1.25.0/internal/ztest/doc.go b/dependencies/pkg/mod/go.uber.org/zap@v1.27.0/internal/ztest/doc.go
index cd4b98c..cd4b98c 100644
--- a/dependencies/pkg/mod/go.uber.org/zap@v1.25.0/internal/ztest/doc.go
+++ b/dependencies/pkg/mod/go.uber.org/zap@v1.27.0/internal/ztest/doc.go
diff --git a/dependencies/pkg/mod/go.uber.org/zap@v1.25.0/internal/ztest/timeout.go b/dependencies/pkg/mod/go.uber.org/zap@v1.27.0/internal/ztest/timeout.go
index e4222f9..e4222f9 100644
--- a/dependencies/pkg/mod/go.uber.org/zap@v1.25.0/internal/ztest/timeout.go
+++ b/dependencies/pkg/mod/go.uber.org/zap@v1.27.0/internal/ztest/timeout.go
diff --git a/dependencies/pkg/mod/go.uber.org/zap@v1.25.0/internal/ztest/writer.go b/dependencies/pkg/mod/go.uber.org/zap@v1.27.0/internal/ztest/writer.go
index f54d856..f54d856 100644
--- a/dependencies/pkg/mod/go.uber.org/zap@v1.25.0/internal/ztest/writer.go
+++ b/dependencies/pkg/mod/go.uber.org/zap@v1.27.0/internal/ztest/writer.go
diff --git a/dependencies/pkg/mod/go.uber.org/zap@v1.25.0/leak_test.go b/dependencies/pkg/mod/go.uber.org/zap@v1.27.0/leak_test.go
index 474ed2f..474ed2f 100644
--- a/dependencies/pkg/mod/go.uber.org/zap@v1.25.0/leak_test.go
+++ b/dependencies/pkg/mod/go.uber.org/zap@v1.27.0/leak_test.go
diff --git a/dependencies/pkg/mod/go.uber.org/zap@v1.25.0/level.go b/dependencies/pkg/mod/go.uber.org/zap@v1.27.0/level.go
index 155b208..155b208 100644
--- a/dependencies/pkg/mod/go.uber.org/zap@v1.25.0/level.go
+++ b/dependencies/pkg/mod/go.uber.org/zap@v1.27.0/level.go
diff --git a/dependencies/pkg/mod/go.uber.org/zap@v1.25.0/level_test.go b/dependencies/pkg/mod/go.uber.org/zap@v1.27.0/level_test.go
index db7391d..db7391d 100644
--- a/dependencies/pkg/mod/go.uber.org/zap@v1.25.0/level_test.go
+++ b/dependencies/pkg/mod/go.uber.org/zap@v1.27.0/level_test.go
diff --git a/dependencies/pkg/mod/go.uber.org/zap@v1.25.0/logger.go b/dependencies/pkg/mod/go.uber.org/zap@v1.27.0/logger.go
index 0e95480..c4d3003 100644
--- a/dependencies/pkg/mod/go.uber.org/zap@v1.25.0/logger.go
+++ b/dependencies/pkg/mod/go.uber.org/zap@v1.27.0/logger.go
@@ -27,6 +27,7 @@ import (
"strings"
"go.uber.org/zap/internal/bufferpool"
+ "go.uber.org/zap/internal/stacktrace"
"go.uber.org/zap/zapcore"
)
@@ -42,6 +43,7 @@ type Logger struct {
development bool
addCaller bool
+ onPanic zapcore.CheckWriteHook // default is WriteThenPanic
onFatal zapcore.CheckWriteHook // default is WriteThenFatal
name string
@@ -173,7 +175,8 @@ func (log *Logger) WithOptions(opts ...Option) *Logger {
}
// With creates a child logger and adds structured context to it. Fields added
-// to the child don't affect the parent, and vice versa.
+// to the child don't affect the parent, and vice versa. Any fields that
+// require evaluation (such as Objects) are evaluated upon invocation of With.
func (log *Logger) With(fields ...Field) *Logger {
if len(fields) == 0 {
return log
@@ -183,6 +186,28 @@ func (log *Logger) With(fields ...Field) *Logger {
return l
}
+// WithLazy creates a child logger and adds structured context to it lazily.
+//
+// The fields are evaluated only if the logger is further chained with [With]
+// or is written to with any of the log level methods.
+// Until that occurs, the logger may retain references to objects inside the fields,
+// and logging will reflect the state of an object at the time of logging,
+// not the time of WithLazy().
+//
+// WithLazy provides a worthwhile performance optimization for contextual loggers
+// when the likelihood of using the child logger is low,
+// such as error paths and rarely taken branches.
+//
+// Similar to [With], fields added to the child don't affect the parent, and vice versa.
+func (log *Logger) WithLazy(fields ...Field) *Logger {
+ if len(fields) == 0 {
+ return log
+ }
+ return log.WithOptions(WrapCore(func(core zapcore.Core) zapcore.Core {
+ return zapcore.NewLazyWith(core, fields)
+ }))
+}
+
// Level reports the minimum enabled level for this logger.
//
// For NopLoggers, this is [zapcore.InvalidLevel].
@@ -199,6 +224,8 @@ func (log *Logger) Check(lvl zapcore.Level, msg string) *zapcore.CheckedEntry {
// Log logs a message at the specified level. The message includes any fields
// passed at the log site, as well as any fields accumulated on the logger.
+// Any Fields that require evaluation (such as Objects) are evaluated upon
+// invocation of Log.
func (log *Logger) Log(lvl zapcore.Level, msg string, fields ...Field) {
if ce := log.check(lvl, msg); ce != nil {
ce.Write(fields...)
@@ -288,8 +315,8 @@ func (log *Logger) Name() string {
}
func (log *Logger) clone() *Logger {
- copy := *log
- return &copy
+ clone := *log
+ return &clone
}
func (log *Logger) check(lvl zapcore.Level, msg string) *zapcore.CheckedEntry {
@@ -319,27 +346,12 @@ func (log *Logger) check(lvl zapcore.Level, msg string) *zapcore.CheckedEntry {
// Set up any required terminal behavior.
switch ent.Level {
case zapcore.PanicLevel:
- ce = ce.After(ent, zapcore.WriteThenPanic)
+ ce = ce.After(ent, terminalHookOverride(zapcore.WriteThenPanic, log.onPanic))
case zapcore.FatalLevel:
- onFatal := log.onFatal
- // nil or WriteThenNoop will lead to continued execution after
- // a Fatal log entry, which is unexpected. For example,
- //
- // f, err := os.Open(..)
- // if err != nil {
- // log.Fatal("cannot open", zap.Error(err))
- // }
- // fmt.Println(f.Name())
- //
- // The f.Name() will panic if we continue execution after the
- // log.Fatal.
- if onFatal == nil || onFatal == zapcore.WriteThenNoop {
- onFatal = zapcore.WriteThenFatal
- }
- ce = ce.After(ent, onFatal)
+ ce = ce.After(ent, terminalHookOverride(zapcore.WriteThenFatal, log.onFatal))
case zapcore.DPanicLevel:
if log.development {
- ce = ce.After(ent, zapcore.WriteThenPanic)
+ ce = ce.After(ent, terminalHookOverride(zapcore.WriteThenPanic, log.onPanic))
}
}
@@ -360,17 +372,17 @@ func (log *Logger) check(lvl zapcore.Level, msg string) *zapcore.CheckedEntry {
// Adding the caller or stack trace requires capturing the callers of
// this function. We'll share information between these two.
- stackDepth := stacktraceFirst
+ stackDepth := stacktrace.First
if addStack {
- stackDepth = stacktraceFull
+ stackDepth = stacktrace.Full
}
- stack := captureStacktrace(log.callerSkip+callerSkipOffset, stackDepth)
+ stack := stacktrace.Capture(log.callerSkip+callerSkipOffset, stackDepth)
defer stack.Free()
if stack.Count() == 0 {
if log.addCaller {
fmt.Fprintf(log.errorOutput, "%v Logger.check error: failed to get caller\n", ent.Time.UTC())
- log.errorOutput.Sync()
+ _ = log.errorOutput.Sync()
}
return ce
}
@@ -391,7 +403,7 @@ func (log *Logger) check(lvl zapcore.Level, msg string) *zapcore.CheckedEntry {
buffer := bufferpool.Get()
defer buffer.Free()
- stackfmt := newStackFormatter(buffer)
+ stackfmt := stacktrace.NewFormatter(buffer)
// We've already extracted the first frame, so format that
// separately and defer to stackfmt for the rest.
@@ -404,3 +416,20 @@ func (log *Logger) check(lvl zapcore.Level, msg string) *zapcore.CheckedEntry {
return ce
}
+
+func terminalHookOverride(defaultHook, override zapcore.CheckWriteHook) zapcore.CheckWriteHook {
+ // A nil or WriteThenNoop hook will lead to continued execution after
+ // a Panic or Fatal log entry, which is unexpected. For example,
+ //
+ // f, err := os.Open(..)
+ // if err != nil {
+ // log.Fatal("cannot open", zap.Error(err))
+ // }
+ // fmt.Println(f.Name())
+ //
+ // The f.Name() will panic if we continue execution after the log.Fatal.
+ if override == nil || override == zapcore.WriteThenNoop {
+ return defaultHook
+ }
+ return override
+}
diff --git a/dependencies/pkg/mod/go.uber.org/zap@v1.25.0/logger_bench_test.go b/dependencies/pkg/mod/go.uber.org/zap@v1.27.0/logger_bench_test.go
index bcf501a..9d41298 100644
--- a/dependencies/pkg/mod/go.uber.org/zap@v1.25.0/logger_bench_test.go
+++ b/dependencies/pkg/mod/go.uber.org/zap@v1.27.0/logger_bench_test.go
@@ -23,6 +23,7 @@ package zap
import (
"errors"
"runtime"
+ "strconv"
"sync"
"testing"
"time"
@@ -199,6 +200,48 @@ func BenchmarkAddCallerAndStacktrace(b *testing.B) {
})
}
+func Benchmark5WithsUsed(b *testing.B) {
+ benchmarkWithUsed(b, (*Logger).With, 5, true)
+}
+
+// This benchmark will be used in future as a
+// baseline for improving
+func Benchmark5WithsNotUsed(b *testing.B) {
+ benchmarkWithUsed(b, (*Logger).With, 5, false)
+}
+
+func Benchmark5WithLazysUsed(b *testing.B) {
+ benchmarkWithUsed(b, (*Logger).WithLazy, 5, true)
+}
+
+// This benchmark will be used in future as a
+// baseline for improving
+func Benchmark5WithLazysNotUsed(b *testing.B) {
+ benchmarkWithUsed(b, (*Logger).WithLazy, 5, false)
+}
+
+func benchmarkWithUsed(b *testing.B, withMethod func(*Logger, ...zapcore.Field) *Logger, N int, use bool) {
+ keys := make([]string, N)
+ values := make([]string, N)
+ for i := 0; i < N; i++ {
+ keys[i] = "k" + strconv.Itoa(i)
+ values[i] = "v" + strconv.Itoa(i)
+ }
+
+ b.ResetTimer()
+
+ withBenchedLogger(b, func(log *Logger) {
+ for i := 0; i < N; i++ {
+ log = withMethod(log, String(keys[i], values[i]))
+ }
+ if use {
+ log.Info("used")
+ return
+ }
+ runtime.KeepAlive(log)
+ })
+}
+
func Benchmark10Fields(b *testing.B) {
withBenchedLogger(b, func(log *Logger) {
log.Info("Ten fields, passed at the log site.",
diff --git a/dependencies/pkg/mod/go.uber.org/zap@v1.25.0/logger_test.go b/dependencies/pkg/mod/go.uber.org/zap@v1.27.0/logger_test.go
index d4af575..4a953b6 100644
--- a/dependencies/pkg/mod/go.uber.org/zap@v1.25.0/logger_test.go
+++ b/dependencies/pkg/mod/go.uber.org/zap@v1.27.0/logger_test.go
@@ -22,6 +22,8 @@ package zap
import (
"errors"
+ "fmt"
+ "strconv"
"sync"
"sync/atomic"
"testing"
@@ -124,20 +126,250 @@ func TestLoggerInitialFields(t *testing.T) {
}
func TestLoggerWith(t *testing.T) {
- fieldOpts := opts(Fields(Int("foo", 42)))
- withLogger(t, DebugLevel, fieldOpts, func(logger *Logger, logs *observer.ObservedLogs) {
- // Child loggers should have copy-on-write semantics, so two children
- // shouldn't stomp on each other's fields or affect the parent's fields.
- logger.With(String("one", "two")).Info("")
- logger.With(String("three", "four")).Info("")
- logger.Info("")
+ tests := []struct {
+ name string
+ initialFields []Field
+ withMethod func(*Logger, ...Field) *Logger
+ }{
+ {
+ "regular non lazy logger",
+ []Field{Int("foo", 42)},
+ (*Logger).With,
+ },
+ {
+ "regular non lazy logger no initial fields",
+ []Field{},
+ (*Logger).With,
+ },
+ {
+ "lazy with logger",
+ []Field{Int("foo", 42)},
+ (*Logger).WithLazy,
+ },
+ {
+ "lazy with logger no initial fields",
+ []Field{},
+ (*Logger).WithLazy,
+ },
+ }
+ for _, tt := range tests {
+ t.Run(tt.name, func(t *testing.T) {
+ withLogger(t, DebugLevel, opts(Fields(tt.initialFields...)), func(logger *Logger, logs *observer.ObservedLogs) {
+ // Child loggers should have copy-on-write semantics, so two children
+ // shouldn't stomp on each other's fields or affect the parent's fields.
+ tt.withMethod(logger).Info("")
+ tt.withMethod(logger, String("one", "two")).Info("")
+ tt.withMethod(logger, String("three", "four")).Info("")
+ tt.withMethod(logger, String("five", "six")).With(String("seven", "eight")).Info("")
+ logger.Info("")
- assert.Equal(t, []observer.LoggedEntry{
- {Context: []Field{Int("foo", 42), String("one", "two")}},
- {Context: []Field{Int("foo", 42), String("three", "four")}},
- {Context: []Field{Int("foo", 42)}},
- }, logs.AllUntimed(), "Unexpected cross-talk between child loggers.")
- })
+ assert.Equal(t, []observer.LoggedEntry{
+ {Context: tt.initialFields},
+ {Context: append(tt.initialFields, String("one", "two"))},
+ {Context: append(tt.initialFields, String("three", "four"))},
+ {Context: append(tt.initialFields, String("five", "six"), String("seven", "eight"))},
+ {Context: tt.initialFields},
+ }, logs.AllUntimed(), "Unexpected cross-talk between child loggers.")
+ })
+ })
+ }
+}
+
+func TestLoggerWithCaptures(t *testing.T) {
+ type withF func(*Logger, ...Field) *Logger
+ tests := []struct {
+ name string
+ withMethods []withF
+ wantJSON []string
+ }{
+ {
+ name: "regular with captures arguments at time of With",
+ withMethods: []withF{(*Logger).With},
+ wantJSON: []string{
+ `{
+ "m": "hello 0",
+ "a0": [0],
+ "b0": [1]
+ }`,
+ `{
+ "m": "world 0",
+ "a0": [0],
+ "c0": [2]
+ }`,
+ },
+ },
+ {
+ name: "lazy with captures arguments at time of With or Logging",
+ withMethods: []withF{(*Logger).WithLazy},
+ wantJSON: []string{
+ `{
+ "m": "hello 0",
+ "a0": [1],
+ "b0": [1]
+ }`,
+ `{
+ "m": "world 0",
+ "a0": [1],
+ "c0": [2]
+ }`,
+ },
+ },
+ {
+ name: "2x With captures arguments at time of each With",
+ withMethods: []withF{(*Logger).With, (*Logger).With},
+ wantJSON: []string{
+ `{
+ "m": "hello 0",
+ "a0": [0],
+ "b0": [1]
+ }`,
+ `{
+ "m": "world 0",
+ "a0": [0],
+ "c0": [2]
+ }`,
+ `{
+ "m": "hello 1",
+ "a0": [0],
+ "c0": [2],
+ "a1": [10],
+ "b1": [11]
+ }`,
+ `{
+ "m": "world 1",
+ "a0": [0],
+ "c0": [2],
+ "a1": [10],
+ "c1": [12]
+ }`,
+ },
+ },
+ {
+ name: "2x WithLazy. Captures arguments only at logging time.",
+ withMethods: []withF{(*Logger).WithLazy, (*Logger).WithLazy},
+ wantJSON: []string{
+ `{
+ "m": "hello 0",
+ "a0": [1],
+ "b0": [1]
+ }`,
+ `{
+ "m": "world 0",
+ "a0": [1],
+ "c0": [2]
+ }`,
+ `{
+ "m": "hello 1",
+ "a0": [1],
+ "c0": [2],
+ "a1": [11],
+ "b1": [11]
+ }`,
+ `{
+ "m": "world 1",
+ "a0": [1],
+ "c0": [2],
+ "a1": [11],
+ "c1": [12]
+ }`,
+ },
+ },
+ {
+ name: "WithLazy then With",
+ withMethods: []withF{(*Logger).WithLazy, (*Logger).With},
+ wantJSON: []string{
+ `{
+ "m": "hello 0",
+ "a0": [1],
+ "b0": [1]
+ }`,
+ `{
+ "m": "world 0",
+ "a0": [1],
+ "c0": [2]
+ }`,
+ `{
+ "m": "hello 1",
+ "a0": [1],
+ "c0": [2],
+ "a1": [10],
+ "b1": [11]
+ }`,
+ `{
+ "m": "world 1",
+ "a0": [1],
+ "c0": [2],
+ "a1": [10],
+ "c1": [12]
+ }`,
+ },
+ },
+ {
+ name: "With then WithLazy",
+ withMethods: []withF{(*Logger).With, (*Logger).WithLazy},
+ wantJSON: []string{
+ `{
+ "m": "hello 0",
+ "a0": [0],
+ "b0": [1]
+ }`,
+ `{
+ "m": "world 0",
+ "a0": [0],
+ "c0": [2]
+ }`,
+ `{
+ "m": "hello 1",
+ "a0": [0],
+ "c0": [2],
+ "a1": [11],
+ "b1": [11]
+ }`,
+ `{
+ "m": "world 1",
+ "a0": [0],
+ "c0": [2],
+ "a1": [11],
+ "c1": [12]
+ }`,
+ },
+ },
+ }
+
+ for _, tt := range tests {
+ t.Run(tt.name, func(t *testing.T) {
+ enc := zapcore.NewJSONEncoder(zapcore.EncoderConfig{
+ MessageKey: "m",
+ })
+
+ var bs ztest.Buffer
+ logger := New(zapcore.NewCore(enc, &bs, DebugLevel))
+
+ for i, withMethod := range tt.withMethods {
+
+ iStr := strconv.Itoa(i)
+ x := 10 * i
+ arr := zapcore.ArrayMarshalerFunc(func(enc zapcore.ArrayEncoder) error {
+ enc.AppendInt(x)
+ return nil
+ })
+
+ // Demonstrate the arguments are captured when With() and Info() are invoked.
+ logger = withMethod(logger, Array("a"+iStr, arr))
+ x++
+ logger.Info(fmt.Sprintf("hello %d", i), Array("b"+iStr, arr))
+ x++
+ logger = withMethod(logger, Array("c"+iStr, arr))
+ logger.Info(fmt.Sprintf("world %d", i))
+ }
+
+ if lines := bs.Lines(); assert.Len(t, lines, len(tt.wantJSON)) {
+ for i, want := range tt.wantJSON {
+ assert.JSONEq(t, want, lines[i], "Unexpected output from the %d'th log.", i)
+ }
+ }
+ })
+ }
}
func TestLoggerLogPanic(t *testing.T) {
@@ -604,6 +836,130 @@ func TestLoggerFatalOnNoop(t *testing.T) {
assert.Equal(t, 1, exitStub.Code, "must exit with status 1 for WriteThenNoop")
}
+func TestLoggerCustomOnPanic(t *testing.T) {
+ tests := []struct {
+ msg string
+ level zapcore.Level
+ opts []Option
+ finished bool
+ want []observer.LoggedEntry
+ recoverValue any
+ }{
+ {
+ msg: "panic with nil hook",
+ level: PanicLevel,
+ opts: opts(WithPanicHook(nil)),
+ finished: false,
+ want: []observer.LoggedEntry{
+ {
+ Entry: zapcore.Entry{Level: PanicLevel, Message: "foobar"},
+ Context: []Field{},
+ },
+ },
+ recoverValue: "foobar",
+ },
+ {
+ msg: "panic with noop hook",
+ level: PanicLevel,
+ opts: opts(WithPanicHook(zapcore.WriteThenNoop)),
+ finished: false,
+ want: []observer.LoggedEntry{
+ {
+ Entry: zapcore.Entry{Level: PanicLevel, Message: "foobar"},
+ Context: []Field{},
+ },
+ },
+ recoverValue: "foobar",
+ },
+ {
+ msg: "no panic with goexit hook",
+ level: PanicLevel,
+ opts: opts(WithPanicHook(zapcore.WriteThenGoexit)),
+ finished: false,
+ want: []observer.LoggedEntry{
+ {
+ Entry: zapcore.Entry{Level: PanicLevel, Message: "foobar"},
+ Context: []Field{},
+ },
+ },
+ recoverValue: nil,
+ },
+ {
+ msg: "dpanic no panic in development mode with goexit hook",
+ level: DPanicLevel,
+ opts: opts(WithPanicHook(zapcore.WriteThenGoexit), Development()),
+ finished: false,
+ want: []observer.LoggedEntry{
+ {
+ Entry: zapcore.Entry{Level: DPanicLevel, Message: "foobar"},
+ Context: []Field{},
+ },
+ },
+ recoverValue: nil,
+ },
+ {
+ msg: "dpanic panic in development mode with noop hook",
+ level: DPanicLevel,
+ opts: opts(WithPanicHook(zapcore.WriteThenNoop), Development()),
+ finished: false,
+ want: []observer.LoggedEntry{
+ {
+ Entry: zapcore.Entry{Level: DPanicLevel, Message: "foobar"},
+ Context: []Field{},
+ },
+ },
+ recoverValue: "foobar",
+ },
+ {
+ msg: "dpanic no exit in production mode with goexit hook",
+ level: DPanicLevel,
+ opts: opts(WithPanicHook(zapcore.WriteThenPanic)),
+ finished: true,
+ want: []observer.LoggedEntry{
+ {
+ Entry: zapcore.Entry{Level: DPanicLevel, Message: "foobar"},
+ Context: []Field{},
+ },
+ },
+ recoverValue: nil,
+ },
+ {
+ msg: "dpanic no panic in production mode with panic hook",
+ level: DPanicLevel,
+ opts: opts(WithPanicHook(zapcore.WriteThenPanic)),
+ finished: true,
+ want: []observer.LoggedEntry{
+ {
+ Entry: zapcore.Entry{Level: DPanicLevel, Message: "foobar"},
+ Context: []Field{},
+ },
+ },
+ recoverValue: nil,
+ },
+ }
+
+ for _, tt := range tests {
+ t.Run(tt.msg, func(t *testing.T) {
+ withLogger(t, InfoLevel, tt.opts, func(logger *Logger, logs *observer.ObservedLogs) {
+ var finished bool
+ recovered := make(chan any)
+ go func() {
+ defer func() {
+ recovered <- recover()
+ }()
+
+ logger.Log(tt.level, "foobar")
+ finished = true
+ }()
+
+ assert.Equal(t, tt.recoverValue, <-recovered, "unexpected value from recover()")
+ assert.Equal(t, tt.finished, finished, "expect goroutine finished state doesn't match")
+ assert.Equal(t, tt.want, logs.AllUntimed(), "unexpected logs")
+ })
+ })
+ }
+}
+
func TestLoggerCustomOnFatal(t *testing.T) {
tests := []struct {
msg string
diff --git a/dependencies/pkg/mod/go.uber.org/zap@v1.25.0/options.go b/dependencies/pkg/mod/go.uber.org/zap@v1.27.0/options.go
index c4f3bca..43d357a 100644
--- a/dependencies/pkg/mod/go.uber.org/zap@v1.25.0/options.go
+++ b/dependencies/pkg/mod/go.uber.org/zap@v1.27.0/options.go
@@ -132,6 +132,21 @@ func IncreaseLevel(lvl zapcore.LevelEnabler) Option {
})
}
+// WithPanicHook sets a CheckWriteHook to run on Panic/DPanic logs.
+// Zap will call this hook after writing a log statement with a Panic/DPanic level.
+//
+// For example, the following builds a logger that will exit the current
+// goroutine after writing a Panic/DPanic log message, but it will not start a panic.
+//
+// zap.New(core, zap.WithPanicHook(zapcore.WriteThenGoexit))
+//
+// This is useful for testing Panic/DPanic log output.
+func WithPanicHook(hook zapcore.CheckWriteHook) Option {
+ return optionFunc(func(log *Logger) {
+ log.onPanic = hook
+ })
+}
+
// OnFatal sets the action to take on fatal logs.
//
// Deprecated: Use [WithFatalHook] instead.
diff --git a/dependencies/pkg/mod/go.uber.org/zap@v1.25.0/sink.go b/dependencies/pkg/mod/go.uber.org/zap@v1.27.0/sink.go
index 478c9a1..499772a 100644
--- a/dependencies/pkg/mod/go.uber.org/zap@v1.25.0/sink.go
+++ b/dependencies/pkg/mod/go.uber.org/zap@v1.27.0/sink.go
@@ -66,7 +66,8 @@ func newSinkRegistry() *sinkRegistry {
factories: make(map[string]func(*url.URL) (Sink, error)),
openFile: os.OpenFile,
}
- sr.RegisterSink(schemeFile, sr.newFileSinkFromURL)
+ // Infallible operation: the registry is empty, so we can't have a conflict.
+ _ = sr.RegisterSink(schemeFile, sr.newFileSinkFromURL)
return sr
}
@@ -154,7 +155,7 @@ func (sr *sinkRegistry) newFileSinkFromPath(path string) (Sink, error) {
case "stderr":
return nopCloserSink{os.Stderr}, nil
}
- return sr.openFile(path, os.O_WRONLY|os.O_APPEND|os.O_CREATE, 0666)
+ return sr.openFile(path, os.O_WRONLY|os.O_APPEND|os.O_CREATE, 0o666)
}
func normalizeScheme(s string) (string, error) {
diff --git a/dependencies/pkg/mod/go.uber.org/zap@v1.25.0/sink_test.go b/dependencies/pkg/mod/go.uber.org/zap@v1.27.0/sink_test.go
index 0dfa616..5fc37be 100644
--- a/dependencies/pkg/mod/go.uber.org/zap@v1.25.0/sink_test.go
+++ b/dependencies/pkg/mod/go.uber.org/zap@v1.27.0/sink_test.go
@@ -68,13 +68,12 @@ func TestRegisterSink(t *testing.T) {
require.NoError(t, RegisterSink(strings.ToUpper(memScheme), memFactory), "Failed to register scheme %q.", memScheme)
require.NoError(t, RegisterSink(nopScheme, nopFactory), "Failed to register scheme %q.", nopScheme)
- sink, close, err := Open(
+ sink, closeSink, err := Open(
memScheme+"://somewhere",
nopScheme+"://somewhere-else",
)
require.NoError(t, err, "Unexpected error opening URLs with registered schemes.")
-
- defer close()
+ defer closeSink()
assert.Equal(t, 1, memCalls, "Unexpected number of calls to memory factory.")
assert.Equal(t, 1, nopCalls, "Unexpected number of calls to no-op factory.")
diff --git a/dependencies/pkg/mod/go.uber.org/zap@v1.25.0/sink_windows_test.go b/dependencies/pkg/mod/go.uber.org/zap@v1.27.0/sink_windows_test.go
index fd6a475..fd6a475 100644
--- a/dependencies/pkg/mod/go.uber.org/zap@v1.25.0/sink_windows_test.go
+++ b/dependencies/pkg/mod/go.uber.org/zap@v1.27.0/sink_windows_test.go
diff --git a/dependencies/pkg/mod/go.uber.org/zap@v1.25.0/stacktrace_ext_test.go b/dependencies/pkg/mod/go.uber.org/zap@v1.27.0/stacktrace_ext_test.go
index 71f0983..9f018aa 100644
--- a/dependencies/pkg/mod/go.uber.org/zap@v1.25.0/stacktrace_ext_test.go
+++ b/dependencies/pkg/mod/go.uber.org/zap@v1.27.0/stacktrace_ext_test.go
@@ -97,7 +97,7 @@ func TestStacktraceFiltersVendorZap(t *testing.T) {
testDir := filepath.Join(goPath, "src/go.uber.org/zap_test/")
vendorDir := filepath.Join(testDir, "vendor")
- require.NoError(t, os.MkdirAll(testDir, 0777), "Failed to create source director")
+ require.NoError(t, os.MkdirAll(testDir, 0o777), "Failed to create source director")
curFile := getSelfFilename(t)
setupSymlink(t, curFile, filepath.Join(testDir, curFile))
@@ -175,7 +175,7 @@ func getSelfFilename(t *testing.T) string {
func setupSymlink(t *testing.T, src, dst string) {
// Make sure the destination directory exists.
- os.MkdirAll(filepath.Dir(dst), 0777)
+ require.NoError(t, os.MkdirAll(filepath.Dir(dst), 0o777))
// Get absolute path of the source for the symlink, otherwise we can create a symlink
// that uses relative paths.
diff --git a/dependencies/pkg/mod/go.uber.org/zap@v1.25.0/sugar.go b/dependencies/pkg/mod/go.uber.org/zap@v1.27.0/sugar.go
index 00ac5fe..8904cd0 100644
--- a/dependencies/pkg/mod/go.uber.org/zap@v1.25.0/sugar.go
+++ b/dependencies/pkg/mod/go.uber.org/zap@v1.27.0/sugar.go
@@ -115,6 +115,21 @@ func (s *SugaredLogger) With(args ...interface{}) *SugaredLogger {
return &SugaredLogger{base: s.base.With(s.sweetenFields(args)...)}
}
+// WithLazy adds a variadic number of fields to the logging context lazily.
+// The fields are evaluated only if the logger is further chained with [With]
+// or is written to with any of the log level methods.
+// Until that occurs, the logger may retain references to objects inside the fields,
+// and logging will reflect the state of an object at the time of logging,
+// not the time of WithLazy().
+//
+// Similar to [With], fields added to the child don't affect the parent,
+// and vice versa. Also, the keys in key-value pairs should be strings. In development,
+// passing a non-string key panics, while in production it logs an error and skips the pair.
+// Passing an orphaned key has the same behavior.
+func (s *SugaredLogger) WithLazy(args ...interface{}) *SugaredLogger {
+ return &SugaredLogger{base: s.base.WithLazy(s.sweetenFields(args)...)}
+}
+
// Level reports the minimum enabled level for this logger.
//
// For NopLoggers, this is [zapcore.InvalidLevel].
@@ -122,6 +137,12 @@ func (s *SugaredLogger) Level() zapcore.Level {
return zapcore.LevelOf(s.base.core)
}
+// Log logs the provided arguments at provided level.
+// Spaces are added between arguments when neither is a string.
+func (s *SugaredLogger) Log(lvl zapcore.Level, args ...interface{}) {
+ s.log(lvl, "", args, nil)
+}
+
// Debug logs the provided arguments at [DebugLevel].
// Spaces are added between arguments when neither is a string.
func (s *SugaredLogger) Debug(args ...interface{}) {
@@ -165,6 +186,12 @@ func (s *SugaredLogger) Fatal(args ...interface{}) {
s.log(FatalLevel, "", args, nil)
}
+// Logf formats the message according to the format specifier
+// and logs it at provided level.
+func (s *SugaredLogger) Logf(lvl zapcore.Level, template string, args ...interface{}) {
+ s.log(lvl, template, args, nil)
+}
+
// Debugf formats the message according to the format specifier
// and logs it at [DebugLevel].
func (s *SugaredLogger) Debugf(template string, args ...interface{}) {
@@ -208,6 +235,12 @@ func (s *SugaredLogger) Fatalf(template string, args ...interface{}) {
s.log(FatalLevel, template, args, nil)
}
+// Logw logs a message with some additional context. The variadic key-value
+// pairs are treated as they are in With.
+func (s *SugaredLogger) Logw(lvl zapcore.Level, msg string, keysAndValues ...interface{}) {
+ s.log(lvl, msg, nil, keysAndValues)
+}
+
// Debugw logs a message with some additional context. The variadic key-value
// pairs are treated as they are in With.
//
@@ -255,6 +288,12 @@ func (s *SugaredLogger) Fatalw(msg string, keysAndValues ...interface{}) {
s.log(FatalLevel, msg, nil, keysAndValues)
}
+// Logln logs a message at provided level.
+// Spaces are always added between arguments.
+func (s *SugaredLogger) Logln(lvl zapcore.Level, args ...interface{}) {
+ s.logln(lvl, args, nil)
+}
+
// Debugln logs a message at [DebugLevel].
// Spaces are always added between arguments.
func (s *SugaredLogger) Debugln(args ...interface{}) {
diff --git a/dependencies/pkg/mod/go.uber.org/zap@v1.25.0/sugar_test.go b/dependencies/pkg/mod/go.uber.org/zap@v1.27.0/sugar_test.go
index 9e914ec..8ca2bdd 100644
--- a/dependencies/pkg/mod/go.uber.org/zap@v1.25.0/sugar_test.go
+++ b/dependencies/pkg/mod/go.uber.org/zap@v1.27.0/sugar_test.go
@@ -22,6 +22,8 @@ package zap
import (
"errors"
+ "fmt"
+ "strconv"
"testing"
"go.uber.org/zap/internal/exit"
@@ -54,6 +56,9 @@ func TestSugarWith(t *testing.T) {
}
}
+ type withAny func(*SugaredLogger, ...interface{}) *SugaredLogger
+ withMethods := []withAny{(*SugaredLogger).With, (*SugaredLogger).WithLazy}
+
tests := []struct {
desc string
args []interface{}
@@ -141,16 +146,94 @@ func TestSugarWith(t *testing.T) {
}
for _, tt := range tests {
- withSugar(t, DebugLevel, nil, func(logger *SugaredLogger, logs *observer.ObservedLogs) {
- logger.With(tt.args...).Info("")
- output := logs.AllUntimed()
- if len(tt.errLogs) > 0 {
- for i := range tt.errLogs {
- assert.Equal(t, tt.errLogs[i], output[i], "Unexpected error log at position %d for scenario %s.", i, tt.desc)
+ for _, withMethod := range withMethods {
+ withSugar(t, DebugLevel, nil, func(logger *SugaredLogger, logs *observer.ObservedLogs) {
+ withMethod(logger, tt.args...).Info("")
+ output := logs.AllUntimed()
+ if len(tt.errLogs) > 0 {
+ for i := range tt.errLogs {
+ assert.Equal(t, tt.errLogs[i], output[i], "Unexpected error log at position %d for scenario %s.", i, tt.desc)
+ }
+ }
+ assert.Equal(t, len(tt.errLogs)+1, len(output), "Expected only one non-error message to be logged in scenario %s.", tt.desc)
+ assert.Equal(t, tt.expected, output[len(tt.errLogs)].Context, "Unexpected message context in scenario %s.", tt.desc)
+ })
+ }
+ }
+}
+
+func TestSugarWithCaptures(t *testing.T) {
+ type withAny func(*SugaredLogger, ...interface{}) *SugaredLogger
+
+ tests := []struct {
+ name string
+ withMethods []withAny
+ wantJSON []string
+ }{
+ {
+ name: "with captures arguments at time of With",
+ withMethods: []withAny{(*SugaredLogger).With},
+ wantJSON: []string{
+ `{
+ "m": "hello 0",
+ "a0": [0],
+ "b0": [1]
+ }`,
+ `{
+ "m": "world 0",
+ "a0": [0],
+ "c0": [2]
+ }`,
+ },
+ },
+ {
+ name: "lazy with captures arguments at time of Logging",
+ withMethods: []withAny{(*SugaredLogger).WithLazy},
+ wantJSON: []string{
+ `{
+ "m": "hello 0",
+ "a0": [1],
+ "b0": [1]
+ }`,
+ `{
+ "m": "world 0",
+ "a0": [1],
+ "c0": [2]
+ }`,
+ },
+ },
+ }
+
+ for _, tt := range tests {
+ t.Run(tt.name, func(t *testing.T) {
+ enc := zapcore.NewJSONEncoder(zapcore.EncoderConfig{
+ MessageKey: "m",
+ })
+
+ var bs ztest.Buffer
+ logger := New(zapcore.NewCore(enc, &bs, DebugLevel)).Sugar()
+
+ for i, withMethod := range tt.withMethods {
+ iStr := strconv.Itoa(i)
+ x := 10 * i
+ arr := zapcore.ArrayMarshalerFunc(func(enc zapcore.ArrayEncoder) error {
+ enc.AppendInt(x)
+ return nil
+ })
+
+ logger = withMethod(logger, Array("a"+iStr, arr))
+ x++
+ logger.Infow(fmt.Sprintf("hello %d", i), Array("b"+iStr, arr))
+ x++
+ logger = withMethod(logger, Array("c"+iStr, arr))
+ logger.Infow(fmt.Sprintf("world %d", i))
+ }
+
+ if lines := bs.Lines(); assert.Len(t, lines, len(tt.wantJSON)) {
+ for i, want := range tt.wantJSON {
+ assert.JSONEq(t, want, lines[i], "Unexpected output from the %d'th log.", i)
}
}
- assert.Equal(t, len(tt.errLogs)+1, len(output), "Expected only one non-error message to be logged in scenario %s.", tt.desc)
- assert.Equal(t, tt.expected, output[len(tt.errLogs)].Context, "Unexpected message context in scenario %s.", tt.desc)
})
}
}
@@ -228,9 +311,10 @@ func TestSugarStructuredLogging(t *testing.T) {
logger.With(context...).Warnw(tt.msg, extra...)
logger.With(context...).Errorw(tt.msg, extra...)
logger.With(context...).DPanicw(tt.msg, extra...)
+ logger.With(context...).Logw(WarnLevel, tt.msg, extra...)
- expected := make([]observer.LoggedEntry, 5)
- for i, lvl := range []zapcore.Level{DebugLevel, InfoLevel, WarnLevel, ErrorLevel, DPanicLevel} {
+ expected := make([]observer.LoggedEntry, 6)
+ for i, lvl := range []zapcore.Level{DebugLevel, InfoLevel, WarnLevel, ErrorLevel, DPanicLevel, WarnLevel} {
expected[i] = observer.LoggedEntry{
Entry: zapcore.Entry{Message: tt.expectMsg, Level: lvl},
Context: expectedFields,
@@ -260,9 +344,10 @@ func TestSugarConcatenatingLogging(t *testing.T) {
logger.With(context...).Warn(tt.args...)
logger.With(context...).Error(tt.args...)
logger.With(context...).DPanic(tt.args...)
+ logger.With(context...).Log(InfoLevel, tt.args...)
- expected := make([]observer.LoggedEntry, 5)
- for i, lvl := range []zapcore.Level{DebugLevel, InfoLevel, WarnLevel, ErrorLevel, DPanicLevel} {
+ expected := make([]observer.LoggedEntry, 6)
+ for i, lvl := range []zapcore.Level{DebugLevel, InfoLevel, WarnLevel, ErrorLevel, DPanicLevel, InfoLevel} {
expected[i] = observer.LoggedEntry{
Entry: zapcore.Entry{Message: tt.expect, Level: lvl},
Context: expectedFields,
@@ -296,9 +381,10 @@ func TestSugarTemplatedLogging(t *testing.T) {
logger.With(context...).Warnf(tt.format, tt.args...)
logger.With(context...).Errorf(tt.format, tt.args...)
logger.With(context...).DPanicf(tt.format, tt.args...)
+ logger.With(context...).Logf(ErrorLevel, tt.format, tt.args...)
- expected := make([]observer.LoggedEntry, 5)
- for i, lvl := range []zapcore.Level{DebugLevel, InfoLevel, WarnLevel, ErrorLevel, DPanicLevel} {
+ expected := make([]observer.LoggedEntry, 6)
+ for i, lvl := range []zapcore.Level{DebugLevel, InfoLevel, WarnLevel, ErrorLevel, DPanicLevel, ErrorLevel} {
expected[i] = observer.LoggedEntry{
Entry: zapcore.Entry{Message: tt.expect, Level: lvl},
Context: expectedFields,
@@ -332,9 +418,10 @@ func TestSugarLnLogging(t *testing.T) {
logger.With(context...).Warnln(tt.args...)
logger.With(context...).Errorln(tt.args...)
logger.With(context...).DPanicln(tt.args...)
+ logger.With(context...).Logln(InfoLevel, tt.args...)
- expected := make([]observer.LoggedEntry, 5)
- for i, lvl := range []zapcore.Level{DebugLevel, InfoLevel, WarnLevel, ErrorLevel, DPanicLevel} {
+ expected := make([]observer.LoggedEntry, 6)
+ for i, lvl := range []zapcore.Level{DebugLevel, InfoLevel, WarnLevel, ErrorLevel, DPanicLevel, InfoLevel} {
expected[i] = observer.LoggedEntry{
Entry: zapcore.Entry{Message: tt.expect, Level: lvl},
Context: expectedFields,
diff --git a/dependencies/pkg/mod/go.uber.org/zap@v1.25.0/time.go b/dependencies/pkg/mod/go.uber.org/zap@v1.27.0/time.go
index c5a1f16..c5a1f16 100644
--- a/dependencies/pkg/mod/go.uber.org/zap@v1.25.0/time.go
+++ b/dependencies/pkg/mod/go.uber.org/zap@v1.27.0/time.go
diff --git a/dependencies/pkg/mod/go.uber.org/zap@v1.25.0/time_test.go b/dependencies/pkg/mod/go.uber.org/zap@v1.27.0/time_test.go
index cb993ab..cb993ab 100644
--- a/dependencies/pkg/mod/go.uber.org/zap@v1.25.0/time_test.go
+++ b/dependencies/pkg/mod/go.uber.org/zap@v1.27.0/time_test.go
diff --git a/dependencies/pkg/mod/go.uber.org/zap@v1.25.0/writer.go b/dependencies/pkg/mod/go.uber.org/zap@v1.27.0/writer.go
index f08728e..06768c6 100644
--- a/dependencies/pkg/mod/go.uber.org/zap@v1.25.0/writer.go
+++ b/dependencies/pkg/mod/go.uber.org/zap@v1.27.0/writer.go
@@ -48,21 +48,21 @@ import (
// os.Stdout and os.Stderr. When specified without a scheme, relative file
// paths also work.
func Open(paths ...string) (zapcore.WriteSyncer, func(), error) {
- writers, close, err := open(paths)
+ writers, closeAll, err := open(paths)
if err != nil {
return nil, nil, err
}
writer := CombineWriteSyncers(writers...)
- return writer, close, nil
+ return writer, closeAll, nil
}
func open(paths []string) ([]zapcore.WriteSyncer, func(), error) {
writers := make([]zapcore.WriteSyncer, 0, len(paths))
closers := make([]io.Closer, 0, len(paths))
- close := func() {
+ closeAll := func() {
for _, c := range closers {
- c.Close()
+ _ = c.Close()
}
}
@@ -77,11 +77,11 @@ func open(paths []string) ([]zapcore.WriteSyncer, func(), error) {
closers = append(closers, sink)
}
if openErr != nil {
- close()
+ closeAll()
return nil, nil, openErr
}
- return writers, close, nil
+ return writers, closeAll, nil
}
// CombineWriteSyncers is a utility that combines multiple WriteSyncers into a
diff --git a/dependencies/pkg/mod/go.uber.org/zap@v1.25.0/writer_test.go b/dependencies/pkg/mod/go.uber.org/zap@v1.27.0/writer_test.go
index b743455..20e00b7 100644
--- a/dependencies/pkg/mod/go.uber.org/zap@v1.25.0/writer_test.go
+++ b/dependencies/pkg/mod/go.uber.org/zap@v1.27.0/writer_test.go
@@ -91,7 +91,6 @@ func TestOpen(t *testing.T) {
}
assert.True(t, fileExists(tempName))
- os.Remove(tempName)
}
func TestOpenPathsNotFound(t *testing.T) {
@@ -255,7 +254,8 @@ func TestOpenWithErroringSinkFactory(t *testing.T) {
func TestCombineWriteSyncers(t *testing.T) {
tw := &testWriter{"test", t}
w := CombineWriteSyncers(tw)
- w.Write([]byte("test"))
+ _, err := w.Write([]byte("test"))
+ assert.NoError(t, err, "Unexpected write error.")
}
func fileExists(name string) bool {
diff --git a/dependencies/pkg/mod/go.uber.org/zap@v1.25.0/zapcore/buffered_write_syncer.go b/dependencies/pkg/mod/go.uber.org/zap@v1.27.0/zapcore/buffered_write_syncer.go
index a40e93b..a40e93b 100644
--- a/dependencies/pkg/mod/go.uber.org/zap@v1.25.0/zapcore/buffered_write_syncer.go
+++ b/dependencies/pkg/mod/go.uber.org/zap@v1.27.0/zapcore/buffered_write_syncer.go
diff --git a/dependencies/pkg/mod/go.uber.org/zap@v1.25.0/zapcore/buffered_write_syncer_bench_test.go b/dependencies/pkg/mod/go.uber.org/zap@v1.27.0/zapcore/buffered_write_syncer_bench_test.go
index 1e3db59..56ad5f2 100644
--- a/dependencies/pkg/mod/go.uber.org/zap@v1.25.0/zapcore/buffered_write_syncer_bench_test.go
+++ b/dependencies/pkg/mod/go.uber.org/zap@v1.27.0/zapcore/buffered_write_syncer_bench_test.go
@@ -40,11 +40,15 @@ func BenchmarkBufferedWriteSyncer(b *testing.B) {
w := &BufferedWriteSyncer{
WS: AddSync(file),
}
- defer w.Stop()
+ defer func() {
+ assert.NoError(b, w.Stop(), "failed to stop buffered write syncer")
+ }()
b.ResetTimer()
b.RunParallel(func(pb *testing.PB) {
for pb.Next() {
- w.Write([]byte("foobarbazbabble"))
+ if _, err := w.Write([]byte("foobarbazbabble")); err != nil {
+ b.Fatal(err)
+ }
}
})
})
diff --git a/dependencies/pkg/mod/go.uber.org/zap@v1.25.0/zapcore/buffered_write_syncer_test.go b/dependencies/pkg/mod/go.uber.org/zap@v1.27.0/zapcore/buffered_write_syncer_test.go
index 8a36ad6..d0f6037 100644
--- a/dependencies/pkg/mod/go.uber.org/zap@v1.25.0/zapcore/buffered_write_syncer_test.go
+++ b/dependencies/pkg/mod/go.uber.org/zap@v1.27.0/zapcore/buffered_write_syncer_test.go
@@ -101,7 +101,8 @@ func TestBufferWriter(t *testing.T) {
n, err := ws.Write([]byte("foo"))
require.NoError(t, err, "Unexpected error writing to WriteSyncer.")
require.Equal(t, 3, n, "Wrote an unexpected number of bytes.")
- ws.Write([]byte("foo"))
+ _, err = ws.Write([]byte("foo"))
+ assert.Error(t, err, "Expected error writing to WriteSyncer.")
assert.Error(t, ws.Stop(), "Expected stop to fail.")
})
diff --git a/dependencies/pkg/mod/go.uber.org/zap@v1.25.0/zapcore/clock.go b/dependencies/pkg/mod/go.uber.org/zap@v1.27.0/zapcore/clock.go
index 422fd82..422fd82 100644
--- a/dependencies/pkg/mod/go.uber.org/zap@v1.25.0/zapcore/clock.go
+++ b/dependencies/pkg/mod/go.uber.org/zap@v1.27.0/zapcore/clock.go
diff --git a/dependencies/pkg/mod/go.uber.org/zap@v1.25.0/zapcore/clock_test.go b/dependencies/pkg/mod/go.uber.org/zap@v1.27.0/zapcore/clock_test.go
index 0dff349..0dff349 100644
--- a/dependencies/pkg/mod/go.uber.org/zap@v1.25.0/zapcore/clock_test.go
+++ b/dependencies/pkg/mod/go.uber.org/zap@v1.27.0/zapcore/clock_test.go
diff --git a/dependencies/pkg/mod/go.uber.org/zap@v1.25.0/zapcore/console_encoder.go b/dependencies/pkg/mod/go.uber.org/zap@v1.27.0/zapcore/console_encoder.go
index 8ca0bfa..cc2b4e0 100644
--- a/dependencies/pkg/mod/go.uber.org/zap@v1.25.0/zapcore/console_encoder.go
+++ b/dependencies/pkg/mod/go.uber.org/zap@v1.27.0/zapcore/console_encoder.go
@@ -77,7 +77,7 @@ func (c consoleEncoder) EncodeEntry(ent Entry, fields []Field) (*buffer.Buffer,
// If this ever becomes a performance bottleneck, we can implement
// ArrayEncoder for our plain-text format.
arr := getSliceEncoder()
- if c.TimeKey != "" && c.EncodeTime != nil {
+ if c.TimeKey != "" && c.EncodeTime != nil && !ent.Time.IsZero() {
c.EncodeTime(ent.Time, arr)
}
if c.LevelKey != "" && c.EncodeLevel != nil {
diff --git a/dependencies/pkg/mod/go.uber.org/zap@v1.25.0/zapcore/console_encoder_bench_test.go b/dependencies/pkg/mod/go.uber.org/zap@v1.27.0/zapcore/console_encoder_bench_test.go
index 62feaea..a2a360f 100644
--- a/dependencies/pkg/mod/go.uber.org/zap@v1.25.0/zapcore/console_encoder_bench_test.go
+++ b/dependencies/pkg/mod/go.uber.org/zap@v1.27.0/zapcore/console_encoder_bench_test.go
@@ -23,6 +23,7 @@ package zapcore_test
import (
"testing"
+ //revive:disable:dot-imports
. "go.uber.org/zap/zapcore"
)
diff --git a/dependencies/pkg/mod/go.uber.org/zap@v1.25.0/zapcore/console_encoder_test.go b/dependencies/pkg/mod/go.uber.org/zap@v1.27.0/zapcore/console_encoder_test.go
index b03f1a7..be8adc7 100644
--- a/dependencies/pkg/mod/go.uber.org/zap@v1.25.0/zapcore/console_encoder_test.go
+++ b/dependencies/pkg/mod/go.uber.org/zap@v1.27.0/zapcore/console_encoder_test.go
@@ -21,21 +21,65 @@ package zapcore_test
import (
"testing"
+ "time"
"github.com/stretchr/testify/assert"
+ //revive:disable:dot-imports
. "go.uber.org/zap/zapcore"
)
-var (
- testEntry = Entry{
- LoggerName: "main",
- Level: InfoLevel,
- Message: `hello`,
- Time: _epoch,
- Stack: "fake-stack",
- Caller: EntryCaller{Defined: true, File: "foo.go", Line: 42, Function: "foo.Foo"},
+var testEntry = Entry{
+ LoggerName: "main",
+ Level: InfoLevel,
+ Message: `hello`,
+ Time: _epoch,
+ Stack: "fake-stack",
+ Caller: EntryCaller{Defined: true, File: "foo.go", Line: 42, Function: "foo.Foo"},
+}
+
+func TestConsoleEncodeEntry(t *testing.T) {
+ tests := []struct {
+ desc string
+ expected string
+ ent Entry
+ fields []Field
+ }{
+ {
+ desc: "info no fields",
+ expected: "2018-06-19T16:33:42Z\tinfo\tbob\tlob law\n",
+ ent: Entry{
+ Level: InfoLevel,
+ Time: time.Date(2018, 6, 19, 16, 33, 42, 99, time.UTC),
+ LoggerName: "bob",
+ Message: "lob law",
+ },
+ },
+ {
+ desc: "zero_time_omitted",
+ expected: "info\tname\tmessage\n",
+ ent: Entry{
+ Level: InfoLevel,
+ Time: time.Time{},
+ LoggerName: "name",
+ Message: "message",
+ },
+ },
}
-)
+
+ cfg := testEncoderConfig()
+ cfg.EncodeTime = RFC3339TimeEncoder
+ enc := NewConsoleEncoder(cfg)
+
+ for _, tt := range tests {
+ t.Run(tt.desc, func(t *testing.T) {
+ buf, err := enc.EncodeEntry(tt.ent, tt.fields)
+ if assert.NoError(t, err, "Unexpected console encoding error.") {
+ assert.Equal(t, tt.expected, buf.String(), "Incorrect encoded entry.")
+ }
+ buf.Free()
+ })
+ }
+}
func TestConsoleSeparator(t *testing.T) {
tests := []struct {
diff --git a/dependencies/pkg/mod/go.uber.org/zap@v1.25.0/zapcore/core.go b/dependencies/pkg/mod/go.uber.org/zap@v1.27.0/zapcore/core.go
index 9dfd640..776e93f 100644
--- a/dependencies/pkg/mod/go.uber.org/zap@v1.25.0/zapcore/core.go
+++ b/dependencies/pkg/mod/go.uber.org/zap@v1.27.0/zapcore/core.go
@@ -102,9 +102,9 @@ func (c *ioCore) Write(ent Entry, fields []Field) error {
return err
}
if ent.Level > ErrorLevel {
- // Since we may be crashing the program, sync the output. Ignore Sync
- // errors, pending a clean solution to issue #370.
- c.Sync()
+ // Since we may be crashing the program, sync the output.
+ // Ignore Sync errors, pending a clean solution to issue #370.
+ _ = c.Sync()
}
return nil
}
diff --git a/dependencies/pkg/mod/go.uber.org/zap@v1.25.0/zapcore/core_test.go b/dependencies/pkg/mod/go.uber.org/zap@v1.27.0/zapcore/core_test.go
index 1311097..3b23d2d 100644
--- a/dependencies/pkg/mod/go.uber.org/zap@v1.25.0/zapcore/core_test.go
+++ b/dependencies/pkg/mod/go.uber.org/zap@v1.27.0/zapcore/core_test.go
@@ -27,6 +27,7 @@ import (
"time"
"go.uber.org/zap/internal/ztest"
+ //revive:disable:dot-imports
. "go.uber.org/zap/zapcore"
"github.com/stretchr/testify/assert"
@@ -148,7 +149,7 @@ func TestIOCoreSyncsOutput(t *testing.T) {
DebugLevel,
)
- core.Write(tt.entry, nil)
+ assert.NoError(t, core.Write(tt.entry, nil), "Unexpected error writing entry.")
assert.Equal(t, tt.shouldSync, sink.Called(), "Incorrect Sync behavior.")
}
}
diff --git a/dependencies/pkg/mod/go.uber.org/zap@v1.25.0/zapcore/doc.go b/dependencies/pkg/mod/go.uber.org/zap@v1.27.0/zapcore/doc.go
index 31000e9..31000e9 100644
--- a/dependencies/pkg/mod/go.uber.org/zap@v1.25.0/zapcore/doc.go
+++ b/dependencies/pkg/mod/go.uber.org/zap@v1.27.0/zapcore/doc.go
diff --git a/dependencies/pkg/mod/go.uber.org/zap@v1.25.0/zapcore/encoder.go b/dependencies/pkg/mod/go.uber.org/zap@v1.27.0/zapcore/encoder.go
index 5769ff3..0446254 100644
--- a/dependencies/pkg/mod/go.uber.org/zap@v1.25.0/zapcore/encoder.go
+++ b/dependencies/pkg/mod/go.uber.org/zap@v1.27.0/zapcore/encoder.go
@@ -37,6 +37,9 @@ const DefaultLineEnding = "\n"
const OmitKey = ""
// A LevelEncoder serializes a Level to a primitive type.
+//
+// This function must make exactly one call
+// to a PrimitiveArrayEncoder's Append* method.
type LevelEncoder func(Level, PrimitiveArrayEncoder)
// LowercaseLevelEncoder serializes a Level to a lowercase string. For example,
@@ -90,6 +93,9 @@ func (e *LevelEncoder) UnmarshalText(text []byte) error {
}
// A TimeEncoder serializes a time.Time to a primitive type.
+//
+// This function must make exactly one call
+// to a PrimitiveArrayEncoder's Append* method.
type TimeEncoder func(time.Time, PrimitiveArrayEncoder)
// EpochTimeEncoder serializes a time.Time to a floating-point number of seconds
@@ -219,6 +225,9 @@ func (e *TimeEncoder) UnmarshalJSON(data []byte) error {
}
// A DurationEncoder serializes a time.Duration to a primitive type.
+//
+// This function must make exactly one call
+// to a PrimitiveArrayEncoder's Append* method.
type DurationEncoder func(time.Duration, PrimitiveArrayEncoder)
// SecondsDurationEncoder serializes a time.Duration to a floating-point number of seconds elapsed.
@@ -262,6 +271,9 @@ func (e *DurationEncoder) UnmarshalText(text []byte) error {
}
// A CallerEncoder serializes an EntryCaller to a primitive type.
+//
+// This function must make exactly one call
+// to a PrimitiveArrayEncoder's Append* method.
type CallerEncoder func(EntryCaller, PrimitiveArrayEncoder)
// FullCallerEncoder serializes a caller in /full/path/to/package/file:line
@@ -292,6 +304,9 @@ func (e *CallerEncoder) UnmarshalText(text []byte) error {
// A NameEncoder serializes a period-separated logger name to a primitive
// type.
+//
+// This function must make exactly one call
+// to a PrimitiveArrayEncoder's Append* method.
type NameEncoder func(string, PrimitiveArrayEncoder)
// FullNameEncoder serializes the logger name as-is.
diff --git a/dependencies/pkg/mod/go.uber.org/zap@v1.25.0/zapcore/encoder_test.go b/dependencies/pkg/mod/go.uber.org/zap@v1.27.0/zapcore/encoder_test.go
index c0dbc5b..f89f489 100644
--- a/dependencies/pkg/mod/go.uber.org/zap@v1.25.0/zapcore/encoder_test.go
+++ b/dependencies/pkg/mod/go.uber.org/zap@v1.27.0/zapcore/encoder_test.go
@@ -30,6 +30,7 @@ import (
"github.com/stretchr/testify/require"
"gopkg.in/yaml.v3"
+ //revive:disable:dot-imports
. "go.uber.org/zap/zapcore"
)
@@ -285,10 +286,11 @@ func TestEncoderConfiguration(t *testing.T) {
},
extra: func(enc Encoder) {
enc.AddTime("extra", _epoch)
- enc.AddArray("extras", ArrayMarshalerFunc(func(enc ArrayEncoder) error {
+ err := enc.AddArray("extras", ArrayMarshalerFunc(func(enc ArrayEncoder) error {
enc.AppendTime(_epoch)
return nil
}))
+ assert.NoError(t, err)
},
expectedJSON: `{"L":"info","T":"1970-01-01 00:00:00 +0000 UTC","N":"main","C":"foo.go:42","F":"foo.Foo","M":"hello","extra":"1970-01-01 00:00:00 +0000 UTC","extras":["1970-01-01 00:00:00 +0000 UTC"],"S":"fake-stack"}` + "\n",
expectedConsole: "1970-01-01 00:00:00 +0000 UTC\tinfo\tmain\tfoo.go:42\tfoo.Foo\thello\t" + // plain-text preamble
@@ -313,10 +315,11 @@ func TestEncoderConfiguration(t *testing.T) {
},
extra: func(enc Encoder) {
enc.AddDuration("extra", time.Second)
- enc.AddArray("extras", ArrayMarshalerFunc(func(enc ArrayEncoder) error {
+ err := enc.AddArray("extras", ArrayMarshalerFunc(func(enc ArrayEncoder) error {
enc.AppendDuration(time.Minute)
return nil
}))
+ assert.NoError(t, err)
},
expectedJSON: `{"L":"info","T":0,"N":"main","C":"foo.go:42","F":"foo.Foo","M":"hello","extra":"1s","extras":["1m0s"],"S":"fake-stack"}` + "\n",
expectedConsole: "0\tinfo\tmain\tfoo.go:42\tfoo.Foo\thello\t" + // preamble
@@ -720,10 +723,11 @@ func TestNameEncoders(t *testing.T) {
func assertAppended(t testing.TB, expected interface{}, f func(ArrayEncoder), msgAndArgs ...interface{}) {
mem := NewMapObjectEncoder()
- mem.AddArray("k", ArrayMarshalerFunc(func(arr ArrayEncoder) error {
+ err := mem.AddArray("k", ArrayMarshalerFunc(func(arr ArrayEncoder) error {
f(arr)
return nil
}))
+ assert.NoError(t, err, msgAndArgs...)
arr := mem.Fields["k"].([]interface{})
require.Equal(t, 1, len(arr), "Expected to append exactly one element to array.")
assert.Equal(t, expected, arr[0], msgAndArgs...)
diff --git a/dependencies/pkg/mod/go.uber.org/zap@v1.25.0/zapcore/entry.go b/dependencies/pkg/mod/go.uber.org/zap@v1.27.0/zapcore/entry.go
index 059844f..459a5d7 100644
--- a/dependencies/pkg/mod/go.uber.org/zap@v1.25.0/zapcore/entry.go
+++ b/dependencies/pkg/mod/go.uber.org/zap@v1.27.0/zapcore/entry.go
@@ -242,7 +242,7 @@ func (ce *CheckedEntry) Write(fields ...Field) {
// CheckedEntry is being used after it was returned to the pool,
// the message may be an amalgamation from multiple call sites.
fmt.Fprintf(ce.ErrorOutput, "%v Unsafe CheckedEntry re-use near Entry %+v.\n", ce.Time, ce.Entry)
- ce.ErrorOutput.Sync()
+ _ = ce.ErrorOutput.Sync() // ignore error
}
return
}
@@ -254,7 +254,7 @@ func (ce *CheckedEntry) Write(fields ...Field) {
}
if err != nil && ce.ErrorOutput != nil {
fmt.Fprintf(ce.ErrorOutput, "%v write error: %v\n", ce.Time, err)
- ce.ErrorOutput.Sync()
+ _ = ce.ErrorOutput.Sync() // ignore error
}
hook := ce.after
diff --git a/dependencies/pkg/mod/go.uber.org/zap@v1.27.0/zapcore/entry_ext_test.go b/dependencies/pkg/mod/go.uber.org/zap@v1.27.0/zapcore/entry_ext_test.go
new file mode 100644
index 0000000..fd1a05c
--- /dev/null
+++ b/dependencies/pkg/mod/go.uber.org/zap@v1.27.0/zapcore/entry_ext_test.go
@@ -0,0 +1,55 @@
+// Copyright (c) 2023 Uber Technologies, Inc.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+package zapcore_test
+
+import (
+ "bytes"
+ "testing"
+ "time"
+
+ "github.com/stretchr/testify/assert"
+ "go.uber.org/zap"
+ "go.uber.org/zap/zapcore"
+ "go.uber.org/zap/zaptest"
+)
+
+func TestCheckedEntryIllegalReuse(t *testing.T) {
+ t.Parallel()
+
+ var errOut bytes.Buffer
+
+ testCore := zaptest.NewLogger(t).Core()
+ ce := testCore.Check(zapcore.Entry{
+ Level: zapcore.InfoLevel,
+ Time: time.Now(),
+ Message: "hello",
+ }, nil)
+ ce.ErrorOutput = zapcore.AddSync(&errOut)
+
+ // The first write should succeed.
+ ce.Write(zap.String("k", "v"), zap.Int("n", 42))
+ assert.Empty(t, errOut.String(), "Expected no errors on first write.")
+
+ // The second write should fail.
+ ce.Write(zap.String("foo", "bar"), zap.Int("x", 1))
+ assert.Contains(t, errOut.String(), "Unsafe CheckedEntry re-use near Entry",
+ "Expected error logged on second write.")
+}
diff --git a/dependencies/pkg/mod/go.uber.org/zap@v1.25.0/zapcore/entry_test.go b/dependencies/pkg/mod/go.uber.org/zap@v1.27.0/zapcore/entry_test.go
index 6555ab6..6555ab6 100644
--- a/dependencies/pkg/mod/go.uber.org/zap@v1.25.0/zapcore/entry_test.go
+++ b/dependencies/pkg/mod/go.uber.org/zap@v1.27.0/zapcore/entry_test.go
diff --git a/dependencies/pkg/mod/go.uber.org/zap@v1.25.0/zapcore/error.go b/dependencies/pkg/mod/go.uber.org/zap@v1.27.0/zapcore/error.go
index c67dd71..c40df13 100644
--- a/dependencies/pkg/mod/go.uber.org/zap@v1.25.0/zapcore/error.go
+++ b/dependencies/pkg/mod/go.uber.org/zap@v1.27.0/zapcore/error.go
@@ -98,8 +98,11 @@ func (errs errArray) MarshalLogArray(arr ArrayEncoder) error {
}
el := newErrArrayElem(errs[i])
- arr.AppendObject(el)
+ err := arr.AppendObject(el)
el.Free()
+ if err != nil {
+ return err
+ }
}
return nil
}
diff --git a/dependencies/pkg/mod/go.uber.org/zap@v1.25.0/zapcore/error_test.go b/dependencies/pkg/mod/go.uber.org/zap@v1.27.0/zapcore/error_test.go
index d8263ab..41f243a 100644
--- a/dependencies/pkg/mod/go.uber.org/zap@v1.25.0/zapcore/error_test.go
+++ b/dependencies/pkg/mod/go.uber.org/zap@v1.27.0/zapcore/error_test.go
@@ -29,6 +29,7 @@ import (
"github.com/stretchr/testify/assert"
"go.uber.org/multierr"
+ //revive:disable:dot-imports
. "go.uber.org/zap/zapcore"
)
@@ -161,3 +162,49 @@ func TestRichErrorSupport(t *testing.T) {
f.AddTo(enc)
assert.Equal(t, "failed: egad", enc.Fields["k"], "Unexpected basic error message.")
}
+
+func TestErrArrayBrokenEncoder(t *testing.T) {
+ t.Parallel()
+
+ f := Field{
+ Key: "foo",
+ Type: ErrorType,
+ Interface: multierr.Combine(
+ errors.New("foo"),
+ errors.New("bar"),
+ ),
+ }
+
+ failWith := errors.New("great sadness")
+ enc := NewMapObjectEncoder()
+ f.AddTo(brokenArrayObjectEncoder{
+ Err: failWith,
+ ObjectEncoder: enc,
+ })
+
+ // Failure to add the field to the encoder
+ // causes the error to be added as a string field.
+ assert.Equal(t, "great sadness", enc.Fields["fooError"],
+ "Unexpected error message.")
+}
+
+// brokenArrayObjectEncoder is an ObjectEncoder
+// that builds a broken ArrayEncoder.
+type brokenArrayObjectEncoder struct {
+ ObjectEncoder
+ ArrayEncoder
+
+ Err error // error to return
+}
+
+func (enc brokenArrayObjectEncoder) AddArray(key string, marshaler ArrayMarshaler) error {
+ return enc.ObjectEncoder.AddArray(key,
+ ArrayMarshalerFunc(func(ae ArrayEncoder) error {
+ enc.ArrayEncoder = ae
+ return marshaler.MarshalLogArray(enc)
+ }))
+}
+
+func (enc brokenArrayObjectEncoder) AppendObject(ObjectMarshaler) error {
+ return enc.Err
+}
diff --git a/dependencies/pkg/mod/go.uber.org/zap@v1.25.0/zapcore/field.go b/dependencies/pkg/mod/go.uber.org/zap@v1.27.0/zapcore/field.go
index 95bdb0a..308c978 100644
--- a/dependencies/pkg/mod/go.uber.org/zap@v1.25.0/zapcore/field.go
+++ b/dependencies/pkg/mod/go.uber.org/zap@v1.27.0/zapcore/field.go
@@ -47,7 +47,7 @@ const (
ByteStringType
// Complex128Type indicates that the field carries a complex128.
Complex128Type
- // Complex64Type indicates that the field carries a complex128.
+ // Complex64Type indicates that the field carries a complex64.
Complex64Type
// DurationType indicates that the field carries a time.Duration.
DurationType
diff --git a/dependencies/pkg/mod/go.uber.org/zap@v1.25.0/zapcore/field_test.go b/dependencies/pkg/mod/go.uber.org/zap@v1.27.0/zapcore/field_test.go
index c436329..06bcef2 100644
--- a/dependencies/pkg/mod/go.uber.org/zap@v1.25.0/zapcore/field_test.go
+++ b/dependencies/pkg/mod/go.uber.org/zap@v1.27.0/zapcore/field_test.go
@@ -31,6 +31,8 @@ import (
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"go.uber.org/zap"
+
+ //revive:disable:dot-imports
. "go.uber.org/zap/zapcore"
)
@@ -88,9 +90,8 @@ type errObj struct {
func (eobj *errObj) Error() string {
if eobj.kind == 1 {
panic("panic in Error() method")
- } else {
- return eobj.errMsg
}
+ return eobj.errMsg
}
func TestUnknownFieldType(t *testing.T) {
@@ -309,6 +310,16 @@ func TestEquals(t *testing.T) {
b: zap.Any("k", map[string]string{"a": "d"}),
want: false,
},
+ {
+ a: zap.Dict("k", zap.String("a", "b")),
+ b: zap.Dict("k", zap.String("a", "b")),
+ want: true,
+ },
+ {
+ a: zap.Dict("k", zap.String("a", "b")),
+ b: zap.Dict("k", zap.String("a", "d")),
+ want: false,
+ },
}
for _, tt := range tests {
diff --git a/dependencies/pkg/mod/go.uber.org/zap@v1.25.0/zapcore/hook.go b/dependencies/pkg/mod/go.uber.org/zap@v1.27.0/zapcore/hook.go
index 198def9..198def9 100644
--- a/dependencies/pkg/mod/go.uber.org/zap@v1.25.0/zapcore/hook.go
+++ b/dependencies/pkg/mod/go.uber.org/zap@v1.27.0/zapcore/hook.go
diff --git a/dependencies/pkg/mod/go.uber.org/zap@v1.25.0/zapcore/hook_test.go b/dependencies/pkg/mod/go.uber.org/zap@v1.27.0/zapcore/hook_test.go
index 46e3c35..360b222 100644
--- a/dependencies/pkg/mod/go.uber.org/zap@v1.25.0/zapcore/hook_test.go
+++ b/dependencies/pkg/mod/go.uber.org/zap@v1.27.0/zapcore/hook_test.go
@@ -23,6 +23,7 @@ package zapcore_test
import (
"testing"
+ //revive:disable:dot-imports
. "go.uber.org/zap/zapcore"
"go.uber.org/zap/zaptest/observer"
diff --git a/dependencies/pkg/mod/go.uber.org/zap@v1.25.0/zapcore/increase_level.go b/dependencies/pkg/mod/go.uber.org/zap@v1.27.0/zapcore/increase_level.go
index 7a11237..7a11237 100644
--- a/dependencies/pkg/mod/go.uber.org/zap@v1.25.0/zapcore/increase_level.go
+++ b/dependencies/pkg/mod/go.uber.org/zap@v1.27.0/zapcore/increase_level.go
diff --git a/dependencies/pkg/mod/go.uber.org/zap@v1.25.0/zapcore/increase_level_test.go b/dependencies/pkg/mod/go.uber.org/zap@v1.27.0/zapcore/increase_level_test.go
index f80d790..14cd857 100644
--- a/dependencies/pkg/mod/go.uber.org/zap@v1.25.0/zapcore/increase_level_test.go
+++ b/dependencies/pkg/mod/go.uber.org/zap@v1.27.0/zapcore/increase_level_test.go
@@ -27,6 +27,8 @@ import (
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"go.uber.org/zap"
+
+ //revive:disable:dot-imports
. "go.uber.org/zap/zapcore"
"go.uber.org/zap/zaptest/observer"
)
diff --git a/dependencies/pkg/mod/go.uber.org/zap@v1.25.0/zapcore/json_encoder.go b/dependencies/pkg/mod/go.uber.org/zap@v1.27.0/zapcore/json_encoder.go
index ce6838d..9685169 100644
--- a/dependencies/pkg/mod/go.uber.org/zap@v1.25.0/zapcore/json_encoder.go
+++ b/dependencies/pkg/mod/go.uber.org/zap@v1.27.0/zapcore/json_encoder.go
@@ -372,7 +372,7 @@ func (enc *jsonEncoder) EncodeEntry(ent Entry, fields []Field) (*buffer.Buffer,
final.AppendString(ent.Level.String())
}
}
- if final.TimeKey != "" {
+ if final.TimeKey != "" && !ent.Time.IsZero() {
final.AddTime(final.TimeKey, ent.Time)
}
if ent.LoggerName != "" && final.NameKey != "" {
@@ -486,73 +486,98 @@ func (enc *jsonEncoder) appendFloat(val float64, bitSize int) {
// Unlike the standard library's encoder, it doesn't attempt to protect the
// user from browser vulnerabilities or JSONP-related problems.
func (enc *jsonEncoder) safeAddString(s string) {
- for i := 0; i < len(s); {
- if enc.tryAddRuneSelf(s[i]) {
- i++
- continue
- }
- r, size := utf8.DecodeRuneInString(s[i:])
- if enc.tryAddRuneError(r, size) {
- i++
- continue
- }
- enc.buf.AppendString(s[i : i+size])
- i += size
- }
+ safeAppendStringLike(
+ (*buffer.Buffer).AppendString,
+ utf8.DecodeRuneInString,
+ enc.buf,
+ s,
+ )
}
// safeAddByteString is no-alloc equivalent of safeAddString(string(s)) for s []byte.
func (enc *jsonEncoder) safeAddByteString(s []byte) {
+ safeAppendStringLike(
+ (*buffer.Buffer).AppendBytes,
+ utf8.DecodeRune,
+ enc.buf,
+ s,
+ )
+}
+
+// safeAppendStringLike is a generic implementation of safeAddString and safeAddByteString.
+// It appends a string or byte slice to the buffer, escaping all special characters.
+func safeAppendStringLike[S []byte | string](
+ // appendTo appends this string-like object to the buffer.
+ appendTo func(*buffer.Buffer, S),
+ // decodeRune decodes the next rune from the string-like object
+ // and returns its value and width in bytes.
+ decodeRune func(S) (rune, int),
+ buf *buffer.Buffer,
+ s S,
+) {
+ // The encoding logic below works by skipping over characters
+ // that can be safely copied as-is,
+ // until a character is found that needs special handling.
+ // At that point, we copy everything we've seen so far,
+ // and then handle that special character.
+ //
+ // last is the index of the last byte that was copied to the buffer.
+ last := 0
for i := 0; i < len(s); {
- if enc.tryAddRuneSelf(s[i]) {
+ if s[i] >= utf8.RuneSelf {
+ // Character >= RuneSelf may be part of a multi-byte rune.
+ // They need to be decoded before we can decide how to handle them.
+ r, size := decodeRune(s[i:])
+ if r != utf8.RuneError || size != 1 {
+ // No special handling required.
+ // Skip over this rune and continue.
+ i += size
+ continue
+ }
+
+ // Invalid UTF-8 sequence.
+ // Replace it with the Unicode replacement character.
+ appendTo(buf, s[last:i])
+ buf.AppendString(`\ufffd`)
+
i++
- continue
- }
- r, size := utf8.DecodeRune(s[i:])
- if enc.tryAddRuneError(r, size) {
+ last = i
+ } else {
+ // Character < RuneSelf is a single-byte UTF-8 rune.
+ if s[i] >= 0x20 && s[i] != '\\' && s[i] != '"' {
+ // No escaping necessary.
+ // Skip over this character and continue.
+ i++
+ continue
+ }
+
+ // This character needs to be escaped.
+ appendTo(buf, s[last:i])
+ switch s[i] {
+ case '\\', '"':
+ buf.AppendByte('\\')
+ buf.AppendByte(s[i])
+ case '\n':
+ buf.AppendByte('\\')
+ buf.AppendByte('n')
+ case '\r':
+ buf.AppendByte('\\')
+ buf.AppendByte('r')
+ case '\t':
+ buf.AppendByte('\\')
+ buf.AppendByte('t')
+ default:
+ // Encode bytes < 0x20, except for the escape sequences above.
+ buf.AppendString(`\u00`)
+ buf.AppendByte(_hex[s[i]>>4])
+ buf.AppendByte(_hex[s[i]&0xF])
+ }
+
i++
- continue
+ last = i
}
- enc.buf.Write(s[i : i+size])
- i += size
}
-}
-// tryAddRuneSelf appends b if it is valid UTF-8 character represented in a single byte.
-func (enc *jsonEncoder) tryAddRuneSelf(b byte) bool {
- if b >= utf8.RuneSelf {
- return false
- }
- if b >= 0x20 && b != '\\' && b != '"' {
- enc.buf.AppendByte(b)
- return true
- }
- switch b {
- case '\\', '"':
- enc.buf.AppendByte('\\')
- enc.buf.AppendByte(b)
- case '\n':
- enc.buf.AppendByte('\\')
- enc.buf.AppendByte('n')
- case '\r':
- enc.buf.AppendByte('\\')
- enc.buf.AppendByte('r')
- case '\t':
- enc.buf.AppendByte('\\')
- enc.buf.AppendByte('t')
- default:
- // Encode bytes < 0x20, except for the escape sequences above.
- enc.buf.AppendString(`\u00`)
- enc.buf.AppendByte(_hex[b>>4])
- enc.buf.AppendByte(_hex[b&0xF])
- }
- return true
-}
-
-func (enc *jsonEncoder) tryAddRuneError(r rune, size int) bool {
- if r == utf8.RuneError && size == 1 {
- enc.buf.AppendString(`\ufffd`)
- return true
- }
- return false
+ // add remaining
+ appendTo(buf, s[last:])
}
diff --git a/dependencies/pkg/mod/go.uber.org/zap@v1.25.0/zapcore/json_encoder_bench_test.go b/dependencies/pkg/mod/go.uber.org/zap@v1.27.0/zapcore/json_encoder_bench_test.go
index bcb5a01..d870e07 100644
--- a/dependencies/pkg/mod/go.uber.org/zap@v1.25.0/zapcore/json_encoder_bench_test.go
+++ b/dependencies/pkg/mod/go.uber.org/zap@v1.27.0/zapcore/json_encoder_bench_test.go
@@ -22,19 +22,24 @@ package zapcore_test
import (
"encoding/json"
+ "fmt"
"testing"
"time"
+ //revive:disable:dot-imports
. "go.uber.org/zap/zapcore"
)
func BenchmarkJSONLogMarshalerFunc(b *testing.B) {
for i := 0; i < b.N; i++ {
enc := NewJSONEncoder(testEncoderConfig())
- enc.AddObject("nested", ObjectMarshalerFunc(func(enc ObjectEncoder) error {
+ err := enc.AddObject("nested", ObjectMarshalerFunc(func(enc ObjectEncoder) error {
enc.AddInt64("i", int64(i))
return nil
}))
+ if err != nil {
+ b.Fatal(err)
+ }
}
}
@@ -48,7 +53,28 @@ func BenchmarkZapJSONFloat32AndComplex64(b *testing.B) {
})
}
+const _sliceSize = 5000
+
+type StringSlice []string
+
+func (s StringSlice) MarshalLogArray(encoder ArrayEncoder) error {
+ for _, str := range s {
+ encoder.AppendString(str)
+ }
+ return nil
+}
+
+func generateStringSlice(n int) StringSlice {
+ output := make(StringSlice, 0, n)
+ for i := 0; i < n; i++ {
+ output = append(output, fmt.Sprint("00000000-0000-0000-0000-0000000000", i))
+ }
+ return output
+}
+
func BenchmarkZapJSON(b *testing.B) {
+ additional := generateStringSlice(_sliceSize)
+ b.ResetTimer()
b.RunParallel(func(pb *testing.PB) {
for pb.Next() {
enc := NewJSONEncoder(testEncoderConfig())
@@ -61,6 +87,7 @@ func BenchmarkZapJSON(b *testing.B) {
enc.AddString("string3", "🤔")
enc.AddString("string4", "🙊")
enc.AddBool("bool", true)
+ _ = enc.AddArray("test", additional)
buf, _ := enc.EncodeEntry(Entry{
Message: "fake",
Level: DebugLevel,
@@ -72,10 +99,11 @@ func BenchmarkZapJSON(b *testing.B) {
func BenchmarkStandardJSON(b *testing.B) {
record := struct {
- Level string `json:"level"`
- Message string `json:"msg"`
- Time time.Time `json:"ts"`
- Fields map[string]interface{} `json:"fields"`
+ Level string `json:"level"`
+ Message string `json:"msg"`
+ Time time.Time `json:"ts"`
+ Fields map[string]interface{} `json:"fields"`
+ Additional StringSlice
}{
Level: "debug",
Message: "fake",
@@ -91,11 +119,14 @@ func BenchmarkStandardJSON(b *testing.B) {
"string4": "🙊",
"bool": true,
},
+ Additional: generateStringSlice(_sliceSize),
}
b.ResetTimer()
b.RunParallel(func(pb *testing.PB) {
for pb.Next() {
- json.Marshal(record)
+ if _, err := json.Marshal(record); err != nil {
+ b.Fatal(err)
+ }
}
})
}
diff --git a/dependencies/pkg/mod/go.uber.org/zap@v1.25.0/zapcore/json_encoder_impl_test.go b/dependencies/pkg/mod/go.uber.org/zap@v1.27.0/zapcore/json_encoder_impl_test.go
index fde241f..5f81262 100644
--- a/dependencies/pkg/mod/go.uber.org/zap@v1.25.0/zapcore/json_encoder_impl_test.go
+++ b/dependencies/pkg/mod/go.uber.org/zap@v1.27.0/zapcore/json_encoder_impl_test.go
@@ -29,10 +29,13 @@ import (
"testing"
"testing/quick"
"time"
+ "unicode/utf8"
+ "go.uber.org/zap/buffer"
"go.uber.org/zap/internal/bufferpool"
"github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
"go.uber.org/multierr"
)
@@ -249,7 +252,7 @@ func TestJSONEncoderObjectFields(t *testing.T) {
desc: "object (no nested namespace)",
expected: `"obj":{"obj-out":"obj-outside-namespace"},"not-obj":"should-be-outside-obj"`,
f: func(e Encoder) {
- e.AddObject("obj", maybeNamespace{false})
+ assert.NoError(t, e.AddObject("obj", maybeNamespace{false}))
e.AddString("not-obj", "should-be-outside-obj")
},
},
@@ -257,7 +260,7 @@ func TestJSONEncoderObjectFields(t *testing.T) {
desc: "object (with nested namespace)",
expected: `"obj":{"obj-out":"obj-outside-namespace","obj-namespace":{"obj-in":"obj-inside-namespace"}},"not-obj":"should-be-outside-obj"`,
f: func(e Encoder) {
- e.AddObject("obj", maybeNamespace{true})
+ assert.NoError(t, e.AddObject("obj", maybeNamespace{true}))
e.AddString("not-obj", "should-be-outside-obj")
},
},
@@ -265,7 +268,7 @@ func TestJSONEncoderObjectFields(t *testing.T) {
desc: "multiple open namespaces",
expected: `"k":{"foo":1,"middle":{"foo":2,"inner":{"foo":3}}}`,
f: func(e Encoder) {
- e.AddObject("k", ObjectMarshalerFunc(func(enc ObjectEncoder) error {
+ err := e.AddObject("k", ObjectMarshalerFunc(func(enc ObjectEncoder) error {
e.AddInt("foo", 1)
e.OpenNamespace("middle")
e.AddInt("foo", 2)
@@ -273,6 +276,7 @@ func TestJSONEncoderObjectFields(t *testing.T) {
e.AddInt("foo", 3)
return nil
}))
+ assert.NoError(t, err)
},
},
}
@@ -289,10 +293,11 @@ func TestJSONEncoderTimeFormats(t *testing.T) {
f := func(e Encoder) {
e.AddTime("k", date)
- e.AddArray("a", ArrayMarshalerFunc(func(enc ArrayEncoder) error {
+ err := e.AddArray("a", ArrayMarshalerFunc(func(enc ArrayEncoder) error {
enc.AppendTime(date)
return nil
}))
+ assert.NoError(t, err)
}
tests := []struct {
desc string
@@ -420,7 +425,7 @@ func TestJSONEncoderArrays(t *testing.T) {
desc: "object (no nested namespace) then string",
expected: `[{"obj-out":"obj-outside-namespace"},"should-be-outside-obj",{"obj-out":"obj-outside-namespace"},"should-be-outside-obj"]`,
f: func(arr ArrayEncoder) {
- arr.AppendObject(maybeNamespace{false})
+ assert.NoError(t, arr.AppendObject(maybeNamespace{false}))
arr.AppendString("should-be-outside-obj")
},
},
@@ -428,7 +433,7 @@ func TestJSONEncoderArrays(t *testing.T) {
desc: "object (with nested namespace) then string",
expected: `[{"obj-out":"obj-outside-namespace","obj-namespace":{"obj-in":"obj-inside-namespace"}},"should-be-outside-obj",{"obj-out":"obj-outside-namespace","obj-namespace":{"obj-in":"obj-inside-namespace"}},"should-be-outside-obj"]`,
f: func(arr ArrayEncoder) {
- arr.AppendObject(maybeNamespace{true})
+ assert.NoError(t, arr.AppendObject(maybeNamespace{true}))
arr.AppendString("should-be-outside-obj")
},
},
@@ -530,10 +535,13 @@ type turducken struct{}
func (t turducken) MarshalLogObject(enc ObjectEncoder) error {
return enc.AddArray("ducks", ArrayMarshalerFunc(func(arr ArrayEncoder) error {
for i := 0; i < 2; i++ {
- arr.AppendObject(ObjectMarshalerFunc(func(inner ObjectEncoder) error {
+ err := arr.AppendObject(ObjectMarshalerFunc(func(inner ObjectEncoder) error {
inner.AddString("in", "chicken")
return nil
}))
+ if err != nil {
+ return err
+ }
}
return nil
}))
@@ -657,3 +665,72 @@ func TestJSONQuick(t *testing.T) {
check(asciiRoundTripsCorrectlyString)
check(asciiRoundTripsCorrectlyByteString)
}
+
+var _stringLikeCorpus = []string{
+ "",
+ "foo",
+ "bar",
+ "a\nb",
+ "a\tb",
+ "a\\b",
+ `a"b`,
+}
+
+func FuzzSafeAppendStringLike_bytes(f *testing.F) {
+ for _, s := range _stringLikeCorpus {
+ f.Add([]byte(s))
+ }
+ f.Fuzz(func(t *testing.T, b []byte) {
+ if !utf8.Valid(b) {
+ t.Skip()
+ }
+
+ fuzzSafeAppendStringLike(t, string(b), func(buf *buffer.Buffer) {
+ safeAppendStringLike(
+ (*buffer.Buffer).AppendBytes,
+ utf8.DecodeRune,
+ buf,
+ b,
+ )
+ })
+ })
+}
+
+func FuzzSafeAppendStringLike_string(f *testing.F) {
+ for _, s := range _stringLikeCorpus {
+ f.Add(s)
+ }
+ f.Fuzz(func(t *testing.T, s string) {
+ if !utf8.ValidString(s) {
+ t.Skip()
+ }
+
+ fuzzSafeAppendStringLike(t, s, func(buf *buffer.Buffer) {
+ safeAppendStringLike(
+ (*buffer.Buffer).AppendString,
+ utf8.DecodeRuneInString,
+ buf,
+ s,
+ )
+ })
+ })
+}
+
+func fuzzSafeAppendStringLike(
+ t *testing.T,
+ want string,
+ writeString func(*buffer.Buffer),
+) {
+ t.Helper()
+
+ buf := bufferpool.Get()
+ defer buf.Free()
+
+ buf.AppendByte('"')
+ writeString(buf)
+ buf.AppendByte('"')
+
+ var got string
+ require.NoError(t, json.Unmarshal(buf.Bytes(), &got))
+ assert.Equal(t, want, got)
+}
diff --git a/dependencies/pkg/mod/go.uber.org/zap@v1.25.0/zapcore/json_encoder_test.go b/dependencies/pkg/mod/go.uber.org/zap@v1.27.0/zapcore/json_encoder_test.go
index 4c651cf..b215025 100644
--- a/dependencies/pkg/mod/go.uber.org/zap@v1.25.0/zapcore/json_encoder_test.go
+++ b/dependencies/pkg/mod/go.uber.org/zap@v1.27.0/zapcore/json_encoder_test.go
@@ -109,6 +109,20 @@ func TestJSONEncodeEntry(t *testing.T) {
}),
},
},
+ {
+ desc: "zero_time_omitted",
+ expected: `{
+ "L": "info",
+ "N": "name",
+ "M": "message"
+ }`,
+ ent: zapcore.Entry{
+ Level: zapcore.InfoLevel,
+ Time: time.Time{},
+ LoggerName: "name",
+ Message: "message",
+ },
+ },
}
enc := zapcore.NewJSONEncoder(zapcore.EncoderConfig{
diff --git a/dependencies/pkg/mod/go.uber.org/zap@v1.25.0/internal/ztest/clock.go b/dependencies/pkg/mod/go.uber.org/zap@v1.27.0/zapcore/lazy_with.go
index fe8026d..05288d6 100644
--- a/dependencies/pkg/mod/go.uber.org/zap@v1.25.0/internal/ztest/clock.go
+++ b/dependencies/pkg/mod/go.uber.org/zap@v1.27.0/zapcore/lazy_with.go
@@ -1,4 +1,4 @@
-// Copyright (c) 2021 Uber Technologies, Inc.
+// Copyright (c) 2023 Uber Technologies, Inc.
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
@@ -18,33 +18,37 @@
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
// THE SOFTWARE.
-package ztest
+package zapcore
-import (
- "time"
+import "sync"
- "github.com/benbjohnson/clock"
-)
-
-// MockClock provides control over the time.
-type MockClock struct{ m *clock.Mock }
+type lazyWithCore struct {
+ Core
+ sync.Once
+ fields []Field
+}
-// NewMockClock builds a new mock clock that provides control of time.
-func NewMockClock() *MockClock {
- return &MockClock{clock.NewMock()}
+// NewLazyWith wraps a Core with a "lazy" Core that will only encode fields if
+// the logger is written to (or is further chained in a lon-lazy manner).
+func NewLazyWith(core Core, fields []Field) Core {
+ return &lazyWithCore{
+ Core: core,
+ fields: fields,
+ }
}
-// Now reports the current time.
-func (c *MockClock) Now() time.Time {
- return c.m.Now()
+func (d *lazyWithCore) initOnce() {
+ d.Once.Do(func() {
+ d.Core = d.Core.With(d.fields)
+ })
}
-// NewTicker returns a time.Ticker that ticks at the specified frequency.
-func (c *MockClock) NewTicker(d time.Duration) *time.Ticker {
- return &time.Ticker{C: c.m.Ticker(d).C}
+func (d *lazyWithCore) With(fields []Field) Core {
+ d.initOnce()
+ return d.Core.With(fields)
}
-// Add progresses time by the given duration.
-func (c *MockClock) Add(d time.Duration) {
- c.m.Add(d)
+func (d *lazyWithCore) Check(e Entry, ce *CheckedEntry) *CheckedEntry {
+ d.initOnce()
+ return d.Core.Check(e, ce)
}
diff --git a/dependencies/pkg/mod/go.uber.org/zap@v1.27.0/zapcore/lazy_with_test.go b/dependencies/pkg/mod/go.uber.org/zap@v1.27.0/zapcore/lazy_with_test.go
new file mode 100644
index 0000000..c86b59e
--- /dev/null
+++ b/dependencies/pkg/mod/go.uber.org/zap@v1.27.0/zapcore/lazy_with_test.go
@@ -0,0 +1,154 @@
+// Copyright (c) 2023 Uber Technologies, Inc.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+package zapcore_test
+
+import (
+ "sync/atomic"
+ "testing"
+
+ "github.com/stretchr/testify/assert"
+ "go.uber.org/zap/zapcore"
+ "go.uber.org/zap/zaptest/observer"
+)
+
+type proxyCore struct {
+ zapcore.Core
+
+ withCount atomic.Int64
+ checkCount atomic.Int64
+}
+
+func newProxyCore(inner zapcore.Core) *proxyCore {
+ return &proxyCore{Core: inner}
+}
+
+func (p *proxyCore) With(fields []zapcore.Field) zapcore.Core {
+ p.withCount.Add(1)
+ return p.Core.With(fields)
+}
+
+func (p *proxyCore) Check(e zapcore.Entry, ce *zapcore.CheckedEntry) *zapcore.CheckedEntry {
+ p.checkCount.Add(1)
+ return p.Core.Check(e, ce)
+}
+
+func withLazyCore(f func(zapcore.Core, *proxyCore, *observer.ObservedLogs), initialFields ...zapcore.Field) {
+ infoLogger, infoLogs := observer.New(zapcore.InfoLevel)
+ proxyCore := newProxyCore(infoLogger)
+ lazyCore := zapcore.NewLazyWith(proxyCore, initialFields)
+ f(lazyCore, proxyCore, infoLogs)
+}
+
+func TestLazyCore(t *testing.T) {
+ tests := []struct {
+ name string
+ entries []zapcore.Entry
+ initialFields []zapcore.Field
+ withChains [][]zapcore.Field
+ wantLogs []observer.LoggedEntry
+ }{
+ {
+ name: "no logging, no with, inner core with never called, inner core check never called",
+ wantLogs: []observer.LoggedEntry{},
+ },
+ {
+ name: "2 logs, 1 dropped, no with, inner core with called once, inner core check never called",
+ entries: []zapcore.Entry{
+ {Level: zapcore.DebugLevel, Message: "log-at-debug"},
+ {Level: zapcore.WarnLevel, Message: "log-at-warn"},
+ },
+ wantLogs: []observer.LoggedEntry{
+ {
+ Entry: zapcore.Entry{
+ Level: zapcore.WarnLevel,
+ Message: "log-at-warn",
+ },
+ Context: []zapcore.Field{},
+ },
+ },
+ },
+ {
+ name: "no logs, 2-chained with, inner core with called once, inner core check never called",
+ withChains: [][]zapcore.Field{
+ {makeInt64Field("a", 11), makeInt64Field("b", 22)},
+ {makeInt64Field("c", 33), makeInt64Field("d", 44)},
+ },
+ wantLogs: []observer.LoggedEntry{},
+ },
+ {
+ name: "2 logs, 1 dropped, 2-chained with, inner core with called once, inner core check never called",
+ entries: []zapcore.Entry{
+ {Level: zapcore.DebugLevel, Message: "log-at-debug"},
+ {Level: zapcore.WarnLevel, Message: "log-at-warn"},
+ },
+ withChains: [][]zapcore.Field{
+ {makeInt64Field("a", 11), makeInt64Field("b", 22)},
+ {makeInt64Field("c", 33), makeInt64Field("d", 44)},
+ },
+ wantLogs: []observer.LoggedEntry{
+ {
+ Entry: zapcore.Entry{
+ Level: zapcore.WarnLevel,
+ Message: "log-at-warn",
+ },
+ Context: []zapcore.Field{
+ makeInt64Field("a", 11),
+ makeInt64Field("b", 22),
+ makeInt64Field("c", 33),
+ makeInt64Field("d", 44),
+ },
+ },
+ },
+ },
+ }
+ for _, tt := range tests {
+ t.Run(tt.name, func(t *testing.T) {
+ withLazyCore(func(lazy zapcore.Core, proxy *proxyCore, logs *observer.ObservedLogs) {
+ checkCounts := func(withCount int64, msg string) {
+ assert.Equal(t, withCount, proxy.withCount.Load(), msg)
+ }
+ checkCounts(0, "expected no with calls because the logger is not used yet")
+
+ for _, chain := range tt.withChains {
+ lazy = lazy.With(chain)
+ }
+ if len(tt.withChains) > 0 {
+ checkCounts(1, "expected with calls because the logger was with-chained")
+ } else {
+ checkCounts(0, "expected no with calls because the logger is not used yet")
+ }
+
+ for _, ent := range tt.entries {
+ if ce := lazy.Check(ent, nil); ce != nil {
+ ce.Write()
+ }
+ }
+ if len(tt.entries) > 0 || len(tt.withChains) > 0 {
+ checkCounts(1, "expected with calls because the logger had entries or with chains")
+ } else {
+ checkCounts(0, "expected no with calls because the logger is not used yet")
+ }
+ assert.Zero(t, proxy.checkCount.Load(), "expected no check calls because the inner core is copied")
+ assert.Equal(t, tt.wantLogs, logs.AllUntimed())
+ }, tt.initialFields...)
+ })
+ }
+}
diff --git a/dependencies/pkg/mod/go.uber.org/zap@v1.25.0/zapcore/leak_test.go b/dependencies/pkg/mod/go.uber.org/zap@v1.27.0/zapcore/leak_test.go
index 4ef412e..4ef412e 100644
--- a/dependencies/pkg/mod/go.uber.org/zap@v1.25.0/zapcore/leak_test.go
+++ b/dependencies/pkg/mod/go.uber.org/zap@v1.27.0/zapcore/leak_test.go
diff --git a/dependencies/pkg/mod/go.uber.org/zap@v1.25.0/zapcore/level.go b/dependencies/pkg/mod/go.uber.org/zap@v1.27.0/zapcore/level.go
index e01a241..e01a241 100644
--- a/dependencies/pkg/mod/go.uber.org/zap@v1.25.0/zapcore/level.go
+++ b/dependencies/pkg/mod/go.uber.org/zap@v1.27.0/zapcore/level.go
diff --git a/dependencies/pkg/mod/go.uber.org/zap@v1.25.0/zapcore/level_strings.go b/dependencies/pkg/mod/go.uber.org/zap@v1.27.0/zapcore/level_strings.go
index 7af8dad..7af8dad 100644
--- a/dependencies/pkg/mod/go.uber.org/zap@v1.25.0/zapcore/level_strings.go
+++ b/dependencies/pkg/mod/go.uber.org/zap@v1.27.0/zapcore/level_strings.go
diff --git a/dependencies/pkg/mod/go.uber.org/zap@v1.25.0/zapcore/level_strings_test.go b/dependencies/pkg/mod/go.uber.org/zap@v1.27.0/zapcore/level_strings_test.go
index 14b0bac..14b0bac 100644
--- a/dependencies/pkg/mod/go.uber.org/zap@v1.25.0/zapcore/level_strings_test.go
+++ b/dependencies/pkg/mod/go.uber.org/zap@v1.27.0/zapcore/level_strings_test.go
diff --git a/dependencies/pkg/mod/go.uber.org/zap@v1.25.0/zapcore/level_test.go b/dependencies/pkg/mod/go.uber.org/zap@v1.27.0/zapcore/level_test.go
index ab97c98..d8eb962 100644
--- a/dependencies/pkg/mod/go.uber.org/zap@v1.25.0/zapcore/level_test.go
+++ b/dependencies/pkg/mod/go.uber.org/zap@v1.27.0/zapcore/level_test.go
@@ -159,7 +159,7 @@ func TestLevelNils(t *testing.T) {
}, "Level(nil).String() should panic")
assert.Panics(t, func() {
- l.MarshalText()
+ _, _ = l.MarshalText() // should panic
}, "Expected to panic when marshalling a nil level.")
err := l.UnmarshalText([]byte("debug"))
diff --git a/dependencies/pkg/mod/go.uber.org/zap@v1.25.0/zapcore/marshaler.go b/dependencies/pkg/mod/go.uber.org/zap@v1.27.0/zapcore/marshaler.go
index c3c55ba..c3c55ba 100644
--- a/dependencies/pkg/mod/go.uber.org/zap@v1.25.0/zapcore/marshaler.go
+++ b/dependencies/pkg/mod/go.uber.org/zap@v1.27.0/zapcore/marshaler.go
diff --git a/dependencies/pkg/mod/go.uber.org/zap@v1.25.0/zapcore/memory_encoder.go b/dependencies/pkg/mod/go.uber.org/zap@v1.27.0/zapcore/memory_encoder.go
index dfead08..dfead08 100644
--- a/dependencies/pkg/mod/go.uber.org/zap@v1.25.0/zapcore/memory_encoder.go
+++ b/dependencies/pkg/mod/go.uber.org/zap@v1.27.0/zapcore/memory_encoder.go
diff --git a/dependencies/pkg/mod/go.uber.org/zap@v1.25.0/zapcore/memory_encoder_test.go b/dependencies/pkg/mod/go.uber.org/zap@v1.27.0/zapcore/memory_encoder_test.go
index 052bdb0..d5f215f 100644
--- a/dependencies/pkg/mod/go.uber.org/zap@v1.25.0/zapcore/memory_encoder_test.go
+++ b/dependencies/pkg/mod/go.uber.org/zap@v1.27.0/zapcore/memory_encoder_test.go
@@ -218,7 +218,7 @@ func TestMapObjectEncoderAdd(t *testing.T) {
desc: "object (no nested namespace) then string",
f: func(e ObjectEncoder) {
e.OpenNamespace("k")
- e.AddObject("obj", maybeNamespace{false})
+ assert.NoError(t, e.AddObject("obj", maybeNamespace{false}))
e.AddString("not-obj", "should-be-outside-obj")
},
expected: map[string]interface{}{
@@ -232,7 +232,7 @@ func TestMapObjectEncoderAdd(t *testing.T) {
desc: "object (with nested namespace) then string",
f: func(e ObjectEncoder) {
e.OpenNamespace("k")
- e.AddObject("obj", maybeNamespace{true})
+ assert.NoError(t, e.AddObject("obj", maybeNamespace{true}))
e.AddString("not-obj", "should-be-outside-obj")
},
expected: map[string]interface{}{
@@ -255,6 +255,7 @@ func TestMapObjectEncoderAdd(t *testing.T) {
})
}
}
+
func TestSliceArrayEncoderAppend(t *testing.T) {
tests := []struct {
desc string
@@ -284,29 +285,33 @@ func TestSliceArrayEncoderAppend(t *testing.T) {
{"AppendUint8", func(e ArrayEncoder) { e.AppendUint8(42) }, uint8(42)},
{"AppendUintptr", func(e ArrayEncoder) { e.AppendUintptr(42) }, uintptr(42)},
{
- desc: "AppendReflected",
- f: func(e ArrayEncoder) { e.AppendReflected(map[string]interface{}{"foo": 5}) },
+ desc: "AppendReflected",
+ f: func(e ArrayEncoder) {
+ assert.NoError(t, e.AppendReflected(map[string]interface{}{"foo": 5}))
+ },
expected: map[string]interface{}{"foo": 5},
},
{
desc: "AppendArray (arrays of arrays)",
f: func(e ArrayEncoder) {
- e.AppendArray(ArrayMarshalerFunc(func(inner ArrayEncoder) error {
+ err := e.AppendArray(ArrayMarshalerFunc(func(inner ArrayEncoder) error {
inner.AppendBool(true)
inner.AppendBool(false)
return nil
}))
+ assert.NoError(t, err)
},
expected: []interface{}{true, false},
},
{
desc: "object (no nested namespace) then string",
f: func(e ArrayEncoder) {
- e.AppendArray(ArrayMarshalerFunc(func(inner ArrayEncoder) error {
- inner.AppendObject(maybeNamespace{false})
+ err := e.AppendArray(ArrayMarshalerFunc(func(inner ArrayEncoder) error {
+ err := inner.AppendObject(maybeNamespace{false})
inner.AppendString("should-be-outside-obj")
- return nil
+ return err
}))
+ assert.NoError(t, err)
},
expected: []interface{}{
map[string]interface{}{
@@ -318,11 +323,12 @@ func TestSliceArrayEncoderAppend(t *testing.T) {
{
desc: "object (with nested namespace) then string",
f: func(e ArrayEncoder) {
- e.AppendArray(ArrayMarshalerFunc(func(inner ArrayEncoder) error {
- inner.AppendObject(maybeNamespace{true})
+ err := e.AppendArray(ArrayMarshalerFunc(func(inner ArrayEncoder) error {
+ err := inner.AppendObject(maybeNamespace{true})
inner.AppendString("should-be-outside-obj")
- return nil
+ return err
}))
+ assert.NoError(t, err)
},
expected: []interface{}{
map[string]interface{}{
diff --git a/dependencies/pkg/mod/go.uber.org/zap@v1.25.0/zapcore/reflected_encoder.go b/dependencies/pkg/mod/go.uber.org/zap@v1.27.0/zapcore/reflected_encoder.go
index 8746360..8746360 100644
--- a/dependencies/pkg/mod/go.uber.org/zap@v1.25.0/zapcore/reflected_encoder.go
+++ b/dependencies/pkg/mod/go.uber.org/zap@v1.27.0/zapcore/reflected_encoder.go
diff --git a/dependencies/pkg/mod/go.uber.org/zap@v1.25.0/zapcore/sampler.go b/dependencies/pkg/mod/go.uber.org/zap@v1.27.0/zapcore/sampler.go
index b7c093a..b7c093a 100644
--- a/dependencies/pkg/mod/go.uber.org/zap@v1.25.0/zapcore/sampler.go
+++ b/dependencies/pkg/mod/go.uber.org/zap@v1.27.0/zapcore/sampler.go
diff --git a/dependencies/pkg/mod/go.uber.org/zap@v1.25.0/zapcore/sampler_bench_test.go b/dependencies/pkg/mod/go.uber.org/zap@v1.27.0/zapcore/sampler_bench_test.go
index 100e226..1b250cd 100644
--- a/dependencies/pkg/mod/go.uber.org/zap@v1.25.0/zapcore/sampler_bench_test.go
+++ b/dependencies/pkg/mod/go.uber.org/zap@v1.27.0/zapcore/sampler_bench_test.go
@@ -28,6 +28,8 @@ import (
"github.com/stretchr/testify/assert"
"go.uber.org/zap/internal/ztest"
+
+ //revive:disable:dot-imports
. "go.uber.org/zap/zapcore"
)
diff --git a/dependencies/pkg/mod/go.uber.org/zap@v1.25.0/zapcore/sampler_test.go b/dependencies/pkg/mod/go.uber.org/zap@v1.27.0/zapcore/sampler_test.go
index df726a2..55b4afa 100644
--- a/dependencies/pkg/mod/go.uber.org/zap@v1.25.0/zapcore/sampler_test.go
+++ b/dependencies/pkg/mod/go.uber.org/zap@v1.27.0/zapcore/sampler_test.go
@@ -29,6 +29,7 @@ import (
"time"
"go.uber.org/zap/internal/ztest"
+ //revive:disable:dot-imports
. "go.uber.org/zap/zapcore"
"go.uber.org/zap/zaptest/observer"
diff --git a/dependencies/pkg/mod/go.uber.org/zap@v1.25.0/zapcore/tee.go b/dependencies/pkg/mod/go.uber.org/zap@v1.27.0/zapcore/tee.go
index 9bb32f0..9bb32f0 100644
--- a/dependencies/pkg/mod/go.uber.org/zap@v1.25.0/zapcore/tee.go
+++ b/dependencies/pkg/mod/go.uber.org/zap@v1.27.0/zapcore/tee.go
diff --git a/dependencies/pkg/mod/go.uber.org/zap@v1.25.0/zapcore/tee_logger_bench_test.go b/dependencies/pkg/mod/go.uber.org/zap@v1.27.0/zapcore/tee_logger_bench_test.go
index b30a173..d2fc42b 100644
--- a/dependencies/pkg/mod/go.uber.org/zap@v1.25.0/zapcore/tee_logger_bench_test.go
+++ b/dependencies/pkg/mod/go.uber.org/zap@v1.27.0/zapcore/tee_logger_bench_test.go
@@ -24,6 +24,7 @@ import (
"testing"
"go.uber.org/zap/internal/ztest"
+ //revive:disable:dot-imports
. "go.uber.org/zap/zapcore"
)
diff --git a/dependencies/pkg/mod/go.uber.org/zap@v1.25.0/zapcore/tee_test.go b/dependencies/pkg/mod/go.uber.org/zap@v1.27.0/zapcore/tee_test.go
index b2b9c9d..f6b14eb 100644
--- a/dependencies/pkg/mod/go.uber.org/zap@v1.25.0/zapcore/tee_test.go
+++ b/dependencies/pkg/mod/go.uber.org/zap@v1.27.0/zapcore/tee_test.go
@@ -25,6 +25,7 @@ import (
"testing"
"go.uber.org/zap/internal/ztest"
+ //revive:disable:dot-imports
. "go.uber.org/zap/zapcore"
"go.uber.org/zap/zaptest/observer"
@@ -120,7 +121,7 @@ func TestTeeWrite(t *testing.T) {
debugEntry := Entry{Level: DebugLevel, Message: "log-at-debug"}
warnEntry := Entry{Level: WarnLevel, Message: "log-at-warn"}
for _, ent := range []Entry{debugEntry, warnEntry} {
- tee.Write(ent, nil)
+ assert.NoError(t, tee.Write(ent, nil))
}
for _, logs := range []*observer.ObservedLogs{debugLogs, warnLogs} {
diff --git a/dependencies/pkg/mod/go.uber.org/zap@v1.25.0/zapcore/write_syncer.go b/dependencies/pkg/mod/go.uber.org/zap@v1.27.0/zapcore/write_syncer.go
index d4a1af3..d4a1af3 100644
--- a/dependencies/pkg/mod/go.uber.org/zap@v1.25.0/zapcore/write_syncer.go
+++ b/dependencies/pkg/mod/go.uber.org/zap@v1.27.0/zapcore/write_syncer.go
diff --git a/dependencies/pkg/mod/go.uber.org/zap@v1.25.0/zapcore/write_syncer_bench_test.go b/dependencies/pkg/mod/go.uber.org/zap@v1.27.0/zapcore/write_syncer_bench_test.go
index db6ec45..90ae475 100644
--- a/dependencies/pkg/mod/go.uber.org/zap@v1.25.0/zapcore/write_syncer_bench_test.go
+++ b/dependencies/pkg/mod/go.uber.org/zap@v1.27.0/zapcore/write_syncer_bench_test.go
@@ -24,6 +24,7 @@ import (
"os"
"testing"
+ "github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"go.uber.org/zap/internal/ztest"
)
@@ -37,7 +38,9 @@ func BenchmarkMultiWriteSyncer(b *testing.B) {
b.ResetTimer()
b.RunParallel(func(pb *testing.PB) {
for pb.Next() {
- w.Write([]byte("foobarbazbabble"))
+ if _, err := w.Write([]byte("foobarbazbabble")); err != nil {
+ b.Fatal(err)
+ }
}
})
})
@@ -51,7 +54,9 @@ func BenchmarkMultiWriteSyncer(b *testing.B) {
b.ResetTimer()
b.RunParallel(func(pb *testing.PB) {
for pb.Next() {
- w.Write([]byte("foobarbazbabble"))
+ if _, err := w.Write([]byte("foobarbazbabble")); err != nil {
+ b.Fatal(err)
+ }
}
})
})
@@ -64,11 +69,15 @@ func BenchmarkMultiWriteSyncer(b *testing.B) {
&ztest.Discarder{},
),
}
- defer w.Stop()
+ defer func() {
+ assert.NoError(b, w.Stop(), "Unexpected error stopping buffered write syncer.")
+ }()
b.ResetTimer()
b.RunParallel(func(pb *testing.PB) {
for pb.Next() {
- w.Write([]byte("foobarbazbabble"))
+ if _, err := w.Write([]byte("foobarbazbabble")); err != nil {
+ b.Fatal(err)
+ }
}
})
})
@@ -83,7 +92,9 @@ func BenchmarkWriteSyncer(b *testing.B) {
b.ResetTimer()
b.RunParallel(func(pb *testing.PB) {
for pb.Next() {
- w.Write([]byte("foobarbazbabble"))
+ if _, err := w.Write([]byte("foobarbazbabble")); err != nil {
+ b.Fatal(err)
+ }
}
})
})
diff --git a/dependencies/pkg/mod/go.uber.org/zap@v1.25.0/zapcore/write_syncer_test.go b/dependencies/pkg/mod/go.uber.org/zap@v1.27.0/zapcore/write_syncer_test.go
index 4748be7..c0c2698 100644
--- a/dependencies/pkg/mod/go.uber.org/zap@v1.25.0/zapcore/write_syncer_test.go
+++ b/dependencies/pkg/mod/go.uber.org/zap@v1.27.0/zapcore/write_syncer_test.go
@@ -70,7 +70,7 @@ func TestNewMultiWriteSyncerWorksForSingleWriter(t *testing.T) {
ws := NewMultiWriteSyncer(w)
assert.Equal(t, w, ws, "Expected NewMultiWriteSyncer to return the same WriteSyncer object for a single argument.")
- ws.Sync()
+ assert.NoError(t, ws.Sync(), "Expected Sync to succeed.")
assert.True(t, w.Called(), "Expected Sync to be called on the created WriteSyncer")
}
diff --git a/dependencies/pkg/mod/go.uber.org/zap@v1.25.0/zapgrpc/zapgrpc.go b/dependencies/pkg/mod/go.uber.org/zap@v1.27.0/zapgrpc/zapgrpc.go
index 6823773..682de25 100644
--- a/dependencies/pkg/mod/go.uber.org/zap@v1.25.0/zapgrpc/zapgrpc.go
+++ b/dependencies/pkg/mod/go.uber.org/zap@v1.27.0/zapgrpc/zapgrpc.go
@@ -36,16 +36,14 @@ const (
grpcLvlFatal
)
-var (
- // _grpcToZapLevel maps gRPC log levels to zap log levels.
- // See https://pkg.go.dev/go.uber.org/zap@v1.16.0/zapcore#Level
- _grpcToZapLevel = map[int]zapcore.Level{
- grpcLvlInfo: zapcore.InfoLevel,
- grpcLvlWarn: zapcore.WarnLevel,
- grpcLvlError: zapcore.ErrorLevel,
- grpcLvlFatal: zapcore.FatalLevel,
- }
-)
+// _grpcToZapLevel maps gRPC log levels to zap log levels.
+// See https://pkg.go.dev/go.uber.org/zap@v1.16.0/zapcore#Level
+var _grpcToZapLevel = map[int]zapcore.Level{
+ grpcLvlInfo: zapcore.InfoLevel,
+ grpcLvlWarn: zapcore.WarnLevel,
+ grpcLvlError: zapcore.ErrorLevel,
+ grpcLvlFatal: zapcore.FatalLevel,
+}
// An Option overrides a Logger's default configuration.
type Option interface {
diff --git a/dependencies/pkg/mod/go.uber.org/zap@v1.25.0/zapgrpc/zapgrpc_test.go b/dependencies/pkg/mod/go.uber.org/zap@v1.27.0/zapgrpc/zapgrpc_test.go
index a231d65..a231d65 100644
--- a/dependencies/pkg/mod/go.uber.org/zap@v1.25.0/zapgrpc/zapgrpc_test.go
+++ b/dependencies/pkg/mod/go.uber.org/zap@v1.27.0/zapgrpc/zapgrpc_test.go
diff --git a/dependencies/pkg/mod/go.uber.org/zap@v1.25.0/zapio/example_test.go b/dependencies/pkg/mod/go.uber.org/zap@v1.27.0/zapio/example_test.go
index e9565db..e9565db 100644
--- a/dependencies/pkg/mod/go.uber.org/zap@v1.25.0/zapio/example_test.go
+++ b/dependencies/pkg/mod/go.uber.org/zap@v1.27.0/zapio/example_test.go
diff --git a/dependencies/pkg/mod/go.uber.org/zap@v1.25.0/zapio/writer.go b/dependencies/pkg/mod/go.uber.org/zap@v1.27.0/zapio/writer.go
index a87d910..a87d910 100644
--- a/dependencies/pkg/mod/go.uber.org/zap@v1.25.0/zapio/writer.go
+++ b/dependencies/pkg/mod/go.uber.org/zap@v1.27.0/zapio/writer.go
diff --git a/dependencies/pkg/mod/go.uber.org/zap@v1.25.0/zapio/writer_test.go b/dependencies/pkg/mod/go.uber.org/zap@v1.27.0/zapio/writer_test.go
index 9bdf348..9bdf348 100644
--- a/dependencies/pkg/mod/go.uber.org/zap@v1.25.0/zapio/writer_test.go
+++ b/dependencies/pkg/mod/go.uber.org/zap@v1.27.0/zapio/writer_test.go
diff --git a/dependencies/pkg/mod/go.uber.org/zap@v1.25.0/zaptest/doc.go b/dependencies/pkg/mod/go.uber.org/zap@v1.27.0/zaptest/doc.go
index b377859..b377859 100644
--- a/dependencies/pkg/mod/go.uber.org/zap@v1.25.0/zaptest/doc.go
+++ b/dependencies/pkg/mod/go.uber.org/zap@v1.27.0/zaptest/doc.go
diff --git a/dependencies/pkg/mod/go.uber.org/zap@v1.25.0/zaptest/logger.go b/dependencies/pkg/mod/go.uber.org/zap@v1.27.0/zaptest/logger.go
index 6a4a354..4734c33 100644
--- a/dependencies/pkg/mod/go.uber.org/zap@v1.25.0/zaptest/logger.go
+++ b/dependencies/pkg/mod/go.uber.org/zap@v1.27.0/zaptest/logger.go
@@ -82,7 +82,7 @@ func NewLogger(t TestingT, opts ...LoggerOption) *zap.Logger {
o.applyLoggerOption(&cfg)
}
- writer := newTestingWriter(t)
+ writer := NewTestingWriter(t)
zapOptions := []zap.Option{
// Send zap errors to the same writer and mark the test as failed if
// that happens.
@@ -100,27 +100,43 @@ func NewLogger(t TestingT, opts ...LoggerOption) *zap.Logger {
)
}
-// testingWriter is a WriteSyncer that writes to the given testing.TB.
-type testingWriter struct {
+// TestingWriter is a WriteSyncer that writes to the given testing.TB.
+type TestingWriter struct {
t TestingT
- // If true, the test will be marked as failed if this testingWriter is
+ // If true, the test will be marked as failed if this TestingWriter is
// ever used.
markFailed bool
}
-func newTestingWriter(t TestingT) testingWriter {
- return testingWriter{t: t}
+// NewTestingWriter builds a new TestingWriter that writes to the given
+// testing.TB.
+//
+// Use this if you need more flexibility when creating *zap.Logger
+// than zaptest.NewLogger() provides.
+//
+// E.g., if you want to use custom core with zaptest.TestingWriter:
+//
+// encoder := newCustomEncoder()
+// writer := zaptest.NewTestingWriter(t)
+// level := zap.NewAtomicLevelAt(zapcore.DebugLevel)
+//
+// core := newCustomCore(encoder, writer, level)
+//
+// logger := zap.New(core, zap.AddCaller())
+func NewTestingWriter(t TestingT) TestingWriter {
+ return TestingWriter{t: t}
}
-// WithMarkFailed returns a copy of this testingWriter with markFailed set to
+// WithMarkFailed returns a copy of this TestingWriter with markFailed set to
// the provided value.
-func (w testingWriter) WithMarkFailed(v bool) testingWriter {
+func (w TestingWriter) WithMarkFailed(v bool) TestingWriter {
w.markFailed = v
return w
}
-func (w testingWriter) Write(p []byte) (n int, err error) {
+// Write writes bytes from p to the underlying testing.TB.
+func (w TestingWriter) Write(p []byte) (n int, err error) {
n = len(p)
// Strip trailing newline because t.Log always adds one.
@@ -135,6 +151,7 @@ func (w testingWriter) Write(p []byte) (n int, err error) {
return n, nil
}
-func (w testingWriter) Sync() error {
+// Sync commits the current contents (a no-op for TestingWriter).
+func (w TestingWriter) Sync() error {
return nil
}
diff --git a/dependencies/pkg/mod/go.uber.org/zap@v1.25.0/zaptest/logger_test.go b/dependencies/pkg/mod/go.uber.org/zap@v1.27.0/zaptest/logger_test.go
index 576f682..40e368b 100644
--- a/dependencies/pkg/mod/go.uber.org/zap@v1.25.0/zaptest/logger_test.go
+++ b/dependencies/pkg/mod/go.uber.org/zap@v1.27.0/zaptest/logger_test.go
@@ -106,7 +106,7 @@ func TestTestLoggerSupportsWrappedZapOptions(t *testing.T) {
func TestTestingWriter(t *testing.T) {
ts := newTestLogSpy(t)
- w := newTestingWriter(ts)
+ w := NewTestingWriter(ts)
n, err := io.WriteString(w, "hello\n\n")
assert.NoError(t, err, "WriteString must not fail")
diff --git a/dependencies/pkg/mod/go.uber.org/zap@v1.25.0/zaptest/observer/logged_entry.go b/dependencies/pkg/mod/go.uber.org/zap@v1.27.0/zaptest/observer/logged_entry.go
index a4ea7ec..a4ea7ec 100644
--- a/dependencies/pkg/mod/go.uber.org/zap@v1.25.0/zaptest/observer/logged_entry.go
+++ b/dependencies/pkg/mod/go.uber.org/zap@v1.27.0/zaptest/observer/logged_entry.go
diff --git a/dependencies/pkg/mod/go.uber.org/zap@v1.25.0/zaptest/observer/logged_entry_test.go b/dependencies/pkg/mod/go.uber.org/zap@v1.27.0/zaptest/observer/logged_entry_test.go
index 50f6123..50f6123 100644
--- a/dependencies/pkg/mod/go.uber.org/zap@v1.25.0/zaptest/observer/logged_entry_test.go
+++ b/dependencies/pkg/mod/go.uber.org/zap@v1.27.0/zaptest/observer/logged_entry_test.go
diff --git a/dependencies/pkg/mod/go.uber.org/zap@v1.25.0/zaptest/observer/observer.go b/dependencies/pkg/mod/go.uber.org/zap@v1.27.0/zaptest/observer/observer.go
index f77f130..f77f130 100644
--- a/dependencies/pkg/mod/go.uber.org/zap@v1.25.0/zaptest/observer/observer.go
+++ b/dependencies/pkg/mod/go.uber.org/zap@v1.27.0/zaptest/observer/observer.go
diff --git a/dependencies/pkg/mod/go.uber.org/zap@v1.25.0/zaptest/observer/observer_test.go b/dependencies/pkg/mod/go.uber.org/zap@v1.27.0/zaptest/observer/observer_test.go
index 2a901b1..0cf631c 100644
--- a/dependencies/pkg/mod/go.uber.org/zap@v1.25.0/zaptest/observer/observer_test.go
+++ b/dependencies/pkg/mod/go.uber.org/zap@v1.27.0/zaptest/observer/observer_test.go
@@ -29,6 +29,8 @@ import (
"go.uber.org/zap"
"go.uber.org/zap/zapcore"
+
+ //revive:disable:dot-imports
. "go.uber.org/zap/zaptest/observer"
)
@@ -173,7 +175,7 @@ func TestFilters(t *testing.T) {
logger, sink := New(zap.InfoLevel)
for _, log := range logs {
- logger.Write(log.Entry, log.Context)
+ assert.NoError(t, logger.Write(log.Entry, log.Context), "Unexpected error writing log entry.")
}
tests := []struct {
diff --git a/dependencies/pkg/mod/go.uber.org/zap@v1.25.0/zaptest/testingt.go b/dependencies/pkg/mod/go.uber.org/zap@v1.27.0/zaptest/testingt.go
index 792463b..792463b 100644
--- a/dependencies/pkg/mod/go.uber.org/zap@v1.25.0/zaptest/testingt.go
+++ b/dependencies/pkg/mod/go.uber.org/zap@v1.27.0/zaptest/testingt.go
diff --git a/dependencies/pkg/mod/go.uber.org/zap@v1.25.0/zaptest/testingt_test.go b/dependencies/pkg/mod/go.uber.org/zap@v1.27.0/zaptest/testingt_test.go
index d847796..d847796 100644
--- a/dependencies/pkg/mod/go.uber.org/zap@v1.25.0/zaptest/testingt_test.go
+++ b/dependencies/pkg/mod/go.uber.org/zap@v1.27.0/zaptest/testingt_test.go
diff --git a/dependencies/pkg/mod/go.uber.org/zap@v1.25.0/zaptest/timeout.go b/dependencies/pkg/mod/go.uber.org/zap@v1.27.0/zaptest/timeout.go
index f0be444..f0be444 100644
--- a/dependencies/pkg/mod/go.uber.org/zap@v1.25.0/zaptest/timeout.go
+++ b/dependencies/pkg/mod/go.uber.org/zap@v1.27.0/zaptest/timeout.go
diff --git a/dependencies/pkg/mod/go.uber.org/zap@v1.25.0/zaptest/timeout_test.go b/dependencies/pkg/mod/go.uber.org/zap@v1.27.0/zaptest/timeout_test.go
index 3962ecd..3962ecd 100644
--- a/dependencies/pkg/mod/go.uber.org/zap@v1.25.0/zaptest/timeout_test.go
+++ b/dependencies/pkg/mod/go.uber.org/zap@v1.27.0/zaptest/timeout_test.go
diff --git a/dependencies/pkg/mod/go.uber.org/zap@v1.25.0/zaptest/writer.go b/dependencies/pkg/mod/go.uber.org/zap@v1.27.0/zaptest/writer.go
index 4b772f8..4b772f8 100644
--- a/dependencies/pkg/mod/go.uber.org/zap@v1.25.0/zaptest/writer.go
+++ b/dependencies/pkg/mod/go.uber.org/zap@v1.27.0/zaptest/writer.go
diff --git a/dependencies/pkg/mod/go.uber.org/zap@v1.25.0/zaptest/writer_test.go b/dependencies/pkg/mod/go.uber.org/zap@v1.27.0/zaptest/writer_test.go
index c18f18a..c18f18a 100644
--- a/dependencies/pkg/mod/go.uber.org/zap@v1.25.0/zaptest/writer_test.go
+++ b/dependencies/pkg/mod/go.uber.org/zap@v1.27.0/zaptest/writer_test.go
diff --git a/dependencies/pkg/mod/golang.org/x/sync@v0.3.0/syncmap/map.go b/dependencies/pkg/mod/golang.org/x/sync@v0.3.0/syncmap/map.go
deleted file mode 100644
index 4b638cb..0000000
--- a/dependencies/pkg/mod/golang.org/x/sync@v0.3.0/syncmap/map.go
+++ /dev/null
@@ -1,8 +0,0 @@
-// Copyright 2019 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// Package syncmap provides a concurrent map implementation.
-// This was the prototype for sync.Map which was added to the standard library's
-// sync package in Go 1.9. https://golang.org/pkg/sync/#Map.
-package syncmap
diff --git a/dependencies/pkg/mod/golang.org/x/sync@v0.3.0/syncmap/pre_go19.go b/dependencies/pkg/mod/golang.org/x/sync@v0.3.0/syncmap/pre_go19.go
deleted file mode 100644
index 5bba413..0000000
--- a/dependencies/pkg/mod/golang.org/x/sync@v0.3.0/syncmap/pre_go19.go
+++ /dev/null
@@ -1,371 +0,0 @@
-// Copyright 2016 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-//go:build !go1.9
-// +build !go1.9
-
-package syncmap
-
-import (
- "sync"
- "sync/atomic"
- "unsafe"
-)
-
-// Map is a concurrent map with amortized-constant-time loads, stores, and deletes.
-// It is safe for multiple goroutines to call a Map's methods concurrently.
-//
-// The zero Map is valid and empty.
-//
-// A Map must not be copied after first use.
-type Map struct {
- mu sync.Mutex
-
- // read contains the portion of the map's contents that are safe for
- // concurrent access (with or without mu held).
- //
- // The read field itself is always safe to load, but must only be stored with
- // mu held.
- //
- // Entries stored in read may be updated concurrently without mu, but updating
- // a previously-expunged entry requires that the entry be copied to the dirty
- // map and unexpunged with mu held.
- read atomic.Value // readOnly
-
- // dirty contains the portion of the map's contents that require mu to be
- // held. To ensure that the dirty map can be promoted to the read map quickly,
- // it also includes all of the non-expunged entries in the read map.
- //
- // Expunged entries are not stored in the dirty map. An expunged entry in the
- // clean map must be unexpunged and added to the dirty map before a new value
- // can be stored to it.
- //
- // If the dirty map is nil, the next write to the map will initialize it by
- // making a shallow copy of the clean map, omitting stale entries.
- dirty map[interface{}]*entry
-
- // misses counts the number of loads since the read map was last updated that
- // needed to lock mu to determine whether the key was present.
- //
- // Once enough misses have occurred to cover the cost of copying the dirty
- // map, the dirty map will be promoted to the read map (in the unamended
- // state) and the next store to the map will make a new dirty copy.
- misses int
-}
-
-// readOnly is an immutable struct stored atomically in the Map.read field.
-type readOnly struct {
- m map[interface{}]*entry
- amended bool // true if the dirty map contains some key not in m.
-}
-
-// expunged is an arbitrary pointer that marks entries which have been deleted
-// from the dirty map.
-var expunged = unsafe.Pointer(new(interface{}))
-
-// An entry is a slot in the map corresponding to a particular key.
-type entry struct {
- // p points to the interface{} value stored for the entry.
- //
- // If p == nil, the entry has been deleted and m.dirty == nil.
- //
- // If p == expunged, the entry has been deleted, m.dirty != nil, and the entry
- // is missing from m.dirty.
- //
- // Otherwise, the entry is valid and recorded in m.read.m[key] and, if m.dirty
- // != nil, in m.dirty[key].
- //
- // An entry can be deleted by atomic replacement with nil: when m.dirty is
- // next created, it will atomically replace nil with expunged and leave
- // m.dirty[key] unset.
- //
- // An entry's associated value can be updated by atomic replacement, provided
- // p != expunged. If p == expunged, an entry's associated value can be updated
- // only after first setting m.dirty[key] = e so that lookups using the dirty
- // map find the entry.
- p unsafe.Pointer // *interface{}
-}
-
-func newEntry(i interface{}) *entry {
- return &entry{p: unsafe.Pointer(&i)}
-}
-
-// Load returns the value stored in the map for a key, or nil if no
-// value is present.
-// The ok result indicates whether value was found in the map.
-func (m *Map) Load(key interface{}) (value interface{}, ok bool) {
- read, _ := m.read.Load().(readOnly)
- e, ok := read.m[key]
- if !ok && read.amended {
- m.mu.Lock()
- // Avoid reporting a spurious miss if m.dirty got promoted while we were
- // blocked on m.mu. (If further loads of the same key will not miss, it's
- // not worth copying the dirty map for this key.)
- read, _ = m.read.Load().(readOnly)
- e, ok = read.m[key]
- if !ok && read.amended {
- e, ok = m.dirty[key]
- // Regardless of whether the entry was present, record a miss: this key
- // will take the slow path until the dirty map is promoted to the read
- // map.
- m.missLocked()
- }
- m.mu.Unlock()
- }
- if !ok {
- return nil, false
- }
- return e.load()
-}
-
-func (e *entry) load() (value interface{}, ok bool) {
- p := atomic.LoadPointer(&e.p)
- if p == nil || p == expunged {
- return nil, false
- }
- return *(*interface{})(p), true
-}
-
-// Store sets the value for a key.
-func (m *Map) Store(key, value interface{}) {
- read, _ := m.read.Load().(readOnly)
- if e, ok := read.m[key]; ok && e.tryStore(&value) {
- return
- }
-
- m.mu.Lock()
- read, _ = m.read.Load().(readOnly)
- if e, ok := read.m[key]; ok {
- if e.unexpungeLocked() {
- // The entry was previously expunged, which implies that there is a
- // non-nil dirty map and this entry is not in it.
- m.dirty[key] = e
- }
- e.storeLocked(&value)
- } else if e, ok := m.dirty[key]; ok {
- e.storeLocked(&value)
- } else {
- if !read.amended {
- // We're adding the first new key to the dirty map.
- // Make sure it is allocated and mark the read-only map as incomplete.
- m.dirtyLocked()
- m.read.Store(readOnly{m: read.m, amended: true})
- }
- m.dirty[key] = newEntry(value)
- }
- m.mu.Unlock()
-}
-
-// tryStore stores a value if the entry has not been expunged.
-//
-// If the entry is expunged, tryStore returns false and leaves the entry
-// unchanged.
-func (e *entry) tryStore(i *interface{}) bool {
- p := atomic.LoadPointer(&e.p)
- if p == expunged {
- return false
- }
- for {
- if atomic.CompareAndSwapPointer(&e.p, p, unsafe.Pointer(i)) {
- return true
- }
- p = atomic.LoadPointer(&e.p)
- if p == expunged {
- return false
- }
- }
-}
-
-// unexpungeLocked ensures that the entry is not marked as expunged.
-//
-// If the entry was previously expunged, it must be added to the dirty map
-// before m.mu is unlocked.
-func (e *entry) unexpungeLocked() (wasExpunged bool) {
- return atomic.CompareAndSwapPointer(&e.p, expunged, nil)
-}
-
-// storeLocked unconditionally stores a value to the entry.
-//
-// The entry must be known not to be expunged.
-func (e *entry) storeLocked(i *interface{}) {
- atomic.StorePointer(&e.p, unsafe.Pointer(i))
-}
-
-// LoadOrStore returns the existing value for the key if present.
-// Otherwise, it stores and returns the given value.
-// The loaded result is true if the value was loaded, false if stored.
-func (m *Map) LoadOrStore(key, value interface{}) (actual interface{}, loaded bool) {
- // Avoid locking if it's a clean hit.
- read, _ := m.read.Load().(readOnly)
- if e, ok := read.m[key]; ok {
- actual, loaded, ok := e.tryLoadOrStore(value)
- if ok {
- return actual, loaded
- }
- }
-
- m.mu.Lock()
- read, _ = m.read.Load().(readOnly)
- if e, ok := read.m[key]; ok {
- if e.unexpungeLocked() {
- m.dirty[key] = e
- }
- actual, loaded, _ = e.tryLoadOrStore(value)
- } else if e, ok := m.dirty[key]; ok {
- actual, loaded, _ = e.tryLoadOrStore(value)
- m.missLocked()
- } else {
- if !read.amended {
- // We're adding the first new key to the dirty map.
- // Make sure it is allocated and mark the read-only map as incomplete.
- m.dirtyLocked()
- m.read.Store(readOnly{m: read.m, amended: true})
- }
- m.dirty[key] = newEntry(value)
- actual, loaded = value, false
- }
- m.mu.Unlock()
-
- return actual, loaded
-}
-
-// tryLoadOrStore atomically loads or stores a value if the entry is not
-// expunged.
-//
-// If the entry is expunged, tryLoadOrStore leaves the entry unchanged and
-// returns with ok==false.
-func (e *entry) tryLoadOrStore(i interface{}) (actual interface{}, loaded, ok bool) {
- p := atomic.LoadPointer(&e.p)
- if p == expunged {
- return nil, false, false
- }
- if p != nil {
- return *(*interface{})(p), true, true
- }
-
- // Copy the interface after the first load to make this method more amenable
- // to escape analysis: if we hit the "load" path or the entry is expunged, we
- // shouldn't bother heap-allocating.
- ic := i
- for {
- if atomic.CompareAndSwapPointer(&e.p, nil, unsafe.Pointer(&ic)) {
- return i, false, true
- }
- p = atomic.LoadPointer(&e.p)
- if p == expunged {
- return nil, false, false
- }
- if p != nil {
- return *(*interface{})(p), true, true
- }
- }
-}
-
-// Delete deletes the value for a key.
-func (m *Map) Delete(key interface{}) {
- read, _ := m.read.Load().(readOnly)
- e, ok := read.m[key]
- if !ok && read.amended {
- m.mu.Lock()
- read, _ = m.read.Load().(readOnly)
- e, ok = read.m[key]
- if !ok && read.amended {
- delete(m.dirty, key)
- }
- m.mu.Unlock()
- }
- if ok {
- e.delete()
- }
-}
-
-func (e *entry) delete() (hadValue bool) {
- for {
- p := atomic.LoadPointer(&e.p)
- if p == nil || p == expunged {
- return false
- }
- if atomic.CompareAndSwapPointer(&e.p, p, nil) {
- return true
- }
- }
-}
-
-// Range calls f sequentially for each key and value present in the map.
-// If f returns false, range stops the iteration.
-//
-// Range does not necessarily correspond to any consistent snapshot of the Map's
-// contents: no key will be visited more than once, but if the value for any key
-// is stored or deleted concurrently, Range may reflect any mapping for that key
-// from any point during the Range call.
-//
-// Range may be O(N) with the number of elements in the map even if f returns
-// false after a constant number of calls.
-func (m *Map) Range(f func(key, value interface{}) bool) {
- // We need to be able to iterate over all of the keys that were already
- // present at the start of the call to Range.
- // If read.amended is false, then read.m satisfies that property without
- // requiring us to hold m.mu for a long time.
- read, _ := m.read.Load().(readOnly)
- if read.amended {
- // m.dirty contains keys not in read.m. Fortunately, Range is already O(N)
- // (assuming the caller does not break out early), so a call to Range
- // amortizes an entire copy of the map: we can promote the dirty copy
- // immediately!
- m.mu.Lock()
- read, _ = m.read.Load().(readOnly)
- if read.amended {
- read = readOnly{m: m.dirty}
- m.read.Store(read)
- m.dirty = nil
- m.misses = 0
- }
- m.mu.Unlock()
- }
-
- for k, e := range read.m {
- v, ok := e.load()
- if !ok {
- continue
- }
- if !f(k, v) {
- break
- }
- }
-}
-
-func (m *Map) missLocked() {
- m.misses++
- if m.misses < len(m.dirty) {
- return
- }
- m.read.Store(readOnly{m: m.dirty})
- m.dirty = nil
- m.misses = 0
-}
-
-func (m *Map) dirtyLocked() {
- if m.dirty != nil {
- return
- }
-
- read, _ := m.read.Load().(readOnly)
- m.dirty = make(map[interface{}]*entry, len(read.m))
- for k, e := range read.m {
- if !e.tryExpungeLocked() {
- m.dirty[k] = e
- }
- }
-}
-
-func (e *entry) tryExpungeLocked() (isExpunged bool) {
- p := atomic.LoadPointer(&e.p)
- for p == nil {
- if atomic.CompareAndSwapPointer(&e.p, nil, expunged) {
- return true
- }
- p = atomic.LoadPointer(&e.p)
- }
- return p == expunged
-}
diff --git a/dependencies/pkg/mod/golang.org/x/sync@v0.3.0/CONTRIBUTING.md b/dependencies/pkg/mod/golang.org/x/sync@v0.7.0/CONTRIBUTING.md
index d0485e8..d0485e8 100644
--- a/dependencies/pkg/mod/golang.org/x/sync@v0.3.0/CONTRIBUTING.md
+++ b/dependencies/pkg/mod/golang.org/x/sync@v0.7.0/CONTRIBUTING.md
diff --git a/dependencies/pkg/mod/golang.org/x/sync@v0.7.0/LICENSE b/dependencies/pkg/mod/golang.org/x/sync@v0.7.0/LICENSE
new file mode 100644
index 0000000..6a66aea
--- /dev/null
+++ b/dependencies/pkg/mod/golang.org/x/sync@v0.7.0/LICENSE
@@ -0,0 +1,27 @@
+Copyright (c) 2009 The Go Authors. All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+
+ * Redistributions of source code must retain the above copyright
+notice, this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above
+copyright notice, this list of conditions and the following disclaimer
+in the documentation and/or other materials provided with the
+distribution.
+ * Neither the name of Google Inc. nor the names of its
+contributors may be used to endorse or promote products derived from
+this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/dependencies/pkg/mod/golang.org/x/sync@v0.3.0/PATENTS b/dependencies/pkg/mod/golang.org/x/sync@v0.7.0/PATENTS
index 7330990..7330990 100644
--- a/dependencies/pkg/mod/golang.org/x/sync@v0.3.0/PATENTS
+++ b/dependencies/pkg/mod/golang.org/x/sync@v0.7.0/PATENTS
diff --git a/dependencies/pkg/mod/golang.org/x/sync@v0.3.0/README.md b/dependencies/pkg/mod/golang.org/x/sync@v0.7.0/README.md
index 7c1c8f6..7c1c8f6 100644
--- a/dependencies/pkg/mod/golang.org/x/sync@v0.3.0/README.md
+++ b/dependencies/pkg/mod/golang.org/x/sync@v0.7.0/README.md
diff --git a/dependencies/pkg/mod/golang.org/x/sync@v0.3.0/codereview.cfg b/dependencies/pkg/mod/golang.org/x/sync@v0.7.0/codereview.cfg
index 3f8b14b..3f8b14b 100644
--- a/dependencies/pkg/mod/golang.org/x/sync@v0.3.0/codereview.cfg
+++ b/dependencies/pkg/mod/golang.org/x/sync@v0.7.0/codereview.cfg
diff --git a/dependencies/pkg/mod/golang.org/x/sync@v0.3.0/errgroup/errgroup.go b/dependencies/pkg/mod/golang.org/x/sync@v0.7.0/errgroup/errgroup.go
index b18efb7..948a3ee 100644
--- a/dependencies/pkg/mod/golang.org/x/sync@v0.3.0/errgroup/errgroup.go
+++ b/dependencies/pkg/mod/golang.org/x/sync@v0.7.0/errgroup/errgroup.go
@@ -4,6 +4,9 @@
// Package errgroup provides synchronization, error propagation, and Context
// cancelation for groups of goroutines working on subtasks of a common task.
+//
+// [errgroup.Group] is related to [sync.WaitGroup] but adds handling of tasks
+// returning errors.
package errgroup
import (
diff --git a/dependencies/pkg/mod/golang.org/x/sync@v0.3.0/errgroup/errgroup_example_md5all_test.go b/dependencies/pkg/mod/golang.org/x/sync@v0.7.0/errgroup/errgroup_example_md5all_test.go
index 739b336..739b336 100644
--- a/dependencies/pkg/mod/golang.org/x/sync@v0.3.0/errgroup/errgroup_example_md5all_test.go
+++ b/dependencies/pkg/mod/golang.org/x/sync@v0.7.0/errgroup/errgroup_example_md5all_test.go
diff --git a/dependencies/pkg/mod/golang.org/x/sync@v0.3.0/errgroup/errgroup_test.go b/dependencies/pkg/mod/golang.org/x/sync@v0.7.0/errgroup/errgroup_test.go
index 0358842..0358842 100644
--- a/dependencies/pkg/mod/golang.org/x/sync@v0.3.0/errgroup/errgroup_test.go
+++ b/dependencies/pkg/mod/golang.org/x/sync@v0.7.0/errgroup/errgroup_test.go
diff --git a/dependencies/pkg/mod/golang.org/x/sync@v0.3.0/errgroup/go120.go b/dependencies/pkg/mod/golang.org/x/sync@v0.7.0/errgroup/go120.go
index 7d419d3..f93c740 100644
--- a/dependencies/pkg/mod/golang.org/x/sync@v0.3.0/errgroup/go120.go
+++ b/dependencies/pkg/mod/golang.org/x/sync@v0.7.0/errgroup/go120.go
@@ -3,7 +3,6 @@
// license that can be found in the LICENSE file.
//go:build go1.20
-// +build go1.20
package errgroup
diff --git a/dependencies/pkg/mod/golang.org/x/sync@v0.3.0/errgroup/go120_test.go b/dependencies/pkg/mod/golang.org/x/sync@v0.7.0/errgroup/go120_test.go
index 0c354a1..068f104 100644
--- a/dependencies/pkg/mod/golang.org/x/sync@v0.3.0/errgroup/go120_test.go
+++ b/dependencies/pkg/mod/golang.org/x/sync@v0.7.0/errgroup/go120_test.go
@@ -3,7 +3,6 @@
// license that can be found in the LICENSE file.
//go:build go1.20
-// +build go1.20
package errgroup_test
diff --git a/dependencies/pkg/mod/golang.org/x/sync@v0.3.0/errgroup/pre_go120.go b/dependencies/pkg/mod/golang.org/x/sync@v0.7.0/errgroup/pre_go120.go
index 1795c18..88ce334 100644
--- a/dependencies/pkg/mod/golang.org/x/sync@v0.3.0/errgroup/pre_go120.go
+++ b/dependencies/pkg/mod/golang.org/x/sync@v0.7.0/errgroup/pre_go120.go
@@ -3,7 +3,6 @@
// license that can be found in the LICENSE file.
//go:build !go1.20
-// +build !go1.20
package errgroup
diff --git a/dependencies/pkg/mod/golang.org/x/sync@v0.3.0/go.mod b/dependencies/pkg/mod/golang.org/x/sync@v0.7.0/go.mod
index 782b734..74bd0ac 100644
--- a/dependencies/pkg/mod/golang.org/x/sync@v0.3.0/go.mod
+++ b/dependencies/pkg/mod/golang.org/x/sync@v0.7.0/go.mod
@@ -1,3 +1,3 @@
module golang.org/x/sync
-go 1.17
+go 1.18
diff --git a/dependencies/pkg/mod/golang.org/x/sync@v0.3.0/semaphore/semaphore.go b/dependencies/pkg/mod/golang.org/x/sync@v0.7.0/semaphore/semaphore.go
index 30f632c..b618162 100644
--- a/dependencies/pkg/mod/golang.org/x/sync@v0.3.0/semaphore/semaphore.go
+++ b/dependencies/pkg/mod/golang.org/x/sync@v0.7.0/semaphore/semaphore.go
@@ -35,11 +35,25 @@ type Weighted struct {
// Acquire acquires the semaphore with a weight of n, blocking until resources
// are available or ctx is done. On success, returns nil. On failure, returns
// ctx.Err() and leaves the semaphore unchanged.
-//
-// If ctx is already done, Acquire may still succeed without blocking.
func (s *Weighted) Acquire(ctx context.Context, n int64) error {
+ done := ctx.Done()
+
s.mu.Lock()
+ select {
+ case <-done:
+ // ctx becoming done has "happened before" acquiring the semaphore,
+ // whether it became done before the call began or while we were
+ // waiting for the mutex. We prefer to fail even if we could acquire
+ // the mutex without blocking.
+ s.mu.Unlock()
+ return ctx.Err()
+ default:
+ }
if s.size-s.cur >= n && s.waiters.Len() == 0 {
+ // Since we hold s.mu and haven't synchronized since checking done, if
+ // ctx becomes done before we return here, it becoming done must have
+ // "happened concurrently" with this call - it cannot "happen before"
+ // we return in this branch. So, we're ok to always acquire here.
s.cur += n
s.mu.Unlock()
return nil
@@ -48,7 +62,7 @@ func (s *Weighted) Acquire(ctx context.Context, n int64) error {
if n > s.size {
// Don't make other Acquire calls block on one that's doomed to fail.
s.mu.Unlock()
- <-ctx.Done()
+ <-done
return ctx.Err()
}
@@ -58,14 +72,14 @@ func (s *Weighted) Acquire(ctx context.Context, n int64) error {
s.mu.Unlock()
select {
- case <-ctx.Done():
- err := ctx.Err()
+ case <-done:
s.mu.Lock()
select {
case <-ready:
- // Acquired the semaphore after we were canceled. Rather than trying to
- // fix up the queue, just pretend we didn't notice the cancelation.
- err = nil
+ // Acquired the semaphore after we were canceled.
+ // Pretend we didn't and put the tokens back.
+ s.cur -= n
+ s.notifyWaiters()
default:
isFront := s.waiters.Front() == elem
s.waiters.Remove(elem)
@@ -75,9 +89,19 @@ func (s *Weighted) Acquire(ctx context.Context, n int64) error {
}
}
s.mu.Unlock()
- return err
+ return ctx.Err()
case <-ready:
+ // Acquired the semaphore. Check that ctx isn't already done.
+ // We check the done channel instead of calling ctx.Err because we
+ // already have the channel, and ctx.Err is O(n) with the nesting
+ // depth of ctx.
+ select {
+ case <-done:
+ s.Release(n)
+ return ctx.Err()
+ default:
+ }
return nil
}
}
diff --git a/dependencies/pkg/mod/golang.org/x/sync@v0.3.0/semaphore/semaphore_bench_test.go b/dependencies/pkg/mod/golang.org/x/sync@v0.7.0/semaphore/semaphore_bench_test.go
index 3b60ca8..aa64258 100644
--- a/dependencies/pkg/mod/golang.org/x/sync@v0.3.0/semaphore/semaphore_bench_test.go
+++ b/dependencies/pkg/mod/golang.org/x/sync@v0.7.0/semaphore/semaphore_bench_test.go
@@ -2,9 +2,6 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
-//go:build go1.7
-// +build go1.7
-
package semaphore_test
import (
diff --git a/dependencies/pkg/mod/golang.org/x/sync@v0.3.0/semaphore/semaphore_example_test.go b/dependencies/pkg/mod/golang.org/x/sync@v0.7.0/semaphore/semaphore_example_test.go
index e75cd79..e75cd79 100644
--- a/dependencies/pkg/mod/golang.org/x/sync@v0.3.0/semaphore/semaphore_example_test.go
+++ b/dependencies/pkg/mod/golang.org/x/sync@v0.7.0/semaphore/semaphore_example_test.go
diff --git a/dependencies/pkg/mod/golang.org/x/sync@v0.3.0/semaphore/semaphore_test.go b/dependencies/pkg/mod/golang.org/x/sync@v0.7.0/semaphore/semaphore_test.go
index 6e8eca2..61012d6 100644
--- a/dependencies/pkg/mod/golang.org/x/sync@v0.3.0/semaphore/semaphore_test.go
+++ b/dependencies/pkg/mod/golang.org/x/sync@v0.7.0/semaphore/semaphore_test.go
@@ -200,3 +200,38 @@ func TestAllocCancelDoesntStarve(t *testing.T) {
}
sem.Release(1)
}
+
+func TestWeightedAcquireCanceled(t *testing.T) {
+ // https://go.dev/issue/63615
+ sem := semaphore.NewWeighted(2)
+ ctx, cancel := context.WithCancel(context.Background())
+ sem.Acquire(context.Background(), 1)
+ ch := make(chan struct{})
+ go func() {
+ // Synchronize with the Acquire(2) below.
+ for sem.TryAcquire(1) {
+ sem.Release(1)
+ }
+ // Now cancel ctx, and then release the token.
+ cancel()
+ sem.Release(1)
+ close(ch)
+ }()
+ // Since the context closing happens before enough tokens become available,
+ // this Acquire must fail.
+ if err := sem.Acquire(ctx, 2); err != context.Canceled {
+ t.Errorf("Acquire with canceled context returned wrong error: want context.Canceled, got %v", err)
+ }
+ // There must always be two tokens in the semaphore after the other
+ // goroutine releases the one we held at the start.
+ <-ch
+ if !sem.TryAcquire(2) {
+ t.Fatal("TryAcquire after canceled Acquire failed")
+ }
+ // Additionally verify that we don't acquire with a done context even when
+ // we wouldn't need to block to do so.
+ sem.Release(2)
+ if err := sem.Acquire(ctx, 1); err != context.Canceled {
+ t.Errorf("Acquire with canceled context returned wrong error: want context.Canceled, got %v", err)
+ }
+}
diff --git a/dependencies/pkg/mod/golang.org/x/sync@v0.3.0/singleflight/singleflight.go b/dependencies/pkg/mod/golang.org/x/sync@v0.7.0/singleflight/singleflight.go
index 8473fb7..4051830 100644
--- a/dependencies/pkg/mod/golang.org/x/sync@v0.3.0/singleflight/singleflight.go
+++ b/dependencies/pkg/mod/golang.org/x/sync@v0.7.0/singleflight/singleflight.go
@@ -31,6 +31,15 @@ func (p *panicError) Error() string {
return fmt.Sprintf("%v\n\n%s", p.value, p.stack)
}
+func (p *panicError) Unwrap() error {
+ err, ok := p.value.(error)
+ if !ok {
+ return nil
+ }
+
+ return err
+}
+
func newPanicError(v interface{}) error {
stack := debug.Stack()
diff --git a/dependencies/pkg/mod/golang.org/x/sync@v0.3.0/singleflight/singleflight_test.go b/dependencies/pkg/mod/golang.org/x/sync@v0.7.0/singleflight/singleflight_test.go
index bb25a1e..1e85b17 100644
--- a/dependencies/pkg/mod/golang.org/x/sync@v0.3.0/singleflight/singleflight_test.go
+++ b/dependencies/pkg/mod/golang.org/x/sync@v0.7.0/singleflight/singleflight_test.go
@@ -19,6 +19,69 @@ import (
"time"
)
+type errValue struct{}
+
+func (err *errValue) Error() string {
+ return "error value"
+}
+
+func TestPanicErrorUnwrap(t *testing.T) {
+ t.Parallel()
+
+ testCases := []struct {
+ name string
+ panicValue interface{}
+ wrappedErrorType bool
+ }{
+ {
+ name: "panicError wraps non-error type",
+ panicValue: &panicError{value: "string value"},
+ wrappedErrorType: false,
+ },
+ {
+ name: "panicError wraps error type",
+ panicValue: &panicError{value: new(errValue)},
+ wrappedErrorType: false,
+ },
+ }
+
+ for _, tc := range testCases {
+ tc := tc
+
+ t.Run(tc.name, func(t *testing.T) {
+ t.Parallel()
+
+ var recovered interface{}
+
+ group := &Group{}
+
+ func() {
+ defer func() {
+ recovered = recover()
+ t.Logf("after panic(%#v) in group.Do, recovered %#v", tc.panicValue, recovered)
+ }()
+
+ _, _, _ = group.Do(tc.name, func() (interface{}, error) {
+ panic(tc.panicValue)
+ })
+ }()
+
+ if recovered == nil {
+ t.Fatal("expected a non-nil panic value")
+ }
+
+ err, ok := recovered.(error)
+ if !ok {
+ t.Fatalf("recovered non-error type: %T", recovered)
+ }
+
+ if !errors.Is(err, new(errValue)) && tc.wrappedErrorType {
+ t.Errorf("unexpected wrapped error type %T; want %T", err, new(errValue))
+ }
+ })
+ }
+}
+
func TestDo(t *testing.T) {
var g Group
v, err, _ := g.Do("key", func() (interface{}, error) {
diff --git a/dependencies/pkg/mod/golang.org/x/sync@v0.3.0/syncmap/go19.go b/dependencies/pkg/mod/golang.org/x/sync@v0.7.0/syncmap/map.go
index fa04dba..c9a07f3 100644
--- a/dependencies/pkg/mod/golang.org/x/sync@v0.3.0/syncmap/go19.go
+++ b/dependencies/pkg/mod/golang.org/x/sync@v0.7.0/syncmap/map.go
@@ -2,9 +2,9 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
-//go:build go1.9
-// +build go1.9
-
+// Package syncmap provides a concurrent map implementation.
+// This was the prototype for sync.Map which was added to the standard library's
+// sync package in Go 1.9. https://golang.org/pkg/sync/#Map.
package syncmap
import "sync" // home to the standard library's sync.map implementation as of Go 1.9
diff --git a/dependencies/pkg/mod/golang.org/x/sync@v0.3.0/syncmap/map_bench_test.go b/dependencies/pkg/mod/golang.org/x/sync@v0.7.0/syncmap/map_bench_test.go
index b279b4f..b279b4f 100644
--- a/dependencies/pkg/mod/golang.org/x/sync@v0.3.0/syncmap/map_bench_test.go
+++ b/dependencies/pkg/mod/golang.org/x/sync@v0.7.0/syncmap/map_bench_test.go
diff --git a/dependencies/pkg/mod/golang.org/x/sync@v0.3.0/syncmap/map_reference_test.go b/dependencies/pkg/mod/golang.org/x/sync@v0.7.0/syncmap/map_reference_test.go
index 923c51b..923c51b 100644
--- a/dependencies/pkg/mod/golang.org/x/sync@v0.3.0/syncmap/map_reference_test.go
+++ b/dependencies/pkg/mod/golang.org/x/sync@v0.7.0/syncmap/map_reference_test.go
diff --git a/dependencies/pkg/mod/golang.org/x/sync@v0.3.0/syncmap/map_test.go b/dependencies/pkg/mod/golang.org/x/sync@v0.7.0/syncmap/map_test.go
index bf69f50..bf69f50 100644
--- a/dependencies/pkg/mod/golang.org/x/sync@v0.3.0/syncmap/map_test.go
+++ b/dependencies/pkg/mod/golang.org/x/sync@v0.7.0/syncmap/map_test.go
diff --git a/dependencies/pkg/sumdb/sum.golang.org/latest b/dependencies/pkg/sumdb/sum.golang.org/latest
index 18250de..bba7fad 100644
--- a/dependencies/pkg/sumdb/sum.golang.org/latest
+++ b/dependencies/pkg/sumdb/sum.golang.org/latest
@@ -1,5 +1,5 @@
go.sum database tree
-18954122
-ueKaAz+x9Ed0b2h+3sFs/CCKpKmxNi9HlAqT2akr9b4=
+25097955
+gkiyFx75xsUmlN2G1BLSj49TRPdf1yMn95CIxt4/C2Y=
-— sum.golang.org Az3grhMncUODC4LNDsijagZ6Mipiz47qDssMbbKDPvxYKFQMyIAlekTVZFI8wrR7Z61qNN8JOXY7mmBVLFfvK2PCPwg=
+— sum.golang.org Az3grhXF53na7sglMFhGsisjkRmtI1nyPAkN4e89oSAyAOPLLBJpqKzg/Yxcx6vtDOu5fJJfXb1FAuAkqO+wZ0KNCgU=
diff --git a/doc/01-About.md b/doc/01-About.md
index 8211e57..056da3f 100644
--- a/doc/01-About.md
+++ b/doc/01-About.md
@@ -3,13 +3,14 @@
Icinga DB is a set of components for publishing, synchronizing and
visualizing monitoring data in the Icinga ecosystem, consisting of:
-* The Icinga DB daemon, which synchronizes monitoring data between a Redis server and a database
+* The Icinga DB daemon,
+ which synchronizes monitoring data between a Redis®[\*](TRADEMARKS.md#redis) server and a database
* Icinga 2 with its [Icinga DB feature](https://icinga.com/docs/icinga-2/latest/doc/14-features/#icinga-db) enabled,
- responsible for publishing the data to the Redis server, i.e. configuration and its runtime updates, check results,
+ responsible for publishing the data to the Redis® server, i.e. configuration and its runtime updates, check results,
state changes, downtimes, acknowledgements, notifications, and other events such as flapping
* And Icinga Web with the
[Icinga DB Web](https://icinga.com/docs/icinga-db-web) module enabled,
- which connects to both Redis and the database to display and work with the most up-to-date data
+ which connects to both Redis® and the database to display and work with the most up-to-date data
![Icinga DB Architecture](images/icingadb-architecture.png)
diff --git a/doc/02-Installation.md b/doc/02-Installation.md
index 8e04a1f..d5bd20b 100644
--- a/doc/02-Installation.md
+++ b/doc/02-Installation.md
@@ -12,15 +12,15 @@ see the [Upgrading](04-Upgrading.md) documentation for the necessary steps.
![Icinga DB Daemon](images/icingadb-daemon.png)
Before installing Icinga DB, make sure you have installed [Icinga 2](https://icinga.com/docs/icinga-2),
-set up a Redis server, and enabled the `icingadb` feature.
+set up a Redis® server, and enabled the `icingadb` feature.
The Icinga 2 installation documentation covers all the necessary steps.
Additionally, Icinga offers the `icingadb-redis` package for all supported operating systems,
-which ships an up-to-date Redis server version and is pre-configured for the Icinga DB components.
+which ships an up-to-date Redis® open source server version and is pre-configured for the Icinga DB components.
!!! tip
Although Icinga DB can run anywhere in an Icinga environment,
- we recommend to install it where the corresponding Icinga 2 node and Redis server is running to
+ we recommend to install it where the corresponding Icinga 2 node and Redis® server is running to
keep latency between the components low.
<!-- {% else %} -->
@@ -271,7 +271,7 @@ psql -U icingadb icingadb < /usr/share/icingadb/schema/pgsql/schema.sql
Icinga DB installs its configuration file to `/etc/icingadb/config.yml`,
pre-populating most of the settings for a local setup. Before running Icinga DB,
-adjust the Redis and database credentials and, if necessary, the connection configuration.
+adjust the Redis® and database credentials and, if necessary, the connection configuration.
The configuration file explains general settings.
All available settings can be found under [Configuration](03-Configuration.md).
@@ -286,8 +286,8 @@ systemctl enable --now icingadb
## Installing Icinga DB Web
-With Icinga 2, Redis, Icinga DB and the database fully set up, it is now time to install Icinga DB Web,
-which connects to both Redis and the database to display and work with the monitoring data.
+With Icinga 2, Redis®, Icinga DB and the database fully set up, it is now time to install Icinga DB Web,
+which connects to both Redis® and the database to display and work with the monitoring data.
![Icinga DB Web](images/icingadb-web.png)
diff --git a/doc/03-Configuration.md b/doc/03-Configuration.md
index af92783..21edcf5 100644
--- a/doc/03-Configuration.md
+++ b/doc/03-Configuration.md
@@ -3,45 +3,65 @@
The configuration is stored in `/etc/icingadb/config.yml`.
See [config.example.yml](../config.example.yml) for an example configuration.
-## Redis Configuration
+## Redis® Configuration
-Connection configuration for the Redis server where Icinga 2 writes its configuration, state and history items.
+Connection configuration for the Redis® server where Icinga 2 writes its configuration, state and history items.
This is the same connection as configured in the
[Icinga DB feature](https://icinga.com/docs/icinga-2/latest/doc/14-features/#icinga-db) of
-the corresponding Icinga 2 node. High availability setups require a dedicated Redis server per Icinga 2 node and
+the corresponding Icinga 2 node. High availability setups require a dedicated Redis® server per Icinga 2 node and
therefore a dedicated Icinga DB instance that connects to it.
-| Option | Description |
-|----------|------------------------------------------------------------------------------------------------------------------------------------|
-| host | **Required.** Redis host or absolute Unix socket path. |
-| port | **Optional.** Redis port. Defaults to `6380` since the Redis server provided by the `icingadb-redis` package listens on that port. |
-| password | **Optional.** The password to use. |
-| tls | **Optional.** Whether to use TLS. |
-| cert | **Optional.** Path to TLS client certificate. |
-| key | **Optional.** Path to TLS private key. |
-| ca | **Optional.** Path to TLS CA certificate. |
-| insecure | **Optional.** Whether not to verify the peer. |
+| Option | Description |
+|----------|-------------------------------------------------------------------------------------------------------------------------|
+| host | **Required.** Host name or address, or absolute Unix socket path. |
+| port | **Optional.** TCP port. Defaults to `6380` matching the Redis® open source server port in the `icingadb-redis` package. |
+| password | **Optional.** Authentication password. |
+| tls | **Optional.** Whether to use TLS. |
+| cert | **Optional.** Path to TLS client certificate. |
+| key | **Optional.** Path to TLS private key. |
+| ca | **Optional.** Path to TLS CA certificate. |
+| insecure | **Optional.** Whether not to verify the peer. |
## Database Configuration
Connection configuration for the database to which Icinga DB synchronizes monitoring data.
This is also the database used in
-[Icinga DB Web](https://icinga.com/docs/icinga-db/latest/icinga-db-web/doc/01-About/) to view and work with the data.
+[Icinga DB Web](https://icinga.com/docs/icinga-db-web) to view and work with the data.
In high availability setups, all Icinga DB instances must write to the same database.
-| Option | Description |
-|----------|--------------------------------------------------------------------------------------------------------|
-| type | **Optional.** Either `mysql` (default) or `pgsql`. |
-| host | **Required.** Database host or absolute Unix socket path. |
-| port | **Optional.** Database port. By default, the MySQL or PostgreSQL port, depending on the database type. |
-| database | **Required.** Database name. |
-| user | **Required.** Database username. |
-| password | **Optional.** Database password. |
-| tls | **Optional.** Whether to use TLS. |
-| cert | **Optional.** Path to TLS client certificate. |
-| key | **Optional.** Path to TLS private key. |
-| ca | **Optional.** Path to TLS CA certificate. |
-| insecure | **Optional.** Whether not to verify the peer. |
+| Option | Description |
+|----------|------------------------------------------------------------------------------------------------------------------------------------------------|
+| type | **Optional.** Either `mysql` (default) or `pgsql`. |
+| host | **Required.** Database host or absolute Unix socket path. |
+| port | **Optional.** Database port. By default, the MySQL or PostgreSQL port, depending on the database type. |
+| database | **Required.** Database name. |
+| user | **Required.** Database username. |
+| password | **Optional.** Database password. |
+| tls | **Optional.** Whether to use TLS. |
+| cert | **Optional.** Path to TLS client certificate. |
+| key | **Optional.** Path to TLS private key. |
+| ca | **Optional.** Path to TLS CA certificate. |
+| insecure | **Optional.** Whether not to verify the peer. |
+| options | **Optional.** List of low-level [database options](#database-options) that can be set to influence some Icinga DB internal default behaviours. |
+
+### Database Options
+
+Each of these configuration options are highly technical with thoroughly considered and tested default values that you
+should only change when you exactly know what you are doing. You can use these options to influence the Icinga DB default
+behaviour, how it interacts with databases, thus the defaults are usually sufficient for most users and do not need any
+manual adjustments.
+
+!!! important
+
+ Do not change the defaults if you do not have to!
+
+| Option | Description |
+|--------------------------------|--------------------------------------------------------------------------------------------------------------------------------------------------|
+| max_connections | **Optional.** Maximum number of database connections Icinga DB is allowed to open in parallel if necessary. Defaults to `16`. |
+| max_connections_per_table | **Optional.** Maximum number of queries Icinga DB is allowed to execute on a single table concurrently. Defaults to `8`. |
+| max_placeholders_per_statement | **Optional.** Maximum number of placeholders Icinga DB is allowed to use for a single SQL statement. Defaults to `8192`. |
+| max_rows_per_transaction | **Optional.** Maximum number of rows Icinga DB is allowed to `SELECT`,`DELETE`,`UPDATE` or `INSERT` in a single transaction. Defaults to `8192`. |
+| wsrep_sync_wait | **Optional.** Enforce [Galera cluster](#galera-cluster) nodes to perform strict cluster-wide causality checks. Defaults to `7`. |
## Logging Configuration
@@ -56,19 +76,19 @@ Configuration of the logging component used by Icinga DB.
### Logging Components
-| Component | Description |
-|-------------------|--------------------------------------------------------------------------------|
-| config-sync | Config object synchronization between Redis and MySQL. |
-| database | Database connection status and queries. |
-| dump-signals | Dump signals received from Icinga. |
-| heartbeat | Icinga heartbeats received through Redis. |
-| high-availability | Manages responsibility of Icinga DB instances. |
-| history-sync | Synchronization of history entries from Redis to MySQL. |
-| overdue-sync | Calculation and synchronization of the overdue status of checkables. |
-| redis | Redis connection status and queries. |
-| retention | Deletes historical data that exceed their configured retention period. |
-| runtime-updates | Runtime updates of config objects after the initial config synchronization. |
-| telemetry | Reporting of Icinga DB status to Icinga 2 via Redis (for monitoring purposes). |
+| Component | Description |
+|-------------------|---------------------------------------------------------------------------------|
+| config-sync | Config object synchronization between Redis® and MySQL. |
+| database | Database connection status and queries. |
+| dump-signals | Dump signals received from Icinga. |
+| heartbeat | Icinga heartbeats received through Redis®. |
+| high-availability | Manages responsibility of Icinga DB instances. |
+| history-sync | Synchronization of history entries from Redis® to MySQL. |
+| overdue-sync | Calculation and synchronization of the overdue status of checkables. |
+| redis | Redis® connection status and queries. |
+| retention | Deletes historical data that exceed their configured retention period. |
+| runtime-updates | Runtime updates of config objects after the initial config synchronization. |
+| telemetry | Reporting of Icinga DB status to Icinga 2 via Redis® (for monitoring purposes). |
## Retention
@@ -83,6 +103,8 @@ allowing to keep this information for longer with a smaller storage footprint.
|--------------|---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|
| history-days | **Optional.** Number of days to retain historical data for all history categories. Use `options` in order to enable retention only for specific categories or to override the retention days configured here. |
| sla-days | **Optional.** Number of days to retain historical data for SLA reporting. |
+| interval | **Optional.** Interval for periodically cleaning up the historical data, defined as [duration string](#duration-string). Defaults to `"1h"`. |
+| count | **Optional.** Number of old historical data a single query can delete in a `"DELETE FROM ... LIMIT count"` manner. Defaults to `5000`. |
| options | **Optional.** Map of history category to number of days to retain its data. Available categories are `acknowledgement`, `comment`, `downtime`, `flapping`, `notification`, `sla` and `state`. |
## Appendix
@@ -91,3 +113,21 @@ allowing to keep this information for longer with a smaller storage footprint.
A duration string is a sequence of decimal numbers and a unit suffix, such as `"20s"`.
Valid units are `"ms"`, `"s"`, `"m"` and `"h"`.
+
+### Galera Cluster
+
+Icinga DB expects a more consistent behaviour from its database than a
+[Galera cluster](https://mariadb.com/kb/en/what-is-mariadb-galera-cluster/) provides by default. To accommodate this,
+Icinga DB sets the [wsrep_sync_wait](https://mariadb.com/kb/en/galera-cluster-system-variables/#wsrep_sync_wait) system
+variable for all its database connections. Consequently, strict cluster-wide causality checks are enforced before
+executing specific SQL queries, which are determined by the value set in the `wsrep_sync_wait` system variable.
+By default, Icinga DB sets this to `7`, which includes `READ, UPDATE, DELETE, INSERT, REPLACE` query types and is
+usually sufficient. Unfortunately, this also has the downside that every single Icinga DB query will be blocked until
+the cluster nodes resynchronise their states after each executed query, and may result in degraded performance.
+
+However, this does not necessarily have to be the case if, for instance, Icinga DB is only allowed to connect to a
+single cluster node at a time. This is the case when a load balancer does not randomly route connections to all the
+nodes evenly, but always to the same node until it fails, or if your database cluster nodes have a virtual IP address
+fail over assigned. In such situations, you can set the `wsrep_sync_wait` system variable to `0` in the
+`/etc/icingadb/config.yml` file to disable it entirely, as Icinga DB doesn't have to wait for cluster
+synchronisation then.
diff --git a/doc/04-Upgrading.md b/doc/04-Upgrading.md
index 5a085e0..74382e5 100644
--- a/doc/04-Upgrading.md
+++ b/doc/04-Upgrading.md
@@ -3,6 +3,98 @@
Specific version upgrades are described below. Please note that version upgrades are incremental.
If you are upgrading across multiple versions, make sure to follow the steps for each of them.
+## Upgrading to Icinga DB v1.2.0
+
+Please apply the `1.2.0.sql` upgrade script to your database. For package installations, you can find this file at
+`/usr/share/icingadb/schema/mysql/upgrades/` or `/usr/share/icingadb/schema/pgsql/upgrades/`, depending on your
+database vendor.
+
+As the daemon checks the schema version, the recommended way to perform the upgrade is to stop the daemon, apply the
+schema upgrade and then start the new daemon version. If you want to minimize downtime as much as possible, it is safe
+to apply this schema upgrade while the Icinga DB v1.1.1 daemon is still running and then restart the daemon with the
+new version. Please keep in mind that depending on the distribution, your package manager may automatically attempt to
+restart the daemon when upgrading the package.
+
+!!! warning
+
+ With MySQL and MariaDB, a locking issue can occur if the schema upgrade is applied while the history view is
+ accessed in Icinga DB Web. This can result in the upgrade being delayed unnecessarily and blocking other queries.
+ Please see [unblock history tables](#unblock-history-tables) for how to detect and resolve this situation.
+
+### Upgrading the state_history Table
+
+This release includes fixes for hosts and services reaching check attempt 256. However, on existing installations,
+the schema upgrade required to fix the history tables isn't automatically applied by `1.2.0.sql` as a rewrite of the
+whole `state_history` table is required. This can take a lot of time depending on the history size and the performance
+of the database. During this time that table will be locked exclusively and can't be accessed otherwise. This means that
+the existing history can't be viewed in Icinga Web and new history entries will be buffered in Redis®.
+
+There is a separate upgrade script `optional/1.2.0-history.sql` to perform the rewrite of the `state_history` table.
+This allows you to postpone part of the upgrade to a longer maintenance window in the future, or skip it entirely
+if you deem this safe for your installation.
+
+!!! warning
+
+ Until `optional/1.2.0-history.sql` is applied, you'll have to lower `max_check_attempts` to 255 or less, otherwise
+ Icinga DB will crash with a database error if hosts/services reach check attempt 256. If you need to lower
+ `max_check_attempts` but want to keep the same timespan from an outage to a hard state, you can raise
+ `retry_interval` instead so that `max_check_attempts * retry_interval` stays the same.
+
+If you apply it, be sure that `1.2.0.sql` was already applied before. Do not interrupt it! At best use tmux/screen not
+to lose your SSH session.
+
+### Unblock History Tables
+
+!!! info
+
+ You don't need to read this section if you are using PostgreSQL. This applies to MySQL/MariaDB users only.
+
+In order to fix a loading performance issue of the history view in Icinga DB Web, this upgrade script adds an
+appropriate index on the `history` table. Creating this new index normally takes place without blocking any other
+queries. However, this may hang for a relatively considerable time, blocking all Icinga DB queries on all`*_history`
+tables and the `history` table inclusively if there is an ongoing, long-running query on the `history` table. One way
+of causing this to happen is if an Icinga Web user accesses the `icingadb/history` view just before you are running
+this script. Depending on how many entries you have in the history table, Icinga DB Web may take quite a long time to
+load, until your web servers timeout (if any) kicks in.
+
+When you observe that the upgrade script has been taking unusually long (`> 60s`) to complete, you can perform the
+following analysis on another console and unblock it if necessary. It is important to note though that the script may
+need some time to perform the reindexing on the `history` table even if it is not blocked. Nonetheless, you can use the
+`show processlist` command to determine whether an excessive number of queries have been stuck in a waiting state.
+
+```
+MariaDB [icingadb]> show processlist;
++------+-----+-----+----------+-----+------+---------------------------------+------------------------------------+-----+
+| Id | ... | ... | db | ... | Time | State | Info | ... |
++------+-----+-----+----------+-----+------+---------------------------------+------------------------------------+-----+
+| 1475 | ... | ... | icingadb | ... | 1222 | Waiting for table metadata lock | INSERT INTO "notification_history" | ... |
+| 1485 | ... | ... | icingadb | ... | 1262 | Creating sort index | SELECT history.id, history.... | ... |
+| 1494 | ... | ... | icingadb | ... | 1224 | Waiting for table metadata lock | ALTER TABLE history ADD INDEX ... | ... |
+| 1499 | ... | ... | icingadb | ... | 1215 | Waiting for table metadata lock | INSERT INTO "notification_history" | ... |
+| 1500 | ... | ... | icingadb | ... | 1215 | Waiting for table metadata lock | INSERT INTO "state_history" ... | ... |
+| ... | ... | ... | ... | ... | ... | ... | ... | ... |
++------+-----+-----+----------+-----+------+---------------------------------+------------------------------------+-----+
+```
+
+In the above output are way too many Icinga DB queries, including the `ALTER TABLE history ADD INDEX` query from the
+upgrade script, waiting for a metadata lock, they are just minimised to the bare essentials. Unfortunately, only one of
+these queries is holding the `table metadata lock` that everyone else is now waiting for, which in this case is a
+`SELECT` statement initiated by Icinga DB Web in the `icingadb/history` view, which takes an unimaginably long time.
+Note that there might be multiple `SELECT` statements started before the upgrade script in your case when the history
+view of your Icinga DB Web is opened by different Icinga Web users at the same time.
+
+You can now either just wait for the `SELECT` statements to finish by themselves and let them block the upgrade script
+and all Icinga DB queries on all `*_history` tables or forcibly terminate them and let the remaining queries do their
+work. In this case, cancelling that one blocking `SELECT` query will let the upgrade script continue normally without
+blocking any other queries.
+```
+MariaDB [icingadb]> kill 1485;
+```
+In case you are insecure about which Icinga DB Web queries are blocking, you may simply cancel all long-running
+`SELECT` statements listed with `show processlist` (see column `Time`). Cancelling a `SELECT` query will neither
+crash Icinga DB nor corrupt your database, so feel free to abort every single one of them matching the Icinga DB
+database (see column `db`).
+
## Upgrading to Icinga DB v1.1.1
Please apply the `1.1.1.sql` upgrade script to your database.
@@ -11,7 +103,7 @@ For package installations, you can find this file at `/usr/share/icingadb/schema
Note that this upgrade will change the `history` table, which can take some time depending on the size of the table and
the performance of the database. While the upgrade is running, that table will be locked and can't be accessed. This
-means that the existing history can't be viewed in Icinga Web and new history entries will be buffered in Redis.
+means that the existing history can't be viewed in Icinga Web and new history entries will be buffered in Redis®.
As the daemon checks the schema version, the recommended way to perform the upgrade is to stop the daemon, apply the
schema upgrade and then start the new daemon version. If you want to minimize downtime as much as possible, it is safe
@@ -32,20 +124,20 @@ restart the daemon when upgrading the package.
## Upgrading to Icinga DB RC2
-Icinga DB RC2 is a complete rewrite compared to RC1. Because of this, a lot has changed in the Redis and database
+Icinga DB RC2 is a complete rewrite compared to RC1. Because of this, a lot has changed in the Redis® and database
schema, which is why they have to be deleted and recreated. The configuration file has changed from `icingadb.ini`
to `config.yml`. Instead of the INI format, we are now using YAML and have introduced more configuration options. We
-have also changed the packages of `icingadb-redis`, which is why the Redis CLI commands are now prefixed with `icingadb`
-instead of just `icinga`, i.e. the Redis CLI is now accessed via `icingadb-redis-cli`.
+have also changed the packages of `icingadb-redis`, which is why the Redis® CLI commands are now prefixed with `icingadb`
+instead of just `icinga`, i.e. the Redis® CLI is now accessed via `icingadb-redis-cli`.
Please follow the steps below to upgrade to Icinga DB RC2:
1. Stop Icinga 2 and Icinga DB.
-2. Flush your Redis instances using `icinga-redis-cli flushall` (note the `icinga` prefix as we did not
+2. Flush your Redis® instances using `icinga-redis-cli flushall` (note the `icinga` prefix as we did not
upgrade `icingadb-redis` yet) and stop them afterwards.
3. Upgrade Icinga 2 to version 2.13.2 or newer.
4. Remove the `icinga-redis` package where installed as it may conflict with `icingadb-redis`.
-5. Install Icinga DB Redis (`icingadb-redis`) on your primary Icinga 2 nodes to version 6.2.6 or newer.
+5. Install Redis® (`icingadb-redis`) on your primary Icinga 2 nodes to version 6.2.6 or newer.
6. Upgrade Icinga DB to RC2.
7. Drop the Icinga DB MySQL database and recreate it using the provided schema.
-8. Start Icinga DB Redis, Icinga 2 and Icinga DB.
+8. Start Redis®, Icinga 2 and Icinga DB.
diff --git a/doc/05-Distributed-Setups.md b/doc/05-Distributed-Setups.md
index a324a65..9954e4c 100644
--- a/doc/05-Distributed-Setups.md
+++ b/doc/05-Distributed-Setups.md
@@ -11,22 +11,22 @@ First, you need an Icinga 2 high availability setup with two master nodes, such
[here](https://icinga.com/docs/icinga-2/latest/doc/06-distributed-monitoring#high-availability-master-with-agents).
Each of the master nodes must have the Icinga DB feature enabled and
-have their own dedicated Redis server set up for it, so that each node writes the monitoring data separately.
+have their own dedicated Redis® server set up for it, so that each node writes the monitoring data separately.
The setup steps per node are no different from a single node setup and can be found in the
[Icinga 2 installation documentation](https://icinga.com/docs/icinga-2/latest/doc/02-installation).
-Each Redis server will always have the complete data available as long as
-its corresponding Icinga 2 master is running and writing to its Redis.
+Each Redis® server will always have the complete data available as long as
+its corresponding Icinga 2 master is running and writing to its Redis®.
This is because the Icinga 2 master nodes synchronize their data and events with each other as long as
they are connected,
and each takes over the entire configuration in case the other node or their connection to each other fails.
-For each Redis server you need to set up its own dedicated Icinga DB instance that connects to it,
+For each Redis® server you need to set up its own dedicated Icinga DB instance that connects to it,
but the Icinga DB instances must write to the same database, which of course can be replicated or a cluster.
So the steps from the standard
[Icinga DB installation documentation](https://icinga.com/docs/icinga-db/latest/doc/02-installation)
can be followed. However, as mentioned, the database only needs to be set up once.
-All in all, an Icinga DB HA environment involves setting up two Icinga 2 master nodes, two Redis servers,
+All in all, an Icinga DB HA environment involves setting up two Icinga 2 master nodes, two Redis® servers,
two Icinga DB instances and one database.
Please read the note about the [environment ID](#environment-id),
@@ -48,9 +48,9 @@ Which Icinga DB instance is responsible is determined by a specific database ope
can only be performed by one instance first.
In the case of concurrent operations, simply put, only one wins via a locking mechanism.
Of course, this is only true if the environment is healthy.
-Icinga DB is not trying to be responsible if its corresponding Redis server is unavailable or
-Icinga 2 is not writing data to Redis.
-If Icinga 2 or Redis become unavailable for more than 60 seconds,
+Icinga DB is not trying to be responsible if its corresponding Redis® server is unavailable or
+Icinga 2 is not writing data to Redis®.
+If Icinga 2 or Redis® become unavailable for more than 60 seconds,
Icinga DB releases responsibility so the other instance can take over.
## Multiple Environments
diff --git a/doc/TRADEMARKS.md b/doc/TRADEMARKS.md
new file mode 100644
index 0000000..952751d
--- /dev/null
+++ b/doc/TRADEMARKS.md
@@ -0,0 +1,13 @@
+# Third-party Trademarks
+
+All trademarks, logos, and brand names are the property of their respective owners.
+Any mention of company, product, or service names in our documentations, product descriptions,
+or websites is solely for identification purposes. The use of these names, trademarks,
+and brands does not imply endorsement. This document acknowledges trademarks of companies and products,
+which are the property of their respective owners, whether registered or unregistered.
+
+## Redis®
+
+Redis is a registered trademark of Redis Ltd. Any rights therein are reserved to Redis Ltd.
+Any use by Icinga GmbH is for referential purposes only and does not indicate any sponsorship,
+endorsement or affiliation between Redis and Icinga GmbH.
diff --git a/doc/images/icingadb-architecture.png b/doc/images/icingadb-architecture.png
index 3d55ff7..c4af6eb 100644
--- a/doc/images/icingadb-architecture.png
+++ b/doc/images/icingadb-architecture.png
Binary files differ
diff --git a/doc/images/icingadb-daemon.png b/doc/images/icingadb-daemon.png
index de3f4c7..1c84152 100644
--- a/doc/images/icingadb-daemon.png
+++ b/doc/images/icingadb-daemon.png
Binary files differ
diff --git a/doc/images/icingadb-database.png b/doc/images/icingadb-database.png
index c300095..fc79725 100644
--- a/doc/images/icingadb-database.png
+++ b/doc/images/icingadb-database.png
Binary files differ
diff --git a/doc/images/icingadb-envs.png b/doc/images/icingadb-envs.png
index e9938d8..abff442 100644
--- a/doc/images/icingadb-envs.png
+++ b/doc/images/icingadb-envs.png
Binary files differ
diff --git a/doc/images/icingadb-ha.png b/doc/images/icingadb-ha.png
index a86b6a0..051689c 100644
--- a/doc/images/icingadb-ha.png
+++ b/doc/images/icingadb-ha.png
Binary files differ
diff --git a/doc/images/icingadb-web.png b/doc/images/icingadb-web.png
index 05a3e31..2d98334 100644
--- a/doc/images/icingadb-web.png
+++ b/doc/images/icingadb-web.png
Binary files differ
diff --git a/go.mod b/go.mod
index d313d1e..be40b0e 100644
--- a/go.mod
+++ b/go.mod
@@ -1,32 +1,32 @@
module github.com/icinga/icingadb
-go 1.18
+go 1.22
require (
github.com/creasty/defaults v1.7.0
- github.com/go-redis/redis/v8 v8.11.5
- github.com/go-sql-driver/mysql v1.7.1
- github.com/goccy/go-yaml v1.11.0
- github.com/google/uuid v1.3.0
+ github.com/go-sql-driver/mysql v1.8.1
+ github.com/goccy/go-yaml v1.11.3
+ github.com/google/uuid v1.6.0
github.com/jessevdk/go-flags v1.5.0
github.com/jmoiron/sqlx v1.3.5
github.com/lib/pq v1.10.9
- github.com/mattn/go-sqlite3 v1.14.17
+ github.com/mattn/go-sqlite3 v1.14.22
github.com/okzk/sdnotify v0.0.0-20180710141335-d9becc38acbd
github.com/pkg/errors v0.9.1
+ github.com/redis/go-redis/v9 v9.5.1
github.com/ssgreg/journald v1.0.0
- github.com/stretchr/testify v1.8.4
+ github.com/stretchr/testify v1.9.0
github.com/vbauerster/mpb/v6 v6.0.4
- go.uber.org/zap v1.25.0
+ go.uber.org/zap v1.27.0
golang.org/x/exp v0.0.0-20220613132600-b0d781184e0d
- golang.org/x/sync v0.3.0
+ golang.org/x/sync v0.7.0
)
require (
+ filippo.io/edwards25519 v1.1.0 // indirect
github.com/VividCortex/ewma v1.2.0 // indirect
github.com/acarl005/stripansi v0.0.0-20180116102854-5a71ef0e047d // indirect
- github.com/benbjohnson/clock v1.3.0 // indirect
- github.com/cespare/xxhash/v2 v2.1.2 // indirect
+ github.com/cespare/xxhash/v2 v2.2.0 // indirect
github.com/davecgh/go-spew v1.1.1 // indirect
github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f // indirect
github.com/fatih/color v1.13.0 // indirect
diff --git a/go.sum b/go.sum
index 2280500..baa2495 100644
--- a/go.sum
+++ b/go.sum
@@ -1,11 +1,15 @@
+filippo.io/edwards25519 v1.1.0 h1:FNf4tywRC1HmFuKW5xopWpigGjJKiJSV0Cqo0cJWDaA=
+filippo.io/edwards25519 v1.1.0/go.mod h1:BxyFTGdWcka3PhytdK4V28tE5sGfRvvvRV7EaN4VDT4=
github.com/VividCortex/ewma v1.2.0 h1:f58SaIzcDXrSy3kWaHNvuJgJ3Nmz59Zji6XoJR/q1ow=
github.com/VividCortex/ewma v1.2.0/go.mod h1:nz4BbCtbLyFDeC9SUHbtcT5644juEuWfUAUnGx7j5l4=
github.com/acarl005/stripansi v0.0.0-20180116102854-5a71ef0e047d h1:licZJFw2RwpHMqeKTCYkitsPqHNxTmd4SNR5r94FGM8=
github.com/acarl005/stripansi v0.0.0-20180116102854-5a71ef0e047d/go.mod h1:asat636LX7Bqt5lYEZ27JNDcqxfjdBQuJ/MM4CN/Lzo=
-github.com/benbjohnson/clock v1.3.0 h1:ip6w0uFQkncKQ979AypyG0ER7mqUSBdKLOgAle/AT8A=
-github.com/benbjohnson/clock v1.3.0/go.mod h1:J11/hYXuz8f4ySSvYwY0FKfm+ezbsZBKZxNJlLklBHA=
-github.com/cespare/xxhash/v2 v2.1.2 h1:YRXhKfTDauu4ajMg1TPgFO5jnlC2HCbmLXMcTG5cbYE=
-github.com/cespare/xxhash/v2 v2.1.2/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
+github.com/bsm/ginkgo/v2 v2.12.0 h1:Ny8MWAHyOepLGlLKYmXG4IEkioBysk6GpaRTLC8zwWs=
+github.com/bsm/ginkgo/v2 v2.12.0/go.mod h1:SwYbGRRDovPVboqFv0tPTcG1sN61LM1Z4ARdbAV9g4c=
+github.com/bsm/gomega v1.27.10 h1:yeMWxP2pV2fG3FgAODIY8EiRE3dy0aeFYt4l7wh6yKA=
+github.com/bsm/gomega v1.27.10/go.mod h1:JyEr/xRbxbtgWNi8tIEVPUYZ5Dzef52k01W3YH0H+O0=
+github.com/cespare/xxhash/v2 v2.2.0 h1:DC2CZ1Ep5Y4k3ZQ899DldepgrayRUGE6BBZ/cd9Cj44=
+github.com/cespare/xxhash/v2 v2.2.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
github.com/creasty/defaults v1.7.0 h1:eNdqZvc5B509z18lD8yc212CAqJNvfT1Jq6L8WowdBA=
github.com/creasty/defaults v1.7.0/go.mod h1:iGzKe6pbEHnpMPtfDXZEr0NVxWnPTjb1bbDy08fPzYM=
github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
@@ -14,25 +18,27 @@ github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f h1:lO4WD4F/r
github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f/go.mod h1:cuUVRXasLTGF7a8hSLbxyZXjz+1KgoB3wDUb6vlszIc=
github.com/fatih/color v1.13.0 h1:8LOYc1KYPPmyKMuN8QV2DNRWNbLo6LZ0iLs8+mlH53w=
github.com/fatih/color v1.13.0/go.mod h1:kLAiJbzzSOZDVNGyDpeOxJ47H46qBXwg5ILebYFFOfk=
-github.com/fsnotify/fsnotify v1.4.9 h1:hsms1Qyu0jgnwNXIxa+/V/PDsU6CfLf6CNO8H7IWoS4=
github.com/go-playground/locales v0.13.0 h1:HyWk6mgj5qFqCT5fjGBuRArbVDfE4hi8+e8ceBS/t7Q=
+github.com/go-playground/locales v0.13.0/go.mod h1:taPMhCMXrRLJO55olJkUXHZBHCxTMfnGwq/HNwmWNS8=
github.com/go-playground/universal-translator v0.17.0 h1:icxd5fm+REJzpZx7ZfpaD876Lmtgy7VtROAbHHXk8no=
+github.com/go-playground/universal-translator v0.17.0/go.mod h1:UkSxE5sNxxRwHyU+Scu5vgOQjsIJAF8j9muTVoKLVtA=
github.com/go-playground/validator/v10 v10.4.1 h1:pH2c5ADXtd66mxoE0Zm9SUhxE20r7aM3F26W0hOn+GE=
-github.com/go-redis/redis/v8 v8.11.5 h1:AcZZR7igkdvfVmQTPnu9WE37LRrO/YrBH5zWyjDC0oI=
-github.com/go-redis/redis/v8 v8.11.5/go.mod h1:gREzHqY1hg6oD9ngVRbLStwAWKhA0FEgq8Jd4h5lpwo=
+github.com/go-playground/validator/v10 v10.4.1/go.mod h1:nlOn6nFhuKACm19sB/8EGNn9GlaMV7XkbRSipzJ0Ii4=
github.com/go-sql-driver/mysql v1.6.0/go.mod h1:DCzpHaOWr8IXmIStZouvnhqoel9Qv2LBy8hT2VhHyBg=
-github.com/go-sql-driver/mysql v1.7.1 h1:lUIinVbN1DY0xBg0eMOzmmtGoHwWBbvnWubQUrtU8EI=
-github.com/go-sql-driver/mysql v1.7.1/go.mod h1:OXbVy3sEdcQ2Doequ6Z5BW6fXNQTmx+9S1MCJN5yJMI=
-github.com/goccy/go-yaml v1.11.0 h1:n7Z+zx8S9f9KgzG6KtQKf+kwqXZlLNR2F6018Dgau54=
-github.com/goccy/go-yaml v1.11.0/go.mod h1:H+mJrWtjPTJAHvRbV09MCK9xYwODM+wRTVFFTWckfng=
+github.com/go-sql-driver/mysql v1.8.1 h1:LedoTUt/eveggdHS9qUFC1EFSa8bU2+1pZjSRpvNJ1Y=
+github.com/go-sql-driver/mysql v1.8.1/go.mod h1:wEBSXgmK//2ZFJyE+qWnIsVGmvmEKlqwuVSjsCm7DZg=
+github.com/goccy/go-yaml v1.11.3 h1:B3W9IdWbvrUu2OYQGwvU1nZtvMQJPBKgBUuweJjLj6I=
+github.com/goccy/go-yaml v1.11.3/go.mod h1:wKnAMd44+9JAAnGQpWVEgBzGt3YuTaQ4uXoHvE4m7WU=
github.com/google/go-cmp v0.5.9 h1:O2Tfq5qg4qc4AmwVlvv0oLiVAGB7enBSJ2x2DqQFi38=
-github.com/google/uuid v1.3.0 h1:t6JiXgmwXMjEs8VusXIJk2BXHsn+wx8BZdTaoZ5fu7I=
-github.com/google/uuid v1.3.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
+github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
+github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0=
+github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
github.com/jessevdk/go-flags v1.5.0 h1:1jKYvbxEjfUl0fmqTCOfonvskHHXMjBySTLW4y9LFvc=
github.com/jessevdk/go-flags v1.5.0/go.mod h1:Fw0T6WPc1dYxT4mKEZRfG5kJhaTDP9pj1c2EWnYs/m4=
github.com/jmoiron/sqlx v1.3.5 h1:vFFPA71p1o5gAeqtEAwLU4dnX2napprKtHr7PYIcN3g=
github.com/jmoiron/sqlx v1.3.5/go.mod h1:nRVWtLre0KfCLJvgxzCsLVMogSvQ1zNJtpYr2Ccp0mQ=
github.com/leodido/go-urn v1.2.0 h1:hpXL4XnriNwQ/ABnpepYM/1vCLWNDfUNts8dX3xTG6Y=
+github.com/leodido/go-urn v1.2.0/go.mod h1:+8+nEpDfqqsY+g338gtMEUOtuK+4dEMhiQEgxpxOKII=
github.com/lib/pq v1.2.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo=
github.com/lib/pq v1.10.9 h1:YXG7RB+JIjhP29X+OtkiDnYaXQwpS4JEWq7dtCCRUEw=
github.com/lib/pq v1.10.9/go.mod h1:AlVN5x4E4T544tWzH6hKfbfQvm3HdbOxrmggDNAPY9o=
@@ -46,37 +52,37 @@ github.com/mattn/go-isatty v0.0.16/go.mod h1:kYGgaQfpe5nmfYZH+SKPsOc2e4SrIfOl2e/
github.com/mattn/go-runewidth v0.0.12 h1:Y41i/hVW3Pgwr8gV+J23B9YEY0zxjptBuCWEaxmAOow=
github.com/mattn/go-runewidth v0.0.12/go.mod h1:RAqKPSqVFrSLVXbA8x7dzmKdmGzieGRCM46jaSJTDAk=
github.com/mattn/go-sqlite3 v1.14.6/go.mod h1:NyWgC/yNuGj7Q9rpYnZvas74GogHl5/Z4A/KQRfk6bU=
-github.com/mattn/go-sqlite3 v1.14.17 h1:mCRHCLDUBXgpKAqIKsaAaAsrAlbkeomtRFKXh2L6YIM=
-github.com/mattn/go-sqlite3 v1.14.17/go.mod h1:2eHXhiwb8IkHr+BDWZGa96P6+rkvnG63S2DGjv9HUNg=
-github.com/nxadm/tail v1.4.8 h1:nPr65rt6Y5JFSKQO7qToXr7pePgD6Gwiw05lkbyAQTE=
+github.com/mattn/go-sqlite3 v1.14.22 h1:2gZY6PC6kBnID23Tichd1K+Z0oS6nE/XwU+Vz/5o4kU=
+github.com/mattn/go-sqlite3 v1.14.22/go.mod h1:Uh1q+B4BYcTPb+yiD3kU8Ct7aC0hY9fxUwlHK0RXw+Y=
github.com/okzk/sdnotify v0.0.0-20180710141335-d9becc38acbd h1:+iAPaTbi1gZpcpDwe/BW1fx7Xoesv69hLNGPheoyhBs=
github.com/okzk/sdnotify v0.0.0-20180710141335-d9becc38acbd/go.mod h1:4soZNh0zW0LtYGdQ416i0jO0EIqMGcbtaspRS4BDvRQ=
-github.com/onsi/ginkgo v1.16.5 h1:8xi0RTUf59SOSfEtZMvwTvXYMzG4gV23XVHOZiXNtnE=
-github.com/onsi/gomega v1.18.1 h1:M1GfJqGRrBrrGGsbxzV5dqM2U2ApXefZCQpkukxYRLE=
github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4=
github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
+github.com/redis/go-redis/v9 v9.5.1 h1:H1X4D3yHPaYrkL5X06Wh6xNVM/pX0Ft4RV0vMGvLBh8=
+github.com/redis/go-redis/v9 v9.5.1/go.mod h1:hdY0cQFCN4fnSYT6TkisLufl/4W5UIXyv0b/CLO2V2M=
github.com/rivo/uniseg v0.1.0/go.mod h1:J6wj4VEh+S6ZtnVlnTBMWIodfgj8LQOQFoIToxlJtxc=
github.com/rivo/uniseg v0.2.0 h1:S1pD9weZBuJdFmowNwbpi7BJ8TNftyUImj/0WQi72jY=
github.com/rivo/uniseg v0.2.0/go.mod h1:J6wj4VEh+S6ZtnVlnTBMWIodfgj8LQOQFoIToxlJtxc=
github.com/ssgreg/journald v1.0.0 h1:0YmTDPJXxcWDPba12qNMdO6TxvfkFSYpFIJ31CwmLcU=
github.com/ssgreg/journald v1.0.0/go.mod h1:RUckwmTM8ghGWPslq2+ZBZzbb9/2KgjzYZ4JEP+oRt0=
-github.com/stretchr/testify v1.8.4 h1:CcVxjf3Q8PM0mHUKJCdn+eZZtm5yQwehR5yeSVQQcUk=
-github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo=
+github.com/stretchr/testify v1.9.0 h1:HtqpIVDClZ4nwg75+f6Lvsy/wHu+3BoSGCbBAcpTsTg=
+github.com/stretchr/testify v1.9.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY=
github.com/vbauerster/mpb/v6 v6.0.4 h1:h6J5zM/2wimP5Hj00unQuV8qbo5EPcj6wbkCqgj7KcY=
github.com/vbauerster/mpb/v6 v6.0.4/go.mod h1:a/+JT57gqh6Du0Ay5jSR+uBMfXGdlR7VQlGP52fJxLM=
-go.uber.org/goleak v1.2.0 h1:xqgm/S+aQvhWFTtR0XK3Jvg7z8kGV8P4X14IzwN3Eqk=
+go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto=
+go.uber.org/goleak v1.3.0/go.mod h1:CoHD4mav9JJNrW/WLlf7HGZPjdw8EucARQHekz1X6bE=
go.uber.org/multierr v1.10.0 h1:S0h4aNzvfcFsC3dRF1jLoaov7oRaKqRGC/pUEJ2yvPQ=
go.uber.org/multierr v1.10.0/go.mod h1:20+QtiLqy0Nd6FdQB9TLXag12DsQkrbs3htMFfDN80Y=
-go.uber.org/zap v1.25.0 h1:4Hvk6GtkucQ790dqmj7l1eEnRdKm3k3ZUrUMS2d5+5c=
-go.uber.org/zap v1.25.0/go.mod h1:JIAUzQIH94IC4fOJQm7gMmBJP5k7wQfdcnYdPoEXJYk=
+go.uber.org/zap v1.27.0 h1:aJMhYGrd5QSmlpLMr2MftRKl7t8J8PTZPA732ud/XR8=
+go.uber.org/zap v1.27.0/go.mod h1:GB2qFLM7cTU87MWRP2mPIjqfIDnGu+VIO4V/SdhGo2E=
golang.org/x/crypto v0.7.0 h1:AvwMYaRytfdeVt3u6mLaxYtErKYjxA2OXjJ1HHq6t3A=
+golang.org/x/crypto v0.7.0/go.mod h1:pYwdfH91IfpZVANVyUOhSIPZaFoJGxTFbZhFTx+dXZU=
golang.org/x/exp v0.0.0-20220613132600-b0d781184e0d h1:vtUKgx8dahOomfFzLREU8nSv25YHnTgLBn4rDnWZdU0=
golang.org/x/exp v0.0.0-20220613132600-b0d781184e0d/go.mod h1:Kr81I6Kryrl9sr8s2FK3vxD90NdsKWRuOIl2O4CvYbA=
-golang.org/x/net v0.0.0-20210428140749-89ef3d95e781 h1:DzZ89McO9/gWPsQXS/FVKAlG02ZjaQ6AlZRBimEYOd0=
-golang.org/x/sync v0.3.0 h1:ftCYgMx6zT/asHUrPw8BLLscYtGznsLAnjq5RH9P66E=
-golang.org/x/sync v0.3.0/go.mod h1:FU7BRWz2tNW+3quACPkgCx/L+uEAv1htQ0V83Z9Rj+Y=
+golang.org/x/sync v0.7.0 h1:YsImfSBoP9QPYL0xyKJPq0gcaJdG3rInoqxTWbfQu9M=
+golang.org/x/sync v0.7.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk=
golang.org/x/sys v0.0.0-20200116001909-b77594299b42/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200223170610-d5e6a3e2c0ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210320140829-1e4c9ba3b0c4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
@@ -85,12 +91,9 @@ golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBc
golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.6.0 h1:MVltZSvRTcU2ljQOhs94SXPftV6DCNnZViHeQps87pQ=
golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
-golang.org/x/text v0.3.6 h1:aRYxNxv6iGQlyVaZmk6ZgYEDa+Jg18DxebPSrd6bg1M=
golang.org/x/xerrors v0.0.0-20220907171357-04be3eba64a2 h1:H2TDz8ibqkAF6YGhCdN3jS9O0/s90v0rJh3X/OLHEUk=
golang.org/x/xerrors v0.0.0-20220907171357-04be3eba64a2/go.mod h1:K8+ghG5WaK9qNqU5K3HdILfMLy1f3aNYFI/wnl100a8=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
-gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 h1:uRGJdciOHaEIrze2W8Q3AKkepLTh2hOroT7a+7czfdQ=
-gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY=
gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
diff --git a/internal/version.go b/internal/version.go
index 1b7adfb..937c736 100644
--- a/internal/version.go
+++ b/internal/version.go
@@ -7,4 +7,4 @@ import (
// Version contains version and Git commit information.
//
// The placeholders are replaced on `git archive` using the `export-subst` attribute.
-var Version = version.Version("1.1.1", "v1.1.1", "6c8b52f2033cd94466863c92d3df632e3c87743c")
+var Version = version.Version("1.2.0", "v1.2.0", "a0a65af0260b9821e4d72692b9c8fda545b6aeca")
diff --git a/pkg/backoff/backoff.go b/pkg/backoff/backoff.go
index 6ce7bee..e79a1ee 100644
--- a/pkg/backoff/backoff.go
+++ b/pkg/backoff/backoff.go
@@ -14,10 +14,10 @@ type Backoff func(uint64) time.Duration
// It panics if min >= max.
func NewExponentialWithJitter(min, max time.Duration) Backoff {
if min <= 0 {
- min = time.Millisecond * 100
+ min = 100 * time.Millisecond
}
if max <= 0 {
- max = time.Second * 10
+ max = 10 * time.Second
}
if min >= max {
panic("max must be larger than min")
diff --git a/pkg/config/config.go b/pkg/config/config.go
index a683014..744f4c3 100644
--- a/pkg/config/config.go
+++ b/pkg/config/config.go
@@ -1,15 +1,12 @@
package config
import (
- "bytes"
"crypto/tls"
"crypto/x509"
- "fmt"
"github.com/creasty/defaults"
"github.com/goccy/go-yaml"
"github.com/jessevdk/go-flags"
"github.com/pkg/errors"
- "io/ioutil"
"os"
)
@@ -19,8 +16,6 @@ type Config struct {
Redis Redis `yaml:"redis"`
Logging Logging `yaml:"logging"`
Retention Retention `yaml:"retention"`
-
- DecodeWarning error `yaml:"-"`
}
// Validate checks constraints in the supplied configuration and returns an error if they are violated.
@@ -51,13 +46,14 @@ type Flags struct {
// FromYAMLFile returns a new Config value created from the given YAML config file.
func FromYAMLFile(name string) (*Config, error) {
- f, err := os.ReadFile(name)
+ f, err := os.Open(name)
if err != nil {
- return nil, errors.Wrap(err, "can't read YAML file "+name)
+ return nil, errors.Wrap(err, "can't open YAML file "+name)
}
+ defer f.Close()
c := &Config{}
- d := yaml.NewDecoder(bytes.NewReader(f))
+ d := yaml.NewDecoder(f, yaml.DisallowUnknownField())
if err := defaults.Set(c); err != nil {
return nil, errors.Wrap(err, "can't set config defaults")
@@ -67,16 +63,8 @@ func FromYAMLFile(name string) (*Config, error) {
return nil, errors.Wrap(err, "can't parse YAML file "+name)
}
- // Decode again with yaml.DisallowUnknownField() (like v1.2 will do) and issue a warning if it returns an error.
- c.DecodeWarning = yaml.NewDecoder(bytes.NewReader(f), yaml.DisallowUnknownField()).Decode(&Config{})
-
if err := c.Validate(); err != nil {
- const msg = "invalid configuration"
- if warn := c.DecodeWarning; warn != nil {
- return nil, fmt.Errorf("%s: %w\n\nwarning: ignored unknown config option:\n\n%v", msg, err, warn)
- } else {
- return nil, errors.Wrap(err, msg)
- }
+ return nil, errors.Wrap(err, "invalid configuration")
}
return c, nil
@@ -129,7 +117,7 @@ func (t *TLS) MakeConfig(serverName string) (*tls.Config, error) {
if t.Insecure {
tlsConfig.InsecureSkipVerify = true
} else if t.Ca != "" {
- raw, err := ioutil.ReadFile(t.Ca)
+ raw, err := os.ReadFile(t.Ca)
if err != nil {
return nil, errors.Wrap(err, "can't read CA file")
}
diff --git a/pkg/config/config_test.go b/pkg/config/config_test.go
index 2418094..94e3773 100644
--- a/pkg/config/config_test.go
+++ b/pkg/config/config_test.go
@@ -20,34 +20,33 @@ redis:
host: 2001:db8::1
`
- miniOutput := &Config{}
- _ = defaults.Set(miniOutput)
-
- miniOutput.Database.Host = "192.0.2.1"
- miniOutput.Database.Database = "icingadb"
- miniOutput.Database.User = "icingadb"
- miniOutput.Database.Password = "icingadb"
-
- miniOutput.Redis.Host = "2001:db8::1"
- miniOutput.Logging.Output = logging.CONSOLE
-
subtests := []struct {
name string
input string
output *Config
- warn bool
}{
{
- name: "mini",
- input: miniConf,
- output: miniOutput,
- warn: false,
+ name: "mini",
+ input: miniConf,
+ output: func() *Config {
+ c := &Config{}
+ _ = defaults.Set(c)
+
+ c.Database.Host = "192.0.2.1"
+ c.Database.Database = "icingadb"
+ c.Database.User = "icingadb"
+ c.Database.Password = "icingadb"
+
+ c.Redis.Host = "2001:db8::1"
+ c.Logging.Output = logging.CONSOLE
+
+ return c
+ }(),
},
{
name: "mini-with-unknown",
input: miniConf + "\nunknown: 42",
- output: miniOutput,
- warn: true,
+ output: nil,
},
}
@@ -59,19 +58,12 @@ redis:
require.NoError(t, os.WriteFile(tempFile.Name(), []byte(st.input), 0o600))
- actual, err := FromYAMLFile(tempFile.Name())
- require.NoError(t, err)
-
- if st.warn {
- require.Error(t, actual.DecodeWarning, "reading config should produce a warning")
-
- // Reset the warning so that the following require.Equal() doesn't try to compare it.
- actual.DecodeWarning = nil
+ if actual, err := FromYAMLFile(tempFile.Name()); st.output == nil {
+ require.Error(t, err)
} else {
- require.NoError(t, actual.DecodeWarning, "reading config should not produce a warning")
+ require.NoError(t, err)
+ require.Equal(t, st.output, actual)
}
-
- require.Equal(t, st.output, actual)
})
}
}
diff --git a/pkg/config/database.go b/pkg/config/database.go
index b42ff8e..0895d26 100644
--- a/pkg/config/database.go
+++ b/pkg/config/database.go
@@ -1,25 +1,25 @@
package config
import (
+ "context"
+ "database/sql"
+ "database/sql/driver"
"fmt"
"github.com/go-sql-driver/mysql"
- "github.com/icinga/icingadb/pkg/driver"
"github.com/icinga/icingadb/pkg/icingadb"
"github.com/icinga/icingadb/pkg/logging"
"github.com/icinga/icingadb/pkg/utils"
"github.com/jmoiron/sqlx"
"github.com/jmoiron/sqlx/reflectx"
+ "github.com/lib/pq"
"github.com/pkg/errors"
"net"
"net/url"
"strconv"
"strings"
- "sync"
"time"
)
-var registerDriverOnce sync.Once
-
// Database defines database client configuration.
type Database struct {
Type string `yaml:"type" default:"mysql"`
@@ -35,17 +35,14 @@ type Database struct {
// Open prepares the DSN string and driver configuration,
// calls sqlx.Open, but returns *icingadb.DB.
func (d *Database) Open(logger *logging.Logger) (*icingadb.DB, error) {
- registerDriverOnce.Do(func() {
- driver.Register(logger)
- })
-
- var dsn string
+ var db *sqlx.DB
switch d.Type {
case "mysql":
config := mysql.NewConfig()
config.User = d.User
config.Passwd = d.Password
+ config.Logger = icingadb.MysqlFuncLogger(logger.Debug)
if d.isUnixAddr() {
config.Net = "unix"
@@ -61,7 +58,7 @@ func (d *Database) Open(logger *logging.Logger) (*icingadb.DB, error) {
config.DBName = d.Database
config.Timeout = time.Minute
- config.Params = map[string]string{"sql_mode": "ANSI_QUOTES"}
+ config.Params = map[string]string{"sql_mode": "'TRADITIONAL,ANSI_QUOTES'"}
tlsConfig, err := d.TlsOptions.MakeConfig(d.Host)
if err != nil {
@@ -75,7 +72,17 @@ func (d *Database) Open(logger *logging.Logger) (*icingadb.DB, error) {
}
}
- dsn = config.FormatDSN()
+ c, err := mysql.NewConnector(config)
+ if err != nil {
+ return nil, errors.Wrap(err, "can't open mysql database")
+ }
+
+ wsrepSyncWait := int64(d.Options.WsrepSyncWait)
+ setWsrepSyncWait := func(ctx context.Context, conn driver.Conn) error {
+ return setGaleraOpts(ctx, conn, wsrepSyncWait)
+ }
+
+ db = sqlx.NewDb(sql.OpenDB(icingadb.NewConnector(c, logger, setWsrepSyncWait)), icingadb.MySQL)
case "pgsql":
uri := &url.URL{
Scheme: "postgres",
@@ -123,16 +130,17 @@ func (d *Database) Open(logger *logging.Logger) (*icingadb.DB, error) {
}
uri.RawQuery = query.Encode()
- dsn = uri.String()
+
+ connector, err := pq.NewConnector(uri.String())
+ if err != nil {
+ return nil, errors.Wrap(err, "can't open pgsql database")
+ }
+
+ db = sqlx.NewDb(sql.OpenDB(icingadb.NewConnector(connector, logger, nil)), icingadb.PostgreSQL)
default:
return nil, unknownDbType(d.Type)
}
- db, err := sqlx.Open("icingadb-"+d.Type, dsn)
- if err != nil {
- return nil, errors.Wrap(err, "can't open database")
- }
-
db.SetMaxIdleConns(d.Options.MaxConnections / 3)
db.SetMaxOpenConns(d.Options.MaxConnections)
@@ -173,3 +181,36 @@ func (d *Database) isUnixAddr() bool {
func unknownDbType(t string) error {
return errors.Errorf(`unknown database type %q, must be one of: "mysql", "pgsql"`, t)
}
+
+// setGaleraOpts sets the "wsrep_sync_wait" variable for each session ensures that causality checks are performed
+// before execution and that each statement is executed on a fully synchronized node. Doing so prevents foreign key
+// violation when inserting into dependent tables on different MariaDB/MySQL nodes. When using MySQL single nodes,
+// the "SET SESSION" command will fail with "Unknown system variable (1193)" and will therefore be silently dropped.
+//
+// https://mariadb.com/kb/en/galera-cluster-system-variables/#wsrep_sync_wait
+func setGaleraOpts(ctx context.Context, conn driver.Conn, wsrepSyncWait int64) error {
+ const galeraOpts = "SET SESSION wsrep_sync_wait=?"
+
+ stmt, err := conn.(driver.ConnPrepareContext).PrepareContext(ctx, galeraOpts)
+ if err != nil {
+ if errors.Is(err, &mysql.MySQLError{Number: 1193}) { // Unknown system variable
+ return nil
+ }
+
+ return errors.Wrap(err, "cannot prepare "+galeraOpts)
+ }
+ // This is just for an unexpected exit and any returned error can safely be ignored and in case
+ // of the normal function exit, the stmt is closed manually, and its error is handled gracefully.
+ defer func() { _ = stmt.Close() }()
+
+ _, err = stmt.(driver.StmtExecContext).ExecContext(ctx, []driver.NamedValue{{Value: wsrepSyncWait}})
+ if err != nil {
+ return errors.Wrap(err, "cannot execute "+galeraOpts)
+ }
+
+ if err = stmt.Close(); err != nil {
+ return errors.Wrap(err, "cannot close prepared statement "+galeraOpts)
+ }
+
+ return nil
+}
diff --git a/pkg/config/redis.go b/pkg/config/redis.go
index 38571e3..ad8b31a 100644
--- a/pkg/config/redis.go
+++ b/pkg/config/redis.go
@@ -4,13 +4,13 @@ import (
"context"
"crypto/tls"
"fmt"
- "github.com/go-redis/redis/v8"
"github.com/icinga/icingadb/pkg/backoff"
"github.com/icinga/icingadb/pkg/icingaredis"
"github.com/icinga/icingadb/pkg/logging"
"github.com/icinga/icingadb/pkg/retry"
"github.com/icinga/icingadb/pkg/utils"
"github.com/pkg/errors"
+ "github.com/redis/go-redis/v9"
"go.uber.org/zap"
"net"
"strings"
@@ -85,16 +85,16 @@ func dialWithLogging(dialer ctxDialerFunc, logger *logging.Logger) ctxDialerFunc
retry.Retryable,
backoff.NewExponentialWithJitter(1*time.Millisecond, 1*time.Second),
retry.Settings{
- Timeout: 5 * time.Minute,
- OnError: func(_ time.Duration, _ uint64, err, lastErr error) {
+ Timeout: retry.DefaultTimeout,
+ OnRetryableError: func(_ time.Duration, _ uint64, err, lastErr error) {
if lastErr == nil || err.Error() != lastErr.Error() {
logger.Warnw("Can't connect to Redis. Retrying", zap.Error(err))
}
},
OnSuccess: func(elapsed time.Duration, attempt uint64, _ error) {
- if attempt > 0 {
+ if attempt > 1 {
logger.Infow("Reconnected to Redis",
- zap.Duration("after", elapsed), zap.Uint64("attempts", attempt+1))
+ zap.Duration("after", elapsed), zap.Uint64("attempts", attempt))
}
},
},
diff --git a/pkg/driver/driver.go b/pkg/driver/driver.go
deleted file mode 100644
index f529db4..0000000
--- a/pkg/driver/driver.go
+++ /dev/null
@@ -1,114 +0,0 @@
-package driver
-
-import (
- "context"
- "database/sql"
- "database/sql/driver"
- "github.com/go-sql-driver/mysql"
- "github.com/icinga/icingadb/pkg/backoff"
- "github.com/icinga/icingadb/pkg/icingaredis/telemetry"
- "github.com/icinga/icingadb/pkg/logging"
- "github.com/icinga/icingadb/pkg/retry"
- "github.com/jmoiron/sqlx"
- "github.com/pkg/errors"
- "go.uber.org/zap"
- "time"
-)
-
-const MySQL = "icingadb-mysql"
-const PostgreSQL = "icingadb-pgsql"
-
-var timeout = time.Minute * 5
-
-// RetryConnector wraps driver.Connector with retry logic.
-type RetryConnector struct {
- driver.Connector
- driver Driver
-}
-
-// Connect implements part of the driver.Connector interface.
-func (c RetryConnector) Connect(ctx context.Context) (driver.Conn, error) {
- var conn driver.Conn
- err := errors.Wrap(retry.WithBackoff(
- ctx,
- func(ctx context.Context) (err error) {
- conn, err = c.Connector.Connect(ctx)
- return
- },
- shouldRetry,
- backoff.NewExponentialWithJitter(time.Millisecond*128, time.Minute*1),
- retry.Settings{
- Timeout: timeout,
- OnError: func(_ time.Duration, _ uint64, err, lastErr error) {
- telemetry.UpdateCurrentDbConnErr(err)
-
- if lastErr == nil || err.Error() != lastErr.Error() {
- c.driver.Logger.Warnw("Can't connect to database. Retrying", zap.Error(err))
- }
- },
- OnSuccess: func(elapsed time.Duration, attempt uint64, _ error) {
- telemetry.UpdateCurrentDbConnErr(nil)
-
- if attempt > 0 {
- c.driver.Logger.Infow("Reconnected to database",
- zap.Duration("after", elapsed), zap.Uint64("attempts", attempt+1))
- }
- },
- },
- ), "can't connect to database")
- return conn, err
-}
-
-// Driver implements part of the driver.Connector interface.
-func (c RetryConnector) Driver() driver.Driver {
- return c.driver
-}
-
-// Driver wraps a driver.Driver that also must implement driver.DriverContext with logging capabilities and provides our RetryConnector.
-type Driver struct {
- ctxDriver
- Logger *logging.Logger
-}
-
-// OpenConnector implements the DriverContext interface.
-func (d Driver) OpenConnector(name string) (driver.Connector, error) {
- c, err := d.ctxDriver.OpenConnector(name)
- if err != nil {
- return nil, err
- }
-
- return &RetryConnector{
- driver: d,
- Connector: c,
- }, nil
-}
-
-// Register makes our database Driver available under the name "icingadb-*sql".
-func Register(logger *logging.Logger) {
- sql.Register(MySQL, &Driver{ctxDriver: &mysql.MySQLDriver{}, Logger: logger})
- sql.Register(PostgreSQL, &Driver{ctxDriver: PgSQLDriver{}, Logger: logger})
- _ = mysql.SetLogger(mysqlLogger(func(v ...interface{}) { logger.Debug(v...) }))
- sqlx.BindDriver(PostgreSQL, sqlx.DOLLAR)
-}
-
-// ctxDriver helps ensure that we only support drivers that implement driver.Driver and driver.DriverContext.
-type ctxDriver interface {
- driver.Driver
- driver.DriverContext
-}
-
-// mysqlLogger is an adapter that allows ordinary functions to be used as a logger for mysql.SetLogger.
-type mysqlLogger func(v ...interface{})
-
-// Print implements the mysql.Logger interface.
-func (log mysqlLogger) Print(v ...interface{}) {
- log(v)
-}
-
-func shouldRetry(err error) bool {
- if errors.Is(err, driver.ErrBadConn) {
- return true
- }
-
- return retry.Retryable(err)
-}
diff --git a/pkg/driver/pgsql.go b/pkg/driver/pgsql.go
deleted file mode 100644
index 3c88fe0..0000000
--- a/pkg/driver/pgsql.go
+++ /dev/null
@@ -1,22 +0,0 @@
-package driver
-
-import (
- "database/sql/driver"
- "github.com/lib/pq"
-)
-
-// PgSQLDriver extends pq.Driver with driver.DriverContext compliance.
-type PgSQLDriver struct {
- pq.Driver
-}
-
-// Assert interface compliance.
-var (
- _ driver.Driver = PgSQLDriver{}
- _ driver.DriverContext = PgSQLDriver{}
-)
-
-// OpenConnector implements the driver.DriverContext interface.
-func (PgSQLDriver) OpenConnector(name string) (driver.Connector, error) {
- return pq.NewConnector(name)
-}
diff --git a/pkg/flatten/flatten.go b/pkg/flatten/flatten.go
index 94a6e7e..698eff1 100644
--- a/pkg/flatten/flatten.go
+++ b/pkg/flatten/flatten.go
@@ -1,7 +1,6 @@
package flatten
import (
- "database/sql"
"fmt"
"github.com/icinga/icingadb/pkg/types"
"strconv"
@@ -32,12 +31,12 @@ func Flatten(value interface{}, prefix string) map[string]types.String {
for i, v := range value {
flatten(key+"["+strconv.Itoa(i)+"]", v)
}
+ case nil:
+ flattened[key] = types.MakeString("null")
+ case float64:
+ flattened[key] = types.MakeString(strconv.FormatFloat(value, 'f', -1, 64))
default:
- val := "null"
- if value != nil {
- val = fmt.Sprintf("%v", value)
- }
- flattened[key] = types.String{NullString: sql.NullString{String: val, Valid: true}}
+ flattened[key] = types.MakeString(fmt.Sprintf("%v", value))
}
}
diff --git a/pkg/flatten/flatten_test.go b/pkg/flatten/flatten_test.go
new file mode 100644
index 0000000..f84b8d9
--- /dev/null
+++ b/pkg/flatten/flatten_test.go
@@ -0,0 +1,45 @@
+package flatten
+
+import (
+ "github.com/icinga/icingadb/pkg/types"
+ "github.com/stretchr/testify/assert"
+ "testing"
+)
+
+func TestFlatten(t *testing.T) {
+ for _, st := range []struct {
+ name string
+ prefix string
+ value any
+ output map[string]types.String
+ }{
+ {"nil", "a", nil, map[string]types.String{"a": types.MakeString("null")}},
+ {"bool", "b", true, map[string]types.String{"b": types.MakeString("true")}},
+ {"int", "c", 42, map[string]types.String{"c": types.MakeString("42")}},
+ {"float", "d", 77.7, map[string]types.String{"d": types.MakeString("77.7")}},
+ {"large_float", "e", 1e23, map[string]types.String{"e": types.MakeString("100000000000000000000000")}},
+ {"string", "f", "\x00", map[string]types.String{"f": types.MakeString("\x00")}},
+ {"nil_slice", "g", []any(nil), map[string]types.String{"g": {}}},
+ {"empty_slice", "h", []any{}, map[string]types.String{"h": {}}},
+ {"slice", "i", []any{nil}, map[string]types.String{"i[0]": types.MakeString("null")}},
+ {"nil_map", "j", map[string]any(nil), map[string]types.String{"j": {}}},
+ {"empty_map", "k", map[string]any{}, map[string]types.String{"k": {}}},
+ {"map", "l", map[string]any{" ": nil}, map[string]types.String{"l. ": types.MakeString("null")}},
+ {"map_with_slice", "m", map[string]any{"\t": []any{"ä", "ö", "ü"}, "ß": "s"}, map[string]types.String{
+ "m.\t[0]": types.MakeString("ä"),
+ "m.\t[1]": types.MakeString("ö"),
+ "m.\t[2]": types.MakeString("ü"),
+ "m.ß": types.MakeString("s"),
+ }},
+ {"slice_with_map", "n", []any{map[string]any{"ä": "a", "ö": "o", "ü": "u"}, "ß"}, map[string]types.String{
+ "n[0].ä": types.MakeString("a"),
+ "n[0].ö": types.MakeString("o"),
+ "n[0].ü": types.MakeString("u"),
+ "n[1]": types.MakeString("ß"),
+ }},
+ } {
+ t.Run(st.name, func(t *testing.T) {
+ assert.Equal(t, st.output, Flatten(st.value, st.prefix))
+ })
+ }
+}
diff --git a/pkg/icingadb/cleanup.go b/pkg/icingadb/cleanup.go
index e57eafa..22bf02d 100644
--- a/pkg/icingadb/cleanup.go
+++ b/pkg/icingadb/cleanup.go
@@ -4,8 +4,9 @@ import (
"context"
"fmt"
"github.com/icinga/icingadb/internal"
+ "github.com/icinga/icingadb/pkg/backoff"
"github.com/icinga/icingadb/pkg/com"
- "github.com/icinga/icingadb/pkg/driver"
+ "github.com/icinga/icingadb/pkg/retry"
"github.com/icinga/icingadb/pkg/types"
"time"
)
@@ -20,10 +21,10 @@ type CleanupStmt struct {
// Build assembles the cleanup statement for the specified database driver with the given limit.
func (stmt *CleanupStmt) Build(driverName string, limit uint64) string {
switch driverName {
- case driver.MySQL, "mysql":
+ case MySQL:
return fmt.Sprintf(`DELETE FROM %[1]s WHERE environment_id = :environment_id AND %[2]s < :time
ORDER BY %[2]s LIMIT %[3]d`, stmt.Table, stmt.Column, limit)
- case driver.PostgreSQL, "postgres":
+ case PostgreSQL:
return fmt.Sprintf(`WITH rows AS (
SELECT %[1]s FROM %[2]s WHERE environment_id = :environment_id AND %[3]s < :time ORDER BY %[3]s LIMIT %[4]d
)
@@ -41,32 +42,46 @@ func (db *DB) CleanupOlderThan(
count uint64, olderThan time.Time, onSuccess ...OnSuccess[struct{}],
) (uint64, error) {
var counter com.Counter
- defer db.log(ctx, stmt.Build(db.DriverName(), 0), &counter).Stop()
+
+ q := db.Rebind(stmt.Build(db.DriverName(), count))
+
+ defer db.log(ctx, q, &counter).Stop()
for {
- q := db.Rebind(stmt.Build(db.DriverName(), count))
- rs, err := db.NamedExecContext(ctx, q, cleanupWhere{
- EnvironmentId: envId,
- Time: types.UnixMilli(olderThan),
- })
- if err != nil {
- return 0, internal.CantPerformQuery(err, q)
- }
+ var rowsDeleted int64
+
+ err := retry.WithBackoff(
+ ctx,
+ func(ctx context.Context) error {
+ rs, err := db.NamedExecContext(ctx, q, cleanupWhere{
+ EnvironmentId: envId,
+ Time: types.UnixMilli(olderThan),
+ })
+ if err != nil {
+ return internal.CantPerformQuery(err, q)
+ }
+
+ rowsDeleted, err = rs.RowsAffected()
- n, err := rs.RowsAffected()
+ return err
+ },
+ retry.Retryable,
+ backoff.NewExponentialWithJitter(1*time.Millisecond, 1*time.Second),
+ db.getDefaultRetrySettings(),
+ )
if err != nil {
return 0, err
}
- counter.Add(uint64(n))
+ counter.Add(uint64(rowsDeleted))
for _, onSuccess := range onSuccess {
- if err := onSuccess(ctx, make([]struct{}, n)); err != nil {
+ if err := onSuccess(ctx, make([]struct{}, rowsDeleted)); err != nil {
return 0, err
}
}
- if n < int64(count) {
+ if rowsDeleted < int64(count) {
break
}
}
diff --git a/pkg/icingadb/db.go b/pkg/icingadb/db.go
index 4ff3e0d..47940af 100644
--- a/pkg/icingadb/db.go
+++ b/pkg/icingadb/db.go
@@ -7,13 +7,13 @@ import (
"github.com/icinga/icingadb/pkg/backoff"
"github.com/icinga/icingadb/pkg/com"
"github.com/icinga/icingadb/pkg/contracts"
- "github.com/icinga/icingadb/pkg/driver"
"github.com/icinga/icingadb/pkg/logging"
"github.com/icinga/icingadb/pkg/periodic"
"github.com/icinga/icingadb/pkg/retry"
"github.com/icinga/icingadb/pkg/utils"
"github.com/jmoiron/sqlx"
"github.com/pkg/errors"
+ "go.uber.org/zap"
"golang.org/x/sync/errgroup"
"golang.org/x/sync/semaphore"
"reflect"
@@ -54,6 +54,12 @@ type Options struct {
// MaxRowsPerTransaction defines the maximum number of rows per transaction.
// The default is 2^13, which in our tests showed the best performance in terms of execution time and parallelism.
MaxRowsPerTransaction int `yaml:"max_rows_per_transaction" default:"8192"`
+
+ // WsrepSyncWait enforces Galera cluster nodes to perform strict cluster-wide causality checks
+ // before executing specific SQL queries determined by the number you provided.
+ // Please refer to the below link for a detailed description.
+ // https://icinga.com/docs/icinga-db/latest/doc/03-Configuration/#galera-cluster
+ WsrepSyncWait int `yaml:"wsrep_sync_wait" default:"7"`
}
// Validate checks constraints in the supplied database options and returns an error if they are violated.
@@ -70,6 +76,9 @@ func (o *Options) Validate() error {
if o.MaxRowsPerTransaction < 1 {
return errors.New("max_rows_per_transaction must be at least 1")
}
+ if o.WsrepSyncWait < 0 || o.WsrepSyncWait > 15 {
+ return errors.New("wsrep_sync_wait can only be set to a number between 0 and 15")
+ }
return nil
}
@@ -85,23 +94,35 @@ func NewDb(db *sqlx.DB, logger *logging.Logger, options *Options) *DB {
}
const (
- expectedMysqlSchemaVersion = 4
- expectedPostgresSchemaVersion = 2
+ expectedMysqlSchemaVersion = 5
+ expectedPostgresSchemaVersion = 3
)
// CheckSchema asserts the database schema of the expected version being present.
func (db *DB) CheckSchema(ctx context.Context) error {
var expectedDbSchemaVersion uint16
switch db.DriverName() {
- case driver.MySQL:
+ case MySQL:
expectedDbSchemaVersion = expectedMysqlSchemaVersion
- case driver.PostgreSQL:
+ case PostgreSQL:
expectedDbSchemaVersion = expectedPostgresSchemaVersion
}
var version uint16
- err := db.QueryRowxContext(ctx, "SELECT version FROM icingadb_schema ORDER BY id DESC LIMIT 1").Scan(&version)
+ err := retry.WithBackoff(
+ ctx,
+ func(ctx context.Context) (err error) {
+ query := "SELECT version FROM icingadb_schema ORDER BY id DESC LIMIT 1"
+ err = db.QueryRowxContext(ctx, query).Scan(&version)
+ if err != nil {
+ err = internal.CantPerformQuery(err, query)
+ }
+ return
+ },
+ retry.Retryable,
+ backoff.NewExponentialWithJitter(128*time.Millisecond, 1*time.Minute),
+ db.getDefaultRetrySettings())
if err != nil {
return errors.Wrap(err, "can't check database schema version")
}
@@ -161,10 +182,10 @@ func (db *DB) BuildInsertIgnoreStmt(into interface{}) (string, int) {
var clause string
switch db.DriverName() {
- case driver.MySQL:
+ case MySQL:
// MySQL treats UPDATE id = id as a no-op.
clause = fmt.Sprintf(`ON DUPLICATE KEY UPDATE "%s" = "%s"`, columns[0], columns[0])
- case driver.PostgreSQL:
+ case PostgreSQL:
clause = fmt.Sprintf("ON CONFLICT ON CONSTRAINT pk_%s DO NOTHING", table)
}
@@ -224,10 +245,10 @@ func (db *DB) BuildUpsertStmt(subject interface{}) (stmt string, placeholders in
var clause, setFormat string
switch db.DriverName() {
- case driver.MySQL:
+ case MySQL:
clause = "ON DUPLICATE KEY UPDATE"
setFormat = `"%[1]s" = VALUES("%[1]s")`
- case driver.PostgreSQL:
+ case PostgreSQL:
clause = fmt.Sprintf("ON CONFLICT ON CONSTRAINT pk_%s DO UPDATE SET", table)
setFormat = `"%[1]s" = EXCLUDED."%[1]s"`
}
@@ -338,7 +359,7 @@ func (db *DB) BulkExec(
},
retry.Retryable,
backoff.NewExponentialWithJitter(1*time.Millisecond, 1*time.Second),
- retry.Settings{},
+ db.getDefaultRetrySettings(),
)
}
}(b))
@@ -403,7 +424,7 @@ func (db *DB) NamedBulkExec(
},
retry.Retryable,
backoff.NewExponentialWithJitter(1*time.Millisecond, 1*time.Second),
- retry.Settings{},
+ db.getDefaultRetrySettings(),
)
}
}(b))
@@ -476,7 +497,7 @@ func (db *DB) NamedBulkExecTx(
},
retry.Retryable,
backoff.NewExponentialWithJitter(1*time.Millisecond, 1*time.Second),
- retry.Settings{},
+ db.getDefaultRetrySettings(),
)
}
}(b))
@@ -662,6 +683,25 @@ func (db *DB) GetSemaphoreForTable(table string) *semaphore.Weighted {
}
}
+func (db *DB) getDefaultRetrySettings() retry.Settings {
+ return retry.Settings{
+ Timeout: retry.DefaultTimeout,
+ OnRetryableError: func(_ time.Duration, _ uint64, err, lastErr error) {
+ if lastErr == nil || err.Error() != lastErr.Error() {
+ db.logger.Warnw("Can't execute query. Retrying", zap.Error(err))
+ }
+ },
+ OnSuccess: func(elapsed time.Duration, attempt uint64, lastErr error) {
+ if attempt > 1 {
+ db.logger.Infow("Query retried successfully after error",
+ zap.Duration("after", elapsed),
+ zap.Uint64("attempts", attempt),
+ zap.NamedError("recovered_error", lastErr))
+ }
+ },
+ }
+}
+
func (db *DB) log(ctx context.Context, query string, counter *com.Counter) periodic.Stopper {
return periodic.Start(ctx, db.logger.Interval(), func(tick periodic.Tick) {
if count := counter.Reset(); count > 0 {
diff --git a/pkg/icingadb/driver.go b/pkg/icingadb/driver.go
new file mode 100644
index 0000000..d564916
--- /dev/null
+++ b/pkg/icingadb/driver.go
@@ -0,0 +1,90 @@
+package icingadb
+
+import (
+ "context"
+ "database/sql/driver"
+ "github.com/icinga/icingadb/pkg/backoff"
+ "github.com/icinga/icingadb/pkg/icingaredis/telemetry"
+ "github.com/icinga/icingadb/pkg/logging"
+ "github.com/icinga/icingadb/pkg/retry"
+ "github.com/pkg/errors"
+ "go.uber.org/zap"
+ "time"
+)
+
+// Driver names as automatically registered in the database/sql package by themselves.
+const (
+ MySQL string = "mysql"
+ PostgreSQL string = "postgres"
+)
+
+type InitConnFunc func(context.Context, driver.Conn) error
+
+// RetryConnector wraps driver.Connector with retry logic.
+type RetryConnector struct {
+ driver.Connector
+
+ logger *logging.Logger
+
+ // initConn can be used to execute post Connect() arbitrary actions.
+ // It will be called after successfully initiated a new connection using the connector's Connect method.
+ initConn InitConnFunc
+}
+
+// NewConnector creates a fully initialized RetryConnector from the given args.
+func NewConnector(c driver.Connector, logger *logging.Logger, init InitConnFunc) *RetryConnector {
+ return &RetryConnector{Connector: c, logger: logger, initConn: init}
+}
+
+// Connect implements part of the driver.Connector interface.
+func (c RetryConnector) Connect(ctx context.Context) (driver.Conn, error) {
+ var conn driver.Conn
+ err := errors.Wrap(retry.WithBackoff(
+ ctx,
+ func(ctx context.Context) (err error) {
+ conn, err = c.Connector.Connect(ctx)
+ if err == nil && c.initConn != nil {
+ if err = c.initConn(ctx, conn); err != nil {
+ // We're going to retry this, so just don't bother whether Close() fails!
+ _ = conn.Close()
+ }
+ }
+
+ return
+ },
+ retry.Retryable,
+ backoff.NewExponentialWithJitter(128*time.Millisecond, 1*time.Minute),
+ retry.Settings{
+ Timeout: retry.DefaultTimeout,
+ OnRetryableError: func(_ time.Duration, _ uint64, err, lastErr error) {
+ telemetry.UpdateCurrentDbConnErr(err)
+
+ if lastErr == nil || err.Error() != lastErr.Error() {
+ c.logger.Warnw("Can't connect to database. Retrying", zap.Error(err))
+ }
+ },
+ OnSuccess: func(elapsed time.Duration, attempt uint64, _ error) {
+ telemetry.UpdateCurrentDbConnErr(nil)
+
+ if attempt > 1 {
+ c.logger.Infow("Reconnected to database",
+ zap.Duration("after", elapsed), zap.Uint64("attempts", attempt))
+ }
+ },
+ },
+ ), "can't connect to database")
+ return conn, err
+}
+
+// Driver implements part of the driver.Connector interface.
+func (c RetryConnector) Driver() driver.Driver {
+ return c.Connector.Driver()
+}
+
+// MysqlFuncLogger is an adapter that allows ordinary functions to be used as a logger for mysql.SetLogger.
+type MysqlFuncLogger func(v ...interface{})
+
+// Print implements the mysql.Logger interface.
+func (log MysqlFuncLogger) Print(v ...interface{}) {
+ log(v)
+}
diff --git a/pkg/icingadb/dump_signals.go b/pkg/icingadb/dump_signals.go
index bce1aef..2f8b46e 100644
--- a/pkg/icingadb/dump_signals.go
+++ b/pkg/icingadb/dump_signals.go
@@ -2,10 +2,10 @@ package icingadb
import (
"context"
- "github.com/go-redis/redis/v8"
"github.com/icinga/icingadb/pkg/icingaredis"
"github.com/icinga/icingadb/pkg/logging"
"github.com/pkg/errors"
+ "github.com/redis/go-redis/v9"
"go.uber.org/zap"
"sync"
)
diff --git a/pkg/icingadb/ha.go b/pkg/icingadb/ha.go
index 74d3b32..cc32a4b 100644
--- a/pkg/icingadb/ha.go
+++ b/pkg/icingadb/ha.go
@@ -9,7 +9,6 @@ import (
"github.com/icinga/icingadb/internal"
"github.com/icinga/icingadb/pkg/backoff"
"github.com/icinga/icingadb/pkg/com"
- "github.com/icinga/icingadb/pkg/driver"
v1 "github.com/icinga/icingadb/pkg/icingadb/v1"
"github.com/icinga/icingadb/pkg/icingaredis"
icingaredisv1 "github.com/icinga/icingadb/pkg/icingaredis/v1"
@@ -23,7 +22,10 @@ import (
"time"
)
-var timeout = 60 * time.Second
+// peerTimeout defines the timeout for HA heartbeats, being used to detect absent nodes.
+//
+// Because this timeout relies on icingaredis.Timeout, it is icingaredis.Timeout plus a short grace period.
+const peerTimeout = icingaredis.Timeout + 5*time.Second
type haState struct {
responsibleTsMilli int64
@@ -43,8 +45,8 @@ type HA struct {
heartbeat *icingaredis.Heartbeat
logger *logging.Logger
responsible bool
- handover chan struct{}
- takeover chan struct{}
+ handover chan string
+ takeover chan string
done chan struct{}
errOnce sync.Once
errMu sync.Mutex
@@ -64,8 +66,8 @@ func NewHA(ctx context.Context, db *DB, heartbeat *icingaredis.Heartbeat, logger
db: db,
heartbeat: heartbeat,
logger: logger,
- handover: make(chan struct{}),
- takeover: make(chan struct{}),
+ handover: make(chan string),
+ takeover: make(chan string),
done: make(chan struct{}),
}
@@ -107,13 +109,13 @@ func (h *HA) Err() error {
return h.err
}
-// Handover returns a channel with which handovers are signaled.
-func (h *HA) Handover() chan struct{} {
+// Handover returns a channel with which handovers and their reasons are signaled.
+func (h *HA) Handover() chan string {
return h.handover
}
-// Takeover returns a channel with which takeovers are signaled.
-func (h *HA) Takeover() chan struct{} {
+// Takeover returns a channel with which takeovers and their reasons are signaled.
+func (h *HA) Takeover() chan string {
return h.takeover
}
@@ -141,12 +143,24 @@ func (h *HA) controller() {
oldInstancesRemoved := false
- logTicker := time.NewTicker(time.Second * 60)
- defer logTicker.Stop()
- shouldLog := true
+ // Suppress recurring log messages in the realize method to be only logged this often.
+ routineLogTicker := time.NewTicker(5 * time.Minute)
+ defer routineLogTicker.Stop()
+ shouldLogRoutineEvents := true
+
+ // The retry logic in HA is twofold:
+ //
+ // 1) Updating or inserting the instance row based on the current heartbeat must be done within the heartbeat's
+ // expiration time. Therefore, we use a deadline ctx to retry.WithBackoff() in realize() which expires earlier
+ // than our default timeout.
+ // 2) Since we do not want to exit before our default timeout expires, we have to repeat step 1 until it does.
+ retryTimeout := time.NewTimer(retry.DefaultTimeout)
+ defer retryTimeout.Stop()
for {
select {
+ case <-retryTimeout.C:
+ h.abort(errors.New("retry deadline exceeded"))
case m := <-h.heartbeat.Events():
if m != nil {
now := time.Now()
@@ -158,10 +172,15 @@ func (h *HA) controller() {
if tt.After(now.Add(1 * time.Second)) {
h.logger.Debugw("Received heartbeat from the future", zap.Time("time", tt))
}
- if tt.Before(now.Add(-1 * timeout)) {
+ if tt.Before(now.Add(-1 * peerTimeout)) {
h.logger.Errorw("Received heartbeat from the past", zap.Time("time", tt))
- h.signalHandover()
+
+ h.signalHandover("received heartbeat from the past")
h.realizeLostHeartbeat()
+
+ // Reset retry timeout so that the next iterations have the full amount of time available again.
+ retry.ResetTimeout(retryTimeout, retry.DefaultTimeout)
+
continue
}
s, err := m.Stats().IcingaStatus()
@@ -186,33 +205,28 @@ func (h *HA) controller() {
EntityWithoutChecksum: v1.EntityWithoutChecksum{IdMeta: v1.IdMeta{
Id: envId,
}},
- Name: types.String{
- NullString: sql.NullString{
- String: envId.String(),
- Valid: true,
- },
- },
+ Name: types.MakeString(envId.String()),
}
h.environmentMu.Unlock()
}
select {
- case <-logTicker.C:
- shouldLog = true
+ case <-routineLogTicker.C:
+ shouldLogRoutineEvents = true
default:
}
- var realizeCtx context.Context
- var cancelRealizeCtx context.CancelFunc
- if h.responsible {
- realizeCtx, cancelRealizeCtx = context.WithDeadline(h.ctx, m.ExpiryTime())
- } else {
- realizeCtx, cancelRealizeCtx = context.WithCancel(h.ctx)
- }
- err = h.realize(realizeCtx, s, t, envId, shouldLog)
+ // Ensure that updating/inserting the instance row is completed by the current heartbeat's expiry time.
+ realizeCtx, cancelRealizeCtx := context.WithDeadline(h.ctx, m.ExpiryTime())
+ err = h.realize(realizeCtx, s, t, envId, shouldLogRoutineEvents)
cancelRealizeCtx()
if errors.Is(err, context.DeadlineExceeded) {
- h.signalHandover()
+ h.signalHandover("instance update/insert deadline exceeded heartbeat expiry time")
+
+ // Instance insert/update was not completed by the expiration time of the current heartbeat.
+ // Pass control back to the loop to try again with the next heartbeat,
+ // or exit the loop when the retry timeout has expired. Therefore,
+ // retry timeout is **not** reset here so that retries continue until the timeout has expired.
continue
}
if err != nil {
@@ -224,12 +238,20 @@ func (h *HA) controller() {
oldInstancesRemoved = true
}
- shouldLog = false
+ shouldLogRoutineEvents = false
} else {
h.logger.Error("Lost heartbeat")
- h.signalHandover()
+ h.signalHandover("lost heartbeat")
h.realizeLostHeartbeat()
}
+
+ // Reset retry timeout so that the next iterations have the full amount of time available again.
+ // Don't be surprised by the location of the code,
+ // as it is obvious that the timer is also reset after an error that ends the loop anyway.
+ // But this is the best place to catch all scenarios where the timeout needs to be reset.
+ // And since HA needs quite a bit of refactoring anyway to e.g. return immediately after calling h.abort(),
+ // it's fine to have it here for now.
+ retry.ResetTimeout(retryTimeout, retry.DefaultTimeout)
case <-h.heartbeat.Done():
if err := h.heartbeat.Err(); err != nil {
h.abort(err)
@@ -240,18 +262,34 @@ func (h *HA) controller() {
}
}
-func (h *HA) realize(ctx context.Context, s *icingaredisv1.IcingaStatus, t *types.UnixMilli, envId types.Binary, shouldLog bool) error {
- var takeover, otherResponsible bool
+// realize a HA cycle triggered by a heartbeat event.
+//
+// shouldLogRoutineEvents indicates if recurrent events should be logged.
+func (h *HA) realize(
+ ctx context.Context,
+ s *icingaredisv1.IcingaStatus,
+ t *types.UnixMilli,
+ envId types.Binary,
+ shouldLogRoutineEvents bool,
+) error {
+ var (
+ takeover string
+ otherResponsible bool
+ )
+
+ if _, ok := ctx.Deadline(); !ok {
+ panic("can't use context w/o deadline in realize()")
+ }
err := retry.WithBackoff(
ctx,
func(ctx context.Context) error {
- takeover = false
+ takeover = ""
otherResponsible = false
isoLvl := sql.LevelSerializable
selectLock := ""
- if h.db.DriverName() == driver.MySQL {
+ if h.db.DriverName() == MySQL {
// The RDBMS may actually be a Percona XtraDB Cluster which doesn't
// support serializable transactions, but only their following equivalent:
isoLvl = sql.LevelRepeatableRead
@@ -264,25 +302,41 @@ func (h *HA) realize(ctx context.Context, s *icingaredisv1.IcingaStatus, t *type
}
query := h.db.Rebind("SELECT id, heartbeat FROM icingadb_instance "+
- "WHERE environment_id = ? AND responsible = ? AND id <> ? AND heartbeat > ?") + selectLock
+ "WHERE environment_id = ? AND responsible = ? AND id <> ?") + selectLock
instance := &v1.IcingadbInstance{}
+ errQuery := tx.QueryRowxContext(ctx, query, envId, "y", h.instanceId).StructScan(instance)
+
+ switch {
+ case errQuery == nil:
+ fields := []any{
+ zap.String("instance_id", instance.Id.String()),
+ zap.String("environment", envId.String()),
+ zap.Time("heartbeat", instance.Heartbeat.Time()),
+ zap.Duration("heartbeat_age", time.Since(instance.Heartbeat.Time())),
+ }
- errQuery := tx.QueryRowxContext(
- ctx, query, envId, "y", h.instanceId, time.Now().Add(-1*timeout).UnixMilli(),
- ).StructScan(instance)
- switch errQuery {
- case nil:
- otherResponsible = true
- if shouldLog {
- h.logger.Infow("Another instance is active",
- zap.String("instance_id", instance.Id.String()),
- zap.String("environment", envId.String()),
- "heartbeat", instance.Heartbeat,
- zap.Duration("heartbeat_age", time.Since(instance.Heartbeat.Time())))
+ if instance.Heartbeat.Time().Before(time.Now().Add(-1 * peerTimeout)) {
+ takeover = "other instance's heartbeat has expired"
+ h.logger.Debugw("Preparing to take over HA as other instance's heartbeat has expired", fields...)
+ } else {
+ otherResponsible = true
+ if shouldLogRoutineEvents {
+ h.logger.Infow("Another instance is active", fields...)
+ }
+ }
+
+ case errors.Is(errQuery, sql.ErrNoRows):
+ fields := []any{
+ zap.String("instance_id", h.instanceId.String()),
+ zap.String("environment", envId.String())}
+ if !h.responsible {
+ takeover = "no other instance is active"
+ h.logger.Debugw("Preparing to take over HA as no instance is active", fields...)
+ } else if h.responsible && shouldLogRoutineEvents {
+ h.logger.Debugw("Continuing being the active instance", fields...)
}
- case sql.ErrNoRows:
- takeover = true
+
default:
return internal.CantPerformQuery(errQuery, query)
}
@@ -297,7 +351,7 @@ func (h *HA) realize(ctx context.Context, s *icingaredisv1.IcingaStatus, t *type
EnvironmentId: envId,
},
Heartbeat: *t,
- Responsible: types.Bool{Bool: takeover || h.responsible, Valid: true},
+ Responsible: types.Bool{Bool: takeover != "" || h.responsible, Valid: true},
EndpointId: s.EndpointId,
Icinga2Version: s.Version,
Icinga2StartTime: s.ProgramStart,
@@ -314,7 +368,7 @@ func (h *HA) realize(ctx context.Context, s *icingaredisv1.IcingaStatus, t *type
return internal.CantPerformQuery(err, stmt)
}
- if takeover {
+ if takeover != "" {
stmt := h.db.Rebind("UPDATE icingadb_instance SET responsible = ? WHERE environment_id = ? AND id <> ?")
_, err := tx.ExecContext(ctx, stmt, "n", envId, h.instanceId)
@@ -330,16 +384,33 @@ func (h *HA) realize(ctx context.Context, s *icingaredisv1.IcingaStatus, t *type
return nil
},
retry.Retryable,
- backoff.NewExponentialWithJitter(time.Millisecond*256, time.Second*3),
+ backoff.NewExponentialWithJitter(256*time.Millisecond, 3*time.Second),
retry.Settings{
- OnError: func(_ time.Duration, attempt uint64, err, lastErr error) {
+ // Intentionally no timeout is set, as we use a context with a deadline.
+ OnRetryableError: func(_ time.Duration, attempt uint64, err, lastErr error) {
if lastErr == nil || err.Error() != lastErr.Error() {
log := h.logger.Debugw
- if attempt > 2 {
+ if attempt > 3 {
+ log = h.logger.Infow
+ }
+
+ log("Can't update or insert instance. Retrying", zap.Error(err))
+ }
+ },
+ OnSuccess: func(elapsed time.Duration, attempt uint64, lastErr error) {
+ if attempt > 1 {
+ log := h.logger.Debugw
+
+ if attempt > 4 {
+ // We log errors with severity info starting from the fourth attempt, (see above)
+ // so we need to log success with severity info from the fifth attempt.
log = h.logger.Infow
}
- log("Can't update or insert instance. Retrying", zap.Error(err), zap.Uint64("retry count", attempt))
+ log("Instance updated/inserted successfully after error",
+ zap.Duration("after", elapsed),
+ zap.Uint64("attempts", attempt),
+ zap.NamedError("recovered_error", lastErr))
}
},
},
@@ -348,14 +419,14 @@ func (h *HA) realize(ctx context.Context, s *icingaredisv1.IcingaStatus, t *type
return err
}
- if takeover {
+ if takeover != "" {
// Insert the environment after each heartbeat takeover if it does not already exist in the database
// as the environment may have changed, although this is likely to happen very rarely.
if err := h.insertEnvironment(); err != nil {
return errors.Wrap(err, "can't insert environment")
}
- h.signalTakeover()
+ h.signalTakeover(takeover)
} else if otherResponsible {
if state, _ := h.state.Load(); !state.otherResponsible {
state.otherResponsible = true
@@ -366,6 +437,7 @@ func (h *HA) realize(ctx context.Context, s *icingaredisv1.IcingaStatus, t *type
return nil
}
+// realizeLostHeartbeat updates "responsible = n" for this HA into the database.
func (h *HA) realizeLostHeartbeat() {
stmt := h.db.Rebind("UPDATE icingadb_instance SET responsible = ? WHERE id = ?")
if _, err := h.db.ExecContext(h.ctx, stmt, "n", h.instanceId); err != nil && !utils.IsContextCanceled(err) {
@@ -399,10 +471,10 @@ func (h *HA) removeOldInstances(s *icingaredisv1.IcingaStatus, envId types.Binar
select {
case <-h.ctx.Done():
return
- case <-time.After(timeout):
+ case <-time.After(peerTimeout):
query := h.db.Rebind("DELETE FROM icingadb_instance " +
"WHERE id <> ? AND environment_id = ? AND endpoint_id = ? AND heartbeat < ?")
- heartbeat := types.UnixMilli(time.Now().Add(-timeout))
+ heartbeat := types.UnixMilli(time.Now().Add(-1 * peerTimeout))
result, err := h.db.ExecContext(h.ctx, query, h.instanceId, envId,
s.EndpointId, heartbeat)
if err != nil {
@@ -421,7 +493,8 @@ func (h *HA) removeOldInstances(s *icingaredisv1.IcingaStatus, envId types.Binar
}
}
-func (h *HA) signalHandover() {
+// signalHandover gives up HA.responsible and notifies the HA.Handover chan.
+func (h *HA) signalHandover(reason string) {
if h.responsible {
h.state.Store(haState{
responsibleTsMilli: time.Now().UnixMilli(),
@@ -430,7 +503,7 @@ func (h *HA) signalHandover() {
})
select {
- case h.handover <- struct{}{}:
+ case h.handover <- reason:
h.responsible = false
case <-h.ctx.Done():
// Noop
@@ -438,7 +511,8 @@ func (h *HA) signalHandover() {
}
}
-func (h *HA) signalTakeover() {
+// signalTakeover claims HA.responsible and notifies the HA.Takeover chan.
+func (h *HA) signalTakeover(reason string) {
if !h.responsible {
h.state.Store(haState{
responsibleTsMilli: time.Now().UnixMilli(),
@@ -447,7 +521,7 @@ func (h *HA) signalTakeover() {
})
select {
- case h.takeover <- struct{}{}:
+ case h.takeover <- reason:
h.responsible = true
case <-h.ctx.Done():
// Noop
diff --git a/pkg/icingadb/history/sla.go b/pkg/icingadb/history/sla.go
index 79d22c7..7c0849e 100644
--- a/pkg/icingadb/history/sla.go
+++ b/pkg/icingadb/history/sla.go
@@ -1,10 +1,10 @@
package history
import (
- "github.com/go-redis/redis/v8"
"github.com/icinga/icingadb/pkg/icingadb/v1/history"
"github.com/icinga/icingadb/pkg/structify"
"github.com/icinga/icingadb/pkg/types"
+ "github.com/redis/go-redis/v9"
"reflect"
)
diff --git a/pkg/icingadb/history/sync.go b/pkg/icingadb/history/sync.go
index dc8bc61..4be0e71 100644
--- a/pkg/icingadb/history/sync.go
+++ b/pkg/icingadb/history/sync.go
@@ -2,7 +2,6 @@ package history
import (
"context"
- "github.com/go-redis/redis/v8"
"github.com/icinga/icingadb/internal"
"github.com/icinga/icingadb/pkg/com"
"github.com/icinga/icingadb/pkg/contracts"
@@ -17,6 +16,7 @@ import (
"github.com/icinga/icingadb/pkg/types"
"github.com/icinga/icingadb/pkg/utils"
"github.com/pkg/errors"
+ "github.com/redis/go-redis/v9"
"golang.org/x/sync/errgroup"
"reflect"
"sync"
@@ -144,7 +144,11 @@ func (s Sync) deleteFromRedis(ctx context.Context, key string, input <-chan redi
stream := "icinga:history:stream:" + key
for {
select {
- case bulk := <-bulks:
+ case bulk, ok := <-bulks:
+ if !ok {
+ return nil
+ }
+
ids := make([]string, len(bulk))
for i := range bulk {
ids[i] = bulk[i].ID
diff --git a/pkg/icingadb/objectpacker/objectpacker.go b/pkg/icingadb/objectpacker/objectpacker.go
index 9ddfdc8..0152745 100644
--- a/pkg/icingadb/objectpacker/objectpacker.go
+++ b/pkg/icingadb/objectpacker/objectpacker.go
@@ -6,7 +6,6 @@ import (
"fmt"
"github.com/pkg/errors"
"io"
- "io/ioutil"
"reflect"
"sort"
)
@@ -102,7 +101,7 @@ func packValue(in reflect.Value, out io.Writer) error {
// If there aren't any values to pack, ...
if l < 1 {
// ... create one and pack it - panics on disallowed type.
- _ = packValue(reflect.Zero(in.Type().Elem()), ioutil.Discard)
+ _ = packValue(reflect.Zero(in.Type().Elem()), io.Discard)
}
return nil
@@ -140,13 +139,13 @@ func packValue(in reflect.Value, out io.Writer) error {
packedKey = key.Slice(0, key.Len()).Interface().([]byte)
} else {
// Not just stringify the key (below), but also pack it (here) - panics on disallowed type.
- _ = packValue(iter.Key(), ioutil.Discard)
+ _ = packValue(iter.Key(), io.Discard)
packedKey = []byte(fmt.Sprint(key.Interface()))
}
} else {
// Not just stringify the key (below), but also pack it (here) - panics on disallowed type.
- _ = packValue(iter.Key(), ioutil.Discard)
+ _ = packValue(iter.Key(), io.Discard)
packedKey = []byte(fmt.Sprint(key.Interface()))
}
@@ -176,8 +175,8 @@ func packValue(in reflect.Value, out io.Writer) error {
typ := in.Type()
// ... create one and pack it - panics on disallowed type.
- _ = packValue(reflect.Zero(typ.Key()), ioutil.Discard)
- _ = packValue(reflect.Zero(typ.Elem()), ioutil.Discard)
+ _ = packValue(reflect.Zero(typ.Key()), io.Discard)
+ _ = packValue(reflect.Zero(typ.Elem()), io.Discard)
}
return nil
@@ -186,7 +185,7 @@ func packValue(in reflect.Value, out io.Writer) error {
err := packValue(reflect.Value{}, out)
// Create a fictive referenced value and pack it - panics on disallowed type.
- _ = packValue(reflect.Zero(in.Type().Elem()), ioutil.Discard)
+ _ = packValue(reflect.Zero(in.Type().Elem()), io.Discard)
return err
} else {
diff --git a/pkg/icingadb/overdue/sync.go b/pkg/icingadb/overdue/sync.go
index 5cd4d67..377592a 100644
--- a/pkg/icingadb/overdue/sync.go
+++ b/pkg/icingadb/overdue/sync.go
@@ -4,7 +4,6 @@ import (
"context"
_ "embed"
"fmt"
- "github.com/go-redis/redis/v8"
"github.com/google/uuid"
"github.com/icinga/icingadb/internal"
"github.com/icinga/icingadb/pkg/com"
@@ -17,6 +16,7 @@ import (
"github.com/icinga/icingadb/pkg/logging"
"github.com/icinga/icingadb/pkg/periodic"
"github.com/pkg/errors"
+ "github.com/redis/go-redis/v9"
"golang.org/x/sync/errgroup"
"regexp"
"strconv"
diff --git a/pkg/icingadb/runtime_updates.go b/pkg/icingadb/runtime_updates.go
index dfee9c0..a56263a 100644
--- a/pkg/icingadb/runtime_updates.go
+++ b/pkg/icingadb/runtime_updates.go
@@ -3,7 +3,6 @@ package icingadb
import (
"context"
"fmt"
- "github.com/go-redis/redis/v8"
"github.com/icinga/icingadb/pkg/com"
"github.com/icinga/icingadb/pkg/common"
"github.com/icinga/icingadb/pkg/contracts"
@@ -15,6 +14,7 @@ import (
"github.com/icinga/icingadb/pkg/structify"
"github.com/icinga/icingadb/pkg/utils"
"github.com/pkg/errors"
+ "github.com/redis/go-redis/v9"
"go.uber.org/zap"
"golang.org/x/sync/errgroup"
"golang.org/x/sync/semaphore"
diff --git a/pkg/icingadb/v1/checkable.go b/pkg/icingadb/v1/checkable.go
index dbb114c..4b1efeb 100644
--- a/pkg/icingadb/v1/checkable.go
+++ b/pkg/icingadb/v1/checkable.go
@@ -30,7 +30,7 @@ type Checkable struct {
IconImageAlt string `json:"icon_image_alt"`
IconImageId types.Binary `json:"icon_image_id"`
IsVolatile types.Bool `json:"is_volatile"`
- MaxCheckAttempts float64 `json:"max_check_attempts"`
+ MaxCheckAttempts uint32 `json:"max_check_attempts"`
Notes string `json:"notes"`
NotesUrlId types.Binary `json:"notes_url_id"`
NotificationsEnabled types.Bool `json:"notifications_enabled"`
diff --git a/pkg/icingadb/v1/history/state.go b/pkg/icingadb/v1/history/state.go
index dec13b0..6320b73 100644
--- a/pkg/icingadb/v1/history/state.go
+++ b/pkg/icingadb/v1/history/state.go
@@ -14,7 +14,7 @@ type StateHistory struct {
HardState uint8 `json:"hard_state"`
PreviousSoftState uint8 `json:"previous_soft_state"`
PreviousHardState uint8 `json:"previous_hard_state"`
- CheckAttempt uint8 `json:"check_attempt"`
+ CheckAttempt uint32 `json:"check_attempt"`
Output types.String `json:"output"`
LongOutput types.String `json:"long_output"`
MaxCheckAttempts uint32 `json:"max_check_attempts"`
diff --git a/pkg/icingadb/v1/state.go b/pkg/icingadb/v1/state.go
index bad8f28..983b14d 100644
--- a/pkg/icingadb/v1/state.go
+++ b/pkg/icingadb/v1/state.go
@@ -9,7 +9,7 @@ type State struct {
EnvironmentMeta `json:",inline"`
AcknowledgementCommentId types.Binary `json:"acknowledgement_comment_id"`
LastCommentId types.Binary `json:"last_comment_id"`
- CheckAttempt uint8 `json:"check_attempt"`
+ CheckAttempt uint32 `json:"check_attempt"`
CheckCommandline types.String `json:"check_commandline"`
CheckSource types.String `json:"check_source"`
SchedulingSource types.String `json:"scheduling_source"`
diff --git a/pkg/icingaredis/client.go b/pkg/icingaredis/client.go
index d42713c..c494f95 100644
--- a/pkg/icingaredis/client.go
+++ b/pkg/icingaredis/client.go
@@ -2,7 +2,6 @@ package icingaredis
import (
"context"
- "github.com/go-redis/redis/v8"
"github.com/icinga/icingadb/pkg/com"
"github.com/icinga/icingadb/pkg/common"
"github.com/icinga/icingadb/pkg/contracts"
@@ -10,6 +9,7 @@ import (
"github.com/icinga/icingadb/pkg/periodic"
"github.com/icinga/icingadb/pkg/utils"
"github.com/pkg/errors"
+ "github.com/redis/go-redis/v9"
"golang.org/x/sync/errgroup"
"golang.org/x/sync/semaphore"
"runtime"
diff --git a/pkg/icingaredis/heartbeat.go b/pkg/icingaredis/heartbeat.go
index 9a8ebad..cb34010 100644
--- a/pkg/icingaredis/heartbeat.go
+++ b/pkg/icingaredis/heartbeat.go
@@ -2,13 +2,13 @@ package icingaredis
import (
"context"
- "github.com/go-redis/redis/v8"
"github.com/icinga/icingadb/internal"
v1 "github.com/icinga/icingadb/pkg/icingaredis/v1"
"github.com/icinga/icingadb/pkg/logging"
"github.com/icinga/icingadb/pkg/types"
"github.com/icinga/icingadb/pkg/utils"
"github.com/pkg/errors"
+ "github.com/redis/go-redis/v9"
"go.uber.org/zap"
"golang.org/x/sync/errgroup"
"sync"
@@ -16,9 +16,9 @@ import (
"time"
)
-// timeout defines how long a heartbeat may be absent if a heartbeat has already been received.
+// Timeout defines how long a heartbeat may be absent if a heartbeat has already been received.
// After this time, a heartbeat loss is propagated.
-var timeout = 60 * time.Second
+const Timeout = time.Minute
// Heartbeat periodically reads heartbeats from a Redis stream and signals in Beat channels when they are received.
// Also signals on if the heartbeat is Lost.
@@ -97,7 +97,7 @@ func (h *Heartbeat) controller(ctx context.Context) {
// Message producer loop.
g.Go(func() error {
// We expect heartbeats every second but only read them every 3 seconds.
- throttle := time.NewTicker(time.Second * 3)
+ throttle := time.NewTicker(3 * time.Second)
defer throttle.Stop()
for id := "$"; ; {
@@ -141,9 +141,9 @@ func (h *Heartbeat) controller(ctx context.Context) {
atomic.StoreInt64(&h.lastReceivedMs, m.received.UnixMilli())
h.sendEvent(m)
- case <-time.After(timeout):
+ case <-time.After(Timeout):
if h.active {
- h.logger.Warnw("Lost Icinga heartbeat", zap.Duration("timeout", timeout))
+ h.logger.Warnw("Lost Icinga heartbeat", zap.Duration("timeout", Timeout))
h.sendEvent(nil)
h.active = false
} else {
@@ -217,5 +217,5 @@ func (m *HeartbeatMessage) EnvironmentID() (types.Binary, error) {
// ExpiryTime returns the timestamp when the heartbeat expires.
func (m *HeartbeatMessage) ExpiryTime() time.Time {
- return m.received.Add(timeout)
+ return m.received.Add(Timeout)
}
diff --git a/pkg/icingaredis/telemetry/heartbeat.go b/pkg/icingaredis/telemetry/heartbeat.go
index ee476a1..0057ae0 100644
--- a/pkg/icingaredis/telemetry/heartbeat.go
+++ b/pkg/icingaredis/telemetry/heartbeat.go
@@ -3,7 +3,6 @@ package telemetry
import (
"context"
"fmt"
- "github.com/go-redis/redis/v8"
"github.com/icinga/icingadb/internal"
"github.com/icinga/icingadb/pkg/com"
"github.com/icinga/icingadb/pkg/icingaredis"
@@ -11,6 +10,7 @@ import (
"github.com/icinga/icingadb/pkg/periodic"
"github.com/icinga/icingadb/pkg/utils"
"github.com/pkg/errors"
+ "github.com/redis/go-redis/v9"
"go.uber.org/zap"
"regexp"
"runtime/metrics"
diff --git a/pkg/icingaredis/telemetry/stats.go b/pkg/icingaredis/telemetry/stats.go
index 86db0b3..2b592a5 100644
--- a/pkg/icingaredis/telemetry/stats.go
+++ b/pkg/icingaredis/telemetry/stats.go
@@ -2,12 +2,12 @@ package telemetry
import (
"context"
- "github.com/go-redis/redis/v8"
"github.com/icinga/icingadb/pkg/com"
"github.com/icinga/icingadb/pkg/icingaredis"
"github.com/icinga/icingadb/pkg/logging"
"github.com/icinga/icingadb/pkg/periodic"
"github.com/icinga/icingadb/pkg/utils"
+ "github.com/redis/go-redis/v9"
"go.uber.org/zap"
"strconv"
"time"
diff --git a/pkg/icingaredis/utils.go b/pkg/icingaredis/utils.go
index 9176dba..50c97f9 100644
--- a/pkg/icingaredis/utils.go
+++ b/pkg/icingaredis/utils.go
@@ -2,13 +2,13 @@ package icingaredis
import (
"context"
- "github.com/go-redis/redis/v8"
"github.com/icinga/icingadb/internal"
"github.com/icinga/icingadb/pkg/com"
"github.com/icinga/icingadb/pkg/contracts"
"github.com/icinga/icingadb/pkg/types"
"github.com/icinga/icingadb/pkg/utils"
"github.com/pkg/errors"
+ "github.com/redis/go-redis/v9"
"golang.org/x/sync/errgroup"
)
diff --git a/pkg/retry/retry.go b/pkg/retry/retry.go
index da73943..e5b93de 100644
--- a/pkg/retry/retry.go
+++ b/pkg/retry/retry.go
@@ -7,12 +7,15 @@ import (
"github.com/icinga/icingadb/pkg/backoff"
"github.com/lib/pq"
"github.com/pkg/errors"
+ "io"
"net"
- "strings"
"syscall"
"time"
)
+// DefaultTimeout is our opinionated default timeout for retrying database and Redis operations.
+const DefaultTimeout = 5 * time.Minute
+
// RetryableFunc is a retryable function.
type RetryableFunc func(context.Context) error
@@ -21,10 +24,15 @@ type IsRetryable func(error) bool
// Settings aggregates optional settings for WithBackoff.
type Settings struct {
- // Timeout lets WithBackoff give up once elapsed (if >0).
+ // If >0, Timeout lets WithBackoff stop retrying gracefully once elapsed based on the following criteria:
+ // * If the execution of RetryableFunc has taken longer than Timeout, no further attempts are made.
+ // * If Timeout elapses during the sleep phase between retries, one final retry is attempted.
+ // * RetryableFunc is always granted its full execution time and is not canceled if it exceeds Timeout.
+ // This means that WithBackoff may not stop exactly after Timeout expires,
+ // or may not retry at all if the first execution of RetryableFunc already takes longer than Timeout.
Timeout time.Duration
- // OnError is called if an error occurs.
- OnError func(elapsed time.Duration, attempt uint64, err, lastErr error)
+ // OnRetryableError is called if a retryable error occurs.
+ OnRetryableError func(elapsed time.Duration, attempt uint64, err, lastErr error)
// OnSuccess is called once the operation succeeds.
OnSuccess func(elapsed time.Duration, attempt uint64, lastErr error)
}
@@ -34,16 +42,19 @@ type Settings struct {
func WithBackoff(
ctx context.Context, retryableFunc RetryableFunc, retryable IsRetryable, b backoff.Backoff, settings Settings,
) (err error) {
- parentCtx := ctx
+ // Channel for retry deadline, which is set to the channel of NewTimer() if a timeout is configured,
+ // otherwise nil, so that it blocks forever if there is no timeout.
+ var timeout <-chan time.Time
if settings.Timeout > 0 {
- var cancelCtx context.CancelFunc
- ctx, cancelCtx = context.WithTimeout(ctx, settings.Timeout)
- defer cancelCtx()
+ t := time.NewTimer(settings.Timeout)
+ defer t.Stop()
+ timeout = t.C
}
start := time.Now()
- for attempt := uint64(0); ; /* true */ attempt++ {
+ timedOut := false
+ for attempt := uint64(1); ; /* true */ attempt++ {
prevErr := err
if err = retryableFunc(ctx); err == nil {
@@ -54,43 +65,72 @@ func WithBackoff(
return
}
- if settings.OnError != nil {
- settings.OnError(time.Since(start), attempt, err, prevErr)
+ // Retryable function may have exited prematurely due to context errors.
+ // We explicitly check the context error here, as the error returned by the retryable function can pass the
+ // error.Is() checks even though it is not a real context error, e.g.
+ // https://cs.opensource.google/go/go/+/refs/tags/go1.22.2:src/net/net.go;l=422
+ // https://cs.opensource.google/go/go/+/refs/tags/go1.22.2:src/net/net.go;l=601
+ if errors.Is(ctx.Err(), context.DeadlineExceeded) || errors.Is(ctx.Err(), context.Canceled) {
+ if prevErr != nil {
+ err = errors.Wrap(err, prevErr.Error())
+ }
+
+ return
}
- isRetryable := retryable(err)
+ if !retryable(err) {
+ err = errors.Wrap(err, "can't retry")
- if prevErr != nil && (errors.Is(err, context.DeadlineExceeded) || errors.Is(err, context.Canceled)) {
- err = prevErr
+ return
}
- if !isRetryable {
- err = errors.Wrap(err, "can't retry")
+ select {
+ case <-timeout:
+ // Stop retrying immediately if executing the retryable function took longer than the timeout.
+ timedOut = true
+ default:
+ }
+
+ if timedOut {
+ err = errors.Wrap(err, "retry deadline exceeded")
return
}
- sleep := b(attempt)
+ if settings.OnRetryableError != nil {
+ settings.OnRetryableError(time.Since(start), attempt, err, prevErr)
+ }
+
select {
+ case <-time.After(b(attempt)):
+ case <-timeout:
+ // Do not stop retrying immediately, but start one last attempt to mitigate timing issues where
+ // the timeout expires while waiting for the next attempt and
+ // therefore no retries have happened during this possibly long period.
+ timedOut = true
case <-ctx.Done():
- if outerErr := parentCtx.Err(); outerErr != nil {
- err = errors.Wrap(outerErr, "outer context canceled")
- } else {
- if err == nil {
- err = ctx.Err()
- }
- err = errors.Wrap(err, "can't retry")
- }
+ err = errors.Wrap(ctx.Err(), err.Error())
return
- case <-time.After(sleep):
}
}
}
+// ResetTimeout changes the possibly expired timer t to expire after duration d.
+//
+// If the timer has already expired and nothing has been received from its channel,
+// it is automatically drained as if the timer had never expired.
+func ResetTimeout(t *time.Timer, d time.Duration) {
+ if !t.Stop() {
+ <-t.C
+ }
+
+ t.Reset(d)
+}
+
// Retryable returns true for common errors that are considered retryable,
// i.e. temporary, timeout, DNS, connection refused and reset, host down and unreachable and
-// network down and unreachable errors.
+// network down and unreachable errors. In addition, any database error is considered retryable.
func Retryable(err error) bool {
var temporary interface {
Temporary() bool
@@ -133,6 +173,12 @@ func Retryable(err error) bool {
if errors.Is(err, syscall.ENETDOWN) || errors.Is(err, syscall.ENETUNREACH) {
return true
}
+ if errors.Is(err, syscall.EPIPE) {
+ return true
+ }
+ if errors.Is(err, io.EOF) || errors.Is(err, io.ErrUnexpectedEOF) {
+ return true
+ }
if errors.Is(err, driver.ErrBadConn) {
return true
@@ -141,43 +187,10 @@ func Retryable(err error) bool {
return true
}
- var e *mysql.MySQLError
- if errors.As(err, &e) {
- switch e.Number {
- case 1053, 1205, 1213, 2006:
- // 1053: Server shutdown in progress
- // 1205: Lock wait timeout
- // 1213: Deadlock found when trying to get lock
- // 2006: MySQL server has gone away
- return true
- default:
- return false
- }
- }
-
- var pe *pq.Error
- if errors.As(err, &pe) {
- switch pe.Code {
- case "08000", // connection_exception
- "08006", // connection_failure
- "08001", // sqlclient_unable_to_establish_sqlconnection
- "08004", // sqlserver_rejected_establishment_of_sqlconnection
- "40001", // serialization_failure
- "40P01", // deadlock_detected
- "54000", // program_limit_exceeded
- "55006", // object_in_use
- "55P03", // lock_not_available
- "57P01", // admin_shutdown
- "57P02", // crash_shutdown
- "57P03", // cannot_connect_now
- "58000", // system_error
- "58030", // io_error
- "XX000": // internal_error
- return true
- default:
- // Class 53 - Insufficient Resources
- return strings.HasPrefix(string(pe.Code), "53")
- }
+ var mye *mysql.MySQLError
+ var pqe *pq.Error
+ if errors.As(err, &mye) || errors.As(err, &pqe) {
+ return true
}
return false
diff --git a/pkg/types/string.go b/pkg/types/string.go
index f8ead45..ce2a4ac 100644
--- a/pkg/types/string.go
+++ b/pkg/types/string.go
@@ -15,6 +15,14 @@ type String struct {
sql.NullString
}
+// MakeString constructs a new non-NULL String from s.
+func MakeString(s string) String {
+ return String{sql.NullString{
+ String: s,
+ Valid: true,
+ }}
+}
+
// MarshalJSON implements the json.Marshaler interface.
// Supports JSON null.
func (s String) MarshalJSON() ([]byte, error) {
diff --git a/schema/mysql/schema.sql b/schema/mysql/schema.sql
index f4434f1..745a5e6 100644
--- a/schema/mysql/schema.sql
+++ b/schema/mysql/schema.sql
@@ -292,7 +292,7 @@ CREATE TABLE host_state (
hard_state tinyint unsigned NOT NULL,
previous_soft_state tinyint unsigned NOT NULL,
previous_hard_state tinyint unsigned NOT NULL,
- check_attempt tinyint unsigned NOT NULL,
+ check_attempt int unsigned NOT NULL,
severity smallint unsigned NOT NULL,
output longtext DEFAULT NULL,
@@ -460,7 +460,7 @@ CREATE TABLE service_state (
hard_state tinyint unsigned NOT NULL,
previous_soft_state tinyint unsigned NOT NULL,
previous_hard_state tinyint unsigned NOT NULL,
- check_attempt tinyint unsigned NOT NULL,
+ check_attempt int unsigned NOT NULL,
severity smallint unsigned NOT NULL,
output longtext DEFAULT NULL,
@@ -1147,7 +1147,7 @@ CREATE TABLE state_history (
hard_state tinyint unsigned NOT NULL,
previous_soft_state tinyint unsigned NOT NULL,
previous_hard_state tinyint unsigned NOT NULL,
- check_attempt tinyint unsigned NOT NULL,
+ check_attempt int unsigned NOT NULL, -- may be a tinyint unsigned, see https://icinga.com/docs/icinga-db/latest/doc/04-Upgrading/#upgrading-to-icinga-db-v112
output longtext DEFAULT NULL,
long_output longtext DEFAULT NULL,
max_check_attempts int unsigned NOT NULL,
@@ -1289,7 +1289,7 @@ CREATE TABLE history (
CONSTRAINT fk_history_notification_history FOREIGN KEY (notification_history_id) REFERENCES notification_history (id) ON DELETE CASCADE,
CONSTRAINT fk_history_state_history FOREIGN KEY (state_history_id) REFERENCES state_history (id) ON DELETE CASCADE,
- INDEX idx_history_event_time (event_time) COMMENT 'History filtered/ordered by event_time',
+ INDEX idx_history_event_time_event_type (event_time, event_type) COMMENT 'History filtered/ordered by event_time/event_type',
INDEX idx_history_acknowledgement (acknowledgement_history_id),
INDEX idx_history_comment (comment_history_id),
INDEX idx_history_downtime (downtime_history_id),
@@ -1343,4 +1343,4 @@ CREATE TABLE icingadb_schema (
) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_bin ROW_FORMAT=DYNAMIC;
INSERT INTO icingadb_schema (version, timestamp)
- VALUES (4, CURRENT_TIMESTAMP() * 1000);
+ VALUES (5, UNIX_TIMESTAMP() * 1000);
diff --git a/schema/mysql/upgrades/1.0.0-rc2.sql b/schema/mysql/upgrades/1.0.0-rc2.sql
index 50fb2f9..d4695cd 100644
--- a/schema/mysql/upgrades/1.0.0-rc2.sql
+++ b/schema/mysql/upgrades/1.0.0-rc2.sql
@@ -156,7 +156,7 @@ ALTER TABLE acknowledgement_history
MODIFY is_persistent enum('n','y') DEFAULT NULL COMMENT 'NULL if ack_set event happened before Icinga DB history recording';
INSERT INTO icingadb_schema (version, timestamp)
- VALUES (2, CURRENT_TIMESTAMP() * 1000);
+ VALUES (2, UNIX_TIMESTAMP() * 1000);
ALTER TABLE host_state
MODIFY output longtext DEFAULT NULL,
diff --git a/schema/mysql/upgrades/1.0.0.sql b/schema/mysql/upgrades/1.0.0.sql
index 16bb45e..054e10e 100644
--- a/schema/mysql/upgrades/1.0.0.sql
+++ b/schema/mysql/upgrades/1.0.0.sql
@@ -288,4 +288,4 @@ INSERT INTO sla_history_downtime
ON DUPLICATE KEY UPDATE sla_history_downtime.downtime_id = sla_history_downtime.downtime_id;
INSERT INTO icingadb_schema (version, TIMESTAMP)
- VALUES (3, CURRENT_TIMESTAMP() * 1000);
+ VALUES (3, UNIX_TIMESTAMP() * 1000);
diff --git a/schema/mysql/upgrades/1.1.1.sql b/schema/mysql/upgrades/1.1.1.sql
index 264ecae..b0d5b69 100644
--- a/schema/mysql/upgrades/1.1.1.sql
+++ b/schema/mysql/upgrades/1.1.1.sql
@@ -34,4 +34,4 @@ ALTER TABLE history
UNLOCK TABLES;
INSERT INTO icingadb_schema (version, timestamp)
- VALUES (4, CURRENT_TIMESTAMP() * 1000);
+ VALUES (4, UNIX_TIMESTAMP() * 1000);
diff --git a/schema/mysql/upgrades/1.2.0.sql b/schema/mysql/upgrades/1.2.0.sql
new file mode 100644
index 0000000..74cbcb1
--- /dev/null
+++ b/schema/mysql/upgrades/1.2.0.sql
@@ -0,0 +1,13 @@
+UPDATE icingadb_schema SET timestamp = UNIX_TIMESTAMP(timestamp / 1000) * 1000 WHERE timestamp > 20000000000000000;
+
+ALTER TABLE history ADD INDEX idx_history_event_time_event_type (event_time, event_type) COMMENT 'History filtered/ordered by event_time/event_type';
+ALTER TABLE history DROP INDEX idx_history_event_time;
+
+ALTER TABLE host_state MODIFY COLUMN check_attempt int unsigned NOT NULL;
+
+ALTER TABLE service_state MODIFY COLUMN check_attempt int unsigned NOT NULL;
+
+ALTER TABLE state_history MODIFY COLUMN check_attempt tinyint unsigned NOT NULL COMMENT 'optional schema upgrade not applied yet, see https://icinga.com/docs/icinga-db/latest/doc/04-Upgrading/#upgrading-to-icinga-db-v112';
+
+INSERT INTO icingadb_schema (version, timestamp)
+ VALUES (5, UNIX_TIMESTAMP() * 1000);
diff --git a/schema/mysql/upgrades/optional/1.2.0-history.sql b/schema/mysql/upgrades/optional/1.2.0-history.sql
new file mode 100644
index 0000000..4081fcb
--- /dev/null
+++ b/schema/mysql/upgrades/optional/1.2.0-history.sql
@@ -0,0 +1 @@
+ALTER TABLE state_history MODIFY COLUMN check_attempt int unsigned NOT NULL;
diff --git a/schema/pgsql/schema.sql b/schema/pgsql/schema.sql
index 9027fac..708c914 100644
--- a/schema/pgsql/schema.sql
+++ b/schema/pgsql/schema.sql
@@ -166,7 +166,7 @@ BEGIN
END IF;
END LOOP;
- RETURN 100 * (total_time - problem_time) / total_time;
+ RETURN (100 * (total_time - problem_time)::decimal / total_time)::decimal(7, 4);
END;
$$;
@@ -405,7 +405,7 @@ CREATE TABLE host_state (
hard_state tinyuint NOT NULL,
previous_soft_state tinyuint NOT NULL,
previous_hard_state tinyuint NOT NULL,
- check_attempt tinyuint NOT NULL,
+ check_attempt uint NOT NULL,
severity smalluint NOT NULL,
output text DEFAULT NULL,
@@ -675,7 +675,7 @@ CREATE TABLE service_state (
hard_state tinyuint NOT NULL,
previous_soft_state tinyuint NOT NULL,
previous_hard_state tinyuint NOT NULL,
- check_attempt tinyuint NOT NULL,
+ check_attempt uint NOT NULL,
severity smalluint NOT NULL,
output text DEFAULT NULL,
@@ -1846,7 +1846,7 @@ CREATE TABLE state_history (
hard_state tinyuint NOT NULL,
previous_soft_state tinyuint NOT NULL,
previous_hard_state tinyuint NOT NULL,
- check_attempt tinyuint NOT NULL,
+ check_attempt uint NOT NULL, -- may be a tinyuint, see https://icinga.com/docs/icinga-db/latest/doc/04-Upgrading/#upgrading-to-icinga-db-v112
output text DEFAULT NULL,
long_output text DEFAULT NULL,
max_check_attempts uint NOT NULL,
@@ -2074,7 +2074,7 @@ ALTER TABLE history ALTER COLUMN comment_history_id SET STORAGE PLAIN;
ALTER TABLE history ALTER COLUMN flapping_history_id SET STORAGE PLAIN;
ALTER TABLE history ALTER COLUMN acknowledgement_history_id SET STORAGE PLAIN;
-CREATE INDEX idx_history_event_time ON history(event_time);
+CREATE INDEX idx_history_event_time_event_type ON history(event_time, event_type);
CREATE INDEX idx_history_acknowledgement ON history(acknowledgement_history_id);
CREATE INDEX idx_history_comment ON history(comment_history_id);
CREATE INDEX idx_history_downtime ON history(downtime_history_id);
@@ -2095,7 +2095,7 @@ COMMENT ON COLUMN history.comment_history_id IS 'comment_history.comment_id';
COMMENT ON COLUMN history.flapping_history_id IS 'flapping_history.id';
COMMENT ON COLUMN history.acknowledgement_history_id IS 'acknowledgement_history.id';
-COMMENT ON INDEX idx_history_event_time IS 'History filtered/ordered by event_time';
+COMMENT ON INDEX idx_history_event_time_event_type IS 'History filtered/ordered by event_time/event_type';
COMMENT ON INDEX idx_history_host_service_id IS 'Host/service history detail filter';
CREATE TABLE sla_history_state (
@@ -2181,4 +2181,4 @@ CREATE TABLE icingadb_schema (
ALTER SEQUENCE icingadb_schema_id_seq OWNED BY icingadb_schema.id;
INSERT INTO icingadb_schema (version, timestamp)
- VALUES (2, extract(epoch from now()) * 1000);
+ VALUES (3, extract(epoch from now()) * 1000);
diff --git a/schema/pgsql/upgrades/1.2.0.sql b/schema/pgsql/upgrades/1.2.0.sql
new file mode 100644
index 0000000..2203ffb
--- /dev/null
+++ b/schema/pgsql/upgrades/1.2.0.sql
@@ -0,0 +1,153 @@
+CREATE OR REPLACE FUNCTION get_sla_ok_percent(
+ in_host_id bytea20,
+ in_service_id bytea20,
+ in_start_time biguint,
+ in_end_time biguint
+)
+RETURNS decimal(7, 4)
+LANGUAGE plpgsql
+STABLE
+PARALLEL RESTRICTED
+AS $$
+DECLARE
+ last_event_time biguint := in_start_time;
+ last_hard_state tinyuint;
+ active_downtimes uint := 0;
+ problem_time biguint := 0;
+ total_time biguint;
+ row record;
+BEGIN
+ IF in_end_time <= in_start_time THEN
+ RAISE 'end time must be greater than start time';
+ END IF;
+
+ total_time := in_end_time - in_start_time;
+
+ -- Use the latest event at or before the beginning of the SLA interval as the initial state.
+ SELECT hard_state INTO last_hard_state
+ FROM sla_history_state s
+ WHERE s.host_id = in_host_id
+ AND ((in_service_id IS NULL AND s.service_id IS NULL) OR s.service_id = in_service_id)
+ AND s.event_time <= in_start_time
+ ORDER BY s.event_time DESC
+ LIMIT 1;
+
+ -- If this does not exist, use the previous state from the first event after the beginning of the SLA interval.
+ IF last_hard_state IS NULL THEN
+ SELECT previous_hard_state INTO last_hard_state
+ FROM sla_history_state s
+ WHERE s.host_id = in_host_id
+ AND ((in_service_id IS NULL AND s.service_id IS NULL) OR s.service_id = in_service_id)
+ AND s.event_time > in_start_time
+ ORDER BY s.event_time ASC
+ LIMIT 1;
+ END IF;
+
+ -- If this also does not exist, use the current host/service state.
+ IF last_hard_state IS NULL THEN
+ IF in_service_id IS NULL THEN
+ SELECT hard_state INTO last_hard_state
+ FROM host_state s
+ WHERE s.host_id = in_host_id;
+ ELSE
+ SELECT hard_state INTO last_hard_state
+ FROM service_state s
+ WHERE s.host_id = in_host_id
+ AND s.service_id = in_service_id;
+ END IF;
+ END IF;
+
+ IF last_hard_state IS NULL THEN
+ last_hard_state := 0;
+ END IF;
+
+ FOR row IN
+ (
+ -- all downtime_start events before the end of the SLA interval
+ -- for downtimes that overlap the SLA interval in any way
+ SELECT
+ GREATEST(downtime_start, in_start_time) AS event_time,
+ 'downtime_start' AS event_type,
+ 1 AS event_prio,
+ NULL::tinyuint AS hard_state,
+ NULL::tinyuint AS previous_hard_state
+ FROM sla_history_downtime d
+ WHERE d.host_id = in_host_id
+ AND ((in_service_id IS NULL AND d.service_id IS NULL) OR d.service_id = in_service_id)
+ AND d.downtime_start < in_end_time
+ AND d.downtime_end >= in_start_time
+ ) UNION ALL (
+ -- all downtime_end events before the end of the SLA interval
+ -- for downtimes that overlap the SLA interval in any way
+ SELECT
+ downtime_end AS event_time,
+ 'downtime_end' AS event_type,
+ 2 AS event_prio,
+ NULL::tinyuint AS hard_state,
+ NULL::tinyuint AS previous_hard_state
+ FROM sla_history_downtime d
+ WHERE d.host_id = in_host_id
+ AND ((in_service_id IS NULL AND d.service_id IS NULL) OR d.service_id = in_service_id)
+ AND d.downtime_start < in_end_time
+ AND d.downtime_end >= in_start_time
+ AND d.downtime_end < in_end_time
+ ) UNION ALL (
+ -- all state events strictly in interval
+ SELECT
+ event_time,
+ 'state_change' AS event_type,
+ 0 AS event_prio,
+ hard_state,
+ previous_hard_state
+ FROM sla_history_state s
+ WHERE s.host_id = in_host_id
+ AND ((in_service_id IS NULL AND s.service_id IS NULL) OR s.service_id = in_service_id)
+ AND s.event_time > in_start_time
+ AND s.event_time < in_end_time
+ ) UNION ALL (
+ -- end event to keep loop simple, values are not used
+ SELECT
+ in_end_time AS event_time,
+ 'end' AS event_type,
+ 3 AS event_prio,
+ NULL::tinyuint AS hard_state,
+ NULL::tinyuint AS previous_hard_state
+ )
+ ORDER BY event_time, event_prio
+ LOOP
+ IF row.previous_hard_state = 99 THEN
+ total_time := total_time - (row.event_time - last_event_time);
+ ELSEIF ((in_service_id IS NULL AND last_hard_state > 0) OR (in_service_id IS NOT NULL AND last_hard_state > 1))
+ AND last_hard_state != 99
+ AND active_downtimes = 0
+ THEN
+ problem_time := problem_time + row.event_time - last_event_time;
+ END IF;
+
+ last_event_time := row.event_time;
+ IF row.event_type = 'state_change' THEN
+ last_hard_state := row.hard_state;
+ ELSEIF row.event_type = 'downtime_start' THEN
+ active_downtimes := active_downtimes + 1;
+ ELSEIF row.event_type = 'downtime_end' THEN
+ active_downtimes := active_downtimes - 1;
+ END IF;
+ END LOOP;
+
+ RETURN (100 * (total_time - problem_time)::decimal / total_time)::decimal(7, 4);
+END;
+$$;
+
+CREATE INDEX CONCURRENTLY idx_history_event_time_event_type ON history(event_time, event_type);
+COMMENT ON INDEX idx_history_event_time_event_type IS 'History filtered/ordered by event_time/event_type';
+
+DROP INDEX idx_history_event_time;
+
+ALTER TABLE host_state ALTER COLUMN check_attempt TYPE uint;
+
+ALTER TABLE service_state ALTER COLUMN check_attempt TYPE uint;
+
+COMMENT ON COLUMN state_history.check_attempt IS 'optional schema upgrade not applied yet, see https://icinga.com/docs/icinga-db/latest/doc/04-Upgrading/#upgrading-to-icinga-db-v112';
+
+INSERT INTO icingadb_schema (version, timestamp)
+ VALUES (3, extract(epoch from now()) * 1000);
diff --git a/schema/pgsql/upgrades/optional/1.2-0-history.sql b/schema/pgsql/upgrades/optional/1.2-0-history.sql
new file mode 100644
index 0000000..ea95765
--- /dev/null
+++ b/schema/pgsql/upgrades/optional/1.2-0-history.sql
@@ -0,0 +1,3 @@
+ALTER TABLE state_history ALTER COLUMN check_attempt TYPE uint;
+
+COMMENT ON COLUMN state_history.check_attempt IS NULL;
diff --git a/tests/go.mod b/tests/go.mod
index aca258a..56b7253 100644
--- a/tests/go.mod
+++ b/tests/go.mod
@@ -1,44 +1,46 @@
module github.com/icinga/icingadb/tests
-go 1.18
+go 1.22
require (
- github.com/go-redis/redis/v8 v8.11.4
- github.com/go-sql-driver/mysql v1.6.0
- github.com/goccy/go-yaml v1.9.5
- github.com/google/uuid v1.3.0
- github.com/icinga/icinga-testing v0.0.0-20220516144008-9600081b7a69
- github.com/jmoiron/sqlx v1.3.4
- github.com/lib/pq v1.10.5
- github.com/stretchr/testify v1.7.0
- go.uber.org/zap v1.21.0
+ github.com/go-sql-driver/mysql v1.8.1
+ github.com/goccy/go-yaml v1.11.3
+ github.com/google/uuid v1.6.0
+ github.com/icinga/icinga-testing v0.0.0-20240322142451-494ccd6d03e8
+ github.com/jmoiron/sqlx v1.3.5
+ github.com/lib/pq v1.10.9
+ github.com/redis/go-redis/v9 v9.5.1
+ github.com/stretchr/testify v1.9.0
+ go.uber.org/zap v1.27.0
golang.org/x/exp v0.0.0-20221012112151-59b0eab1532e
- golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4
+ golang.org/x/sync v0.7.0
)
require (
+ filippo.io/edwards25519 v1.1.0 // indirect
github.com/Icinga/go-libs v0.0.0-20220420130327-ef58ad52edd8 // indirect
- github.com/Microsoft/go-winio v0.5.0 // indirect
- github.com/benbjohnson/clock v1.1.0 // indirect
- github.com/cespare/xxhash/v2 v2.1.2 // indirect
+ github.com/Microsoft/go-winio v0.6.1 // indirect
+ github.com/cespare/xxhash/v2 v2.2.0 // indirect
github.com/davecgh/go-spew v1.1.1 // indirect
github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f // indirect
- github.com/docker/distribution v2.8.2+incompatible // indirect
- github.com/docker/docker v24.0.5-0.20230721180626-a61e2b4c9c5f+incompatible // indirect
+ github.com/distribution/reference v0.5.0 // indirect
+ github.com/docker/distribution v2.8.3+incompatible // indirect
+ github.com/docker/docker v24.0.9+incompatible // indirect
github.com/docker/go-connections v0.4.0 // indirect
- github.com/docker/go-units v0.4.0 // indirect
+ github.com/docker/go-units v0.5.0 // indirect
github.com/fatih/color v1.10.0 // indirect
github.com/gogo/protobuf v1.3.2 // indirect
github.com/mattn/go-colorable v0.1.8 // indirect
github.com/mattn/go-isatty v0.0.12 // indirect
github.com/opencontainers/go-digest v1.0.0 // indirect
- github.com/opencontainers/image-spec v1.0.2 // indirect
+ github.com/opencontainers/image-spec v1.1.0-rc5 // indirect
github.com/pkg/errors v0.9.1 // indirect
github.com/pmezard/go-difflib v1.0.0 // indirect
- go.uber.org/atomic v1.9.0 // indirect
- go.uber.org/multierr v1.7.0 // indirect
- golang.org/x/net v0.7.0 // indirect
- golang.org/x/sys v0.5.0 // indirect
+ go.uber.org/multierr v1.11.0 // indirect
+ golang.org/x/mod v0.14.0 // indirect
+ golang.org/x/net v0.19.0 // indirect
+ golang.org/x/sys v0.16.0 // indirect
+ golang.org/x/tools v0.16.1 // indirect
golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1 // indirect
- gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b // indirect
+ gopkg.in/yaml.v3 v3.0.1 // indirect
)
diff --git a/tests/go.sum b/tests/go.sum
index b283b96..a636cd4 100644
--- a/tests/go.sum
+++ b/tests/go.sum
@@ -1,1065 +1,251 @@
-bazil.org/fuse v0.0.0-20160811212531-371fbbdaa898/go.mod h1:Xbm+BRKSBEpa4q4hTSxohYNQpsxXPbPry4JJWOB3LB8=
-cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
-cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
-cloud.google.com/go v0.38.0/go.mod h1:990N+gfupTy94rShfmMCWGDn0LpTmnzTp2qbd1dvSRU=
-cloud.google.com/go v0.44.1/go.mod h1:iSa0KzasP4Uvy3f1mN/7PiObzGgflwredwwASm/v6AU=
-cloud.google.com/go v0.44.2/go.mod h1:60680Gw3Yr4ikxnPRS/oxxkBccT6SA1yMk63TGekxKY=
-cloud.google.com/go v0.45.1/go.mod h1:RpBamKRgapWJb87xiFSdk4g1CME7QZg3uwTez+TSTjc=
-cloud.google.com/go v0.46.3/go.mod h1:a6bKKbmY7er1mI7TEI4lsAkts/mkhTSZK8w33B4RAg0=
-cloud.google.com/go v0.50.0/go.mod h1:r9sluTvynVuxRIOHXQEHMFffphuXHOMZMycpNR5e6To=
-cloud.google.com/go v0.52.0/go.mod h1:pXajvRH/6o3+F9jDHZWQ5PbGhn+o8w9qiu/CffaVdO4=
-cloud.google.com/go v0.53.0/go.mod h1:fp/UouUEsRkN6ryDKNW/Upv/JBKnv6WDthjR6+vze6M=
-cloud.google.com/go v0.54.0/go.mod h1:1rq2OEkV3YMf6n/9ZvGWI3GWw0VoqH/1x2nd8Is/bPc=
-cloud.google.com/go/bigquery v1.0.1/go.mod h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbfbpx4poX+o=
-cloud.google.com/go/bigquery v1.3.0/go.mod h1:PjpwJnslEMmckchkHFfq+HTD2DmtT67aNFKH1/VBDHE=
-cloud.google.com/go/bigquery v1.4.0/go.mod h1:S8dzgnTigyfTmLBfrtrhyYhwRxG72rYxvftPBK2Dvzc=
-cloud.google.com/go/datastore v1.0.0/go.mod h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7TKLgZqpHYE=
-cloud.google.com/go/datastore v1.1.0/go.mod h1:umbIZjpQpHh4hmRpGhH4tLFup+FVzqBi1b3c64qFpCk=
-cloud.google.com/go/pubsub v1.0.1/go.mod h1:R0Gpsv3s54REJCy4fxDixWD93lHJMoZTyQ2kNxGRt3I=
-cloud.google.com/go/pubsub v1.1.0/go.mod h1:EwwdRX2sKPjnvnqCa270oGRyludottCI76h+R3AArQw=
-cloud.google.com/go/pubsub v1.2.0/go.mod h1:jhfEVHT8odbXTkndysNHCcx0awwzvfOlguIAii9o8iA=
-cloud.google.com/go/storage v1.0.0/go.mod h1:IhtSnM/ZTZV8YYJWCY8RULGVqBDmpoyjwiyrjsg+URw=
-cloud.google.com/go/storage v1.5.0/go.mod h1:tpKbwo567HUNpVclU5sGELwQWBDZ8gh0ZeosJ0Rtdos=
-cloud.google.com/go/storage v1.6.0/go.mod h1:N7U0C8pVQ/+NIKOBQyamJIeKQKkZ+mxpohlUTyfDhBk=
-dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU=
-github.com/Azure/azure-sdk-for-go v16.2.1+incompatible/go.mod h1:9XXNKU+eRnpl9moKnB4QOLf1HestfXbmab5FXxiDBjc=
-github.com/Azure/go-ansiterm v0.0.0-20170929234023-d6e3b3328b78 h1:w+iIsaOQNcT7OZ575w+acHgRric5iCyQh+xv+KJ4HB8=
-github.com/Azure/go-ansiterm v0.0.0-20170929234023-d6e3b3328b78/go.mod h1:LmzpDX56iTiv29bbRTIsUNlaFfuhWRQBWjQdVyAevI8=
-github.com/Azure/go-autorest v10.8.1+incompatible/go.mod h1:r+4oMnoxhatjLLJ6zxSWATqVooLgysK6ZNox3g/xq24=
-github.com/Azure/go-autorest v14.2.0+incompatible/go.mod h1:r+4oMnoxhatjLLJ6zxSWATqVooLgysK6ZNox3g/xq24=
-github.com/Azure/go-autorest/autorest v0.11.1/go.mod h1:JFgpikqFJ/MleTTxwepExTKnFUKKszPS8UavbQYUMuw=
-github.com/Azure/go-autorest/autorest/adal v0.9.0/go.mod h1:/c022QCutn2P7uY+/oQWWNcK9YU+MH96NgK+jErpbcg=
-github.com/Azure/go-autorest/autorest/adal v0.9.5/go.mod h1:B7KF7jKIeC9Mct5spmyCB/A8CG/sEz1vwIRGv/bbw7A=
-github.com/Azure/go-autorest/autorest/date v0.3.0/go.mod h1:BI0uouVdmngYNUzGWeSYnokU+TrmwEsOqdt8Y6sso74=
-github.com/Azure/go-autorest/autorest/mocks v0.4.0/go.mod h1:LTp+uSrOhSkaKrUy935gNZuuIPPVsHlr9DSOxSayd+k=
-github.com/Azure/go-autorest/autorest/mocks v0.4.1/go.mod h1:LTp+uSrOhSkaKrUy935gNZuuIPPVsHlr9DSOxSayd+k=
-github.com/Azure/go-autorest/logger v0.2.0/go.mod h1:T9E3cAhj2VqvPOtCYAvby9aBXkZmbF5NWuPV8+WeEW8=
-github.com/Azure/go-autorest/tracing v0.6.0/go.mod h1:+vhtPC754Xsa23ID7GlGsrdKBpUA79WCAKPPZVC2DeU=
-github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
-github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo=
+filippo.io/edwards25519 v1.1.0 h1:FNf4tywRC1HmFuKW5xopWpigGjJKiJSV0Cqo0cJWDaA=
+filippo.io/edwards25519 v1.1.0/go.mod h1:BxyFTGdWcka3PhytdK4V28tE5sGfRvvvRV7EaN4VDT4=
+github.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1 h1:UQHMgLO+TxOElx5B5HZ4hJQsoJ/PvUvKRhJHDQXO8P8=
+github.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1/go.mod h1:xomTg63KZ2rFqZQzSB4Vz2SUXa1BpHTVz9L5PTmPC4E=
github.com/Icinga/go-libs v0.0.0-20220420130327-ef58ad52edd8 h1:hG4Y/LPERK9i+P8/jnYlq9PeDd9deIkwEWOIimDU3uk=
github.com/Icinga/go-libs v0.0.0-20220420130327-ef58ad52edd8/go.mod h1:xlgU55MKs/vIg1fMlAEBSrslahYayZNwjXvf3w1dvyA=
-github.com/Microsoft/go-winio v0.4.11/go.mod h1:VhR8bwka0BXejwEJY73c50VrPtXAaKcyvVC4A4RozmA=
-github.com/Microsoft/go-winio v0.4.14/go.mod h1:qXqCSQ3Xa7+6tgxaGTIe4Kpcdsi+P8jBhyzoq1bpyYA=
-github.com/Microsoft/go-winio v0.4.15-0.20190919025122-fc70bd9a86b5/go.mod h1:tTuCMEN+UleMWgg9dVx4Hu52b1bJo+59jBh3ajtinzw=
-github.com/Microsoft/go-winio v0.4.16-0.20201130162521-d1ffc52c7331/go.mod h1:XB6nPKklQyQ7GC9LdcBEcBl8PF76WugXOPRXwdLnMv0=
-github.com/Microsoft/go-winio v0.4.16/go.mod h1:XB6nPKklQyQ7GC9LdcBEcBl8PF76WugXOPRXwdLnMv0=
-github.com/Microsoft/go-winio v0.4.17-0.20210211115548-6eac466e5fa3/go.mod h1:JPGBdM1cNvN/6ISo+n8V5iA4v8pBzdOpzfwIujj1a84=
-github.com/Microsoft/go-winio v0.4.17-0.20210324224401-5516f17a5958/go.mod h1:JPGBdM1cNvN/6ISo+n8V5iA4v8pBzdOpzfwIujj1a84=
-github.com/Microsoft/go-winio v0.4.17/go.mod h1:JPGBdM1cNvN/6ISo+n8V5iA4v8pBzdOpzfwIujj1a84=
-github.com/Microsoft/go-winio v0.5.0 h1:Elr9Wn+sGKPlkaBvwu4mTrxtmOp3F3yV9qhaHbXGjwU=
-github.com/Microsoft/go-winio v0.5.0/go.mod h1:JPGBdM1cNvN/6ISo+n8V5iA4v8pBzdOpzfwIujj1a84=
-github.com/Microsoft/hcsshim v0.8.6/go.mod h1:Op3hHsoHPAvb6lceZHDtd9OkTew38wNoXnJs8iY7rUg=
-github.com/Microsoft/hcsshim v0.8.7-0.20190325164909-8abdbb8205e4/go.mod h1:Op3hHsoHPAvb6lceZHDtd9OkTew38wNoXnJs8iY7rUg=
-github.com/Microsoft/hcsshim v0.8.7/go.mod h1:OHd7sQqRFrYd3RmSgbgji+ctCwkbq2wbEYNSzOYtcBQ=
-github.com/Microsoft/hcsshim v0.8.9/go.mod h1:5692vkUqntj1idxauYlpoINNKeqCiG6Sg38RRsjT5y8=
-github.com/Microsoft/hcsshim v0.8.14/go.mod h1:NtVKoYxQuTLx6gEq0L96c9Ju4JbRJ4nY2ow3VK6a9Lg=
-github.com/Microsoft/hcsshim v0.8.15/go.mod h1:x38A4YbHbdxJtc0sF6oIz+RG0npwSCAvn69iY6URG00=
-github.com/Microsoft/hcsshim v0.8.16/go.mod h1:o5/SZqmR7x9JNKsW3pu+nqHm0MF8vbA+VxGOoXdC600=
-github.com/Microsoft/hcsshim v0.8.18/go.mod h1:+w2gRZ5ReXQhFOrvSQeNfhrYB/dg3oDwTOcER2fw4I4=
-github.com/Microsoft/hcsshim/test v0.0.0-20201218223536-d3e5debf77da/go.mod h1:5hlzMzRKMLyo42nCZ9oml8AdTlq/0cvIaBv6tK1RehU=
-github.com/Microsoft/hcsshim/test v0.0.0-20210227013316-43a75bb4edd3/go.mod h1:mw7qgWloBUl75W/gVH3cQszUg1+gUITj7D6NY7ywVnY=
-github.com/NYTimes/gziphandler v0.0.0-20170623195520-56545f4a5d46/go.mod h1:3wb06e3pkSAbeQ52E9H9iFoQsEEwGN64994WTCIhntQ=
-github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU=
+github.com/Microsoft/go-winio v0.6.1 h1:9/kr64B9VUZrLm5YYwbGtUJnMgqWVOdUAXu6Migciow=
+github.com/Microsoft/go-winio v0.6.1/go.mod h1:LRdKpFKfdobln8UmuiYcKPot9D2v6svN5+sAH+4kjUM=
github.com/PuerkitoBio/goquery v1.5.1/go.mod h1:GsLWisAFVj4WgDibEWF4pvYnkVQBpKBKeU+7zCJoLcc=
-github.com/PuerkitoBio/purell v1.1.1/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0=
-github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578/go.mod h1:uGdkoq3SwY9Y+13GIhn11/XLaGBb4BfwItxLd5jeuXE=
-github.com/Shopify/logrus-bugsnag v0.0.0-20171204204709-577dee27f20d/go.mod h1:HI8ITrYtUY+O+ZhtlqUnD8+KwNPOyugEhfP9fdUIaEQ=
-github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc=
-github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc=
-github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0=
-github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0=
-github.com/alexflint/go-filemutex v0.0.0-20171022225611-72bdc8eae2ae/go.mod h1:CgnQgUtFrFz9mxFNtED3jI5tLDjKlOM+oUF/sTk6ps0=
github.com/andybalholm/cascadia v1.1.0/go.mod h1:GsXiBklL0woXo1j/WYWtSYYC4ouU9PqHO0sqidkEA4Y=
-github.com/antihax/optional v1.0.0/go.mod h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kdvxnR2qWY=
-github.com/armon/consul-api v0.0.0-20180202201655-eb2c6b5be1b6/go.mod h1:grANhF5doyWs3UAsr3K4I6qtAmlQcZDesFNEHPZAzj8=
-github.com/asaskevich/govalidator v0.0.0-20190424111038-f61b66f89f4a/go.mod h1:lB+ZfQJz7igIIfQNfa7Ml4HSf2uFQQRzpGGRXenZAgY=
-github.com/aws/aws-sdk-go v1.15.11/go.mod h1:mFuSZ37Z9YOHbQEwBWztmVzqXrEkub65tZoCYDt7FT0=
-github.com/benbjohnson/clock v1.1.0 h1:Q92kusRqC1XV2MjkWETPvjJVqKetz1OzxZB7mHJLju8=
-github.com/benbjohnson/clock v1.1.0/go.mod h1:J11/hYXuz8f4ySSvYwY0FKfm+ezbsZBKZxNJlLklBHA=
-github.com/beorn7/perks v0.0.0-20160804104726-4c0e84591b9a/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q=
-github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q=
-github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8=
-github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw=
-github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs=
-github.com/bitly/go-simplejson v0.5.0/go.mod h1:cXHtHw4XUPsvGaxgjIAn8PhEWG9NfngEKAMDJEczWVA=
-github.com/bits-and-blooms/bitset v1.2.0/go.mod h1:gIdJ4wp64HaoK2YrL1Q5/N7Y16edYb8uY+O0FJTyyDA=
-github.com/blang/semver v3.1.0+incompatible/go.mod h1:kRBLl5iJ+tD4TcOOxsy/0fnwebNt5EWlYSAyrTnjyyk=
-github.com/blang/semver v3.5.1+incompatible/go.mod h1:kRBLl5iJ+tD4TcOOxsy/0fnwebNt5EWlYSAyrTnjyyk=
-github.com/bmizerany/assert v0.0.0-20160611221934-b7ed37b82869/go.mod h1:Ekp36dRnpXw/yCqJaO+ZrUyxD+3VXMFFr56k5XYrpB4=
-github.com/bshuster-repo/logrus-logstash-hook v0.4.1/go.mod h1:zsTqEiSzDgAa/8GZR7E1qaXrhYNDKBYy5/dWPTIflbk=
-github.com/buger/jsonparser v0.0.0-20180808090653-f4dd9f5a6b44/go.mod h1:bbYlZJ7hK1yFx9hf58LP0zeX7UjIGs20ufpu3evjr+s=
-github.com/bugsnag/bugsnag-go v0.0.0-20141110184014-b1d153021fcd/go.mod h1:2oa8nejYd4cQ/b0hMIopN0lCRxU0bueqREvZLWFrtK8=
-github.com/bugsnag/osext v0.0.0-20130617224835-0dd3f918b21b/go.mod h1:obH5gd0BsqsP2LwDJ9aOkm/6J86V6lyAXCoQWGw3K50=
-github.com/bugsnag/panicwrap v0.0.0-20151223152923-e2c28503fcd0/go.mod h1:D/8v3kj0zr8ZAKg1AQ6crr+5VwKN5eIywRkfhyM/+dE=
-github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU=
-github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc=
-github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
-github.com/cespare/xxhash/v2 v2.1.2 h1:YRXhKfTDauu4ajMg1TPgFO5jnlC2HCbmLXMcTG5cbYE=
-github.com/cespare/xxhash/v2 v2.1.2/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
-github.com/checkpoint-restore/go-criu/v4 v4.1.0/go.mod h1:xUQBLp4RLc5zJtWY++yjOoMoB5lihDt7fai+75m+rGw=
-github.com/checkpoint-restore/go-criu/v5 v5.0.0/go.mod h1:cfwC0EG7HMUenopBsUf9d89JlCLQIfgVcNsNN0t6T2M=
-github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI=
-github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI=
-github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU=
-github.com/cilium/ebpf v0.0.0-20200110133405-4032b1d8aae3/go.mod h1:MA5e5Lr8slmEg9bt0VpxxWqJlO4iwu3FBdHUzV7wQVg=
-github.com/cilium/ebpf v0.0.0-20200702112145-1c8d4c9ef775/go.mod h1:7cR51M8ViRLIdUjrmSXlK9pkrsDlLHbO8jiB8X8JnOc=
-github.com/cilium/ebpf v0.2.0/go.mod h1:To2CFviqOWL/M0gIMsvSMlqe7em/l1ALkX1PyjrX2Qs=
-github.com/cilium/ebpf v0.4.0/go.mod h1:4tRaxcgiL706VnOzHOdBlY8IEAIdxINsQBcU4xJJXRs=
-github.com/cilium/ebpf v0.6.2/go.mod h1:4tRaxcgiL706VnOzHOdBlY8IEAIdxINsQBcU4xJJXRs=
-github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw=
-github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc=
-github.com/cncf/udpa/go v0.0.0-20201120205902-5459f2c99403/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk=
-github.com/cncf/xds/go v0.0.0-20210312221358-fbca930ec8ed/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs=
-github.com/cockroachdb/datadriven v0.0.0-20190809214429-80d97fb3cbaa/go.mod h1:zn76sxSg3SzpJ0PPJaLDCu+Bu0Lg3sKTORVIj19EIF8=
-github.com/containerd/aufs v0.0.0-20200908144142-dab0cbea06f4/go.mod h1:nukgQABAEopAHvB6j7cnP5zJ+/3aVcE7hCYqvIwAHyE=
-github.com/containerd/aufs v0.0.0-20201003224125-76a6863f2989/go.mod h1:AkGGQs9NM2vtYHaUen+NljV0/baGCAPELGm2q9ZXpWU=
-github.com/containerd/aufs v0.0.0-20210316121734-20793ff83c97/go.mod h1:kL5kd6KM5TzQjR79jljyi4olc1Vrx6XBlcyj3gNv2PU=
-github.com/containerd/aufs v1.0.0/go.mod h1:kL5kd6KM5TzQjR79jljyi4olc1Vrx6XBlcyj3gNv2PU=
-github.com/containerd/btrfs v0.0.0-20201111183144-404b9149801e/go.mod h1:jg2QkJcsabfHugurUvvPhS3E08Oxiuh5W/g1ybB4e0E=
-github.com/containerd/btrfs v0.0.0-20210316141732-918d888fb676/go.mod h1:zMcX3qkXTAi9GI50+0HOeuV8LU2ryCE/V2vG/ZBiTss=
-github.com/containerd/btrfs v1.0.0/go.mod h1:zMcX3qkXTAi9GI50+0HOeuV8LU2ryCE/V2vG/ZBiTss=
-github.com/containerd/cgroups v0.0.0-20190717030353-c4b9ac5c7601/go.mod h1:X9rLEHIqSf/wfK8NsPqxJmeZgW4pcfzdXITDrUSJ6uI=
-github.com/containerd/cgroups v0.0.0-20190919134610-bf292b21730f/go.mod h1:OApqhQ4XNSNC13gXIwDjhOQxjWa/NxkwZXJ1EvqT0ko=
-github.com/containerd/cgroups v0.0.0-20200531161412-0dbf7f05ba59/go.mod h1:pA0z1pT8KYB3TCXK/ocprsh7MAkoW8bZVzPdih9snmM=
-github.com/containerd/cgroups v0.0.0-20200710171044-318312a37340/go.mod h1:s5q4SojHctfxANBDvMeIaIovkq29IP48TKAxnhYRxvo=
-github.com/containerd/cgroups v0.0.0-20200824123100-0b889c03f102/go.mod h1:s5q4SojHctfxANBDvMeIaIovkq29IP48TKAxnhYRxvo=
-github.com/containerd/cgroups v0.0.0-20210114181951-8a68de567b68/go.mod h1:ZJeTFisyysqgcCdecO57Dj79RfL0LNeGiFUqLYQRYLE=
-github.com/containerd/cgroups v1.0.1/go.mod h1:0SJrPIenamHDcZhEcJMNBB85rHcUsw4f25ZfBiPYRkU=
-github.com/containerd/console v0.0.0-20180822173158-c12b1e7919c1/go.mod h1:Tj/on1eG8kiEhd0+fhSDzsPAFESxzBBvdyEgyryXffw=
-github.com/containerd/console v0.0.0-20181022165439-0650fd9eeb50/go.mod h1:Tj/on1eG8kiEhd0+fhSDzsPAFESxzBBvdyEgyryXffw=
-github.com/containerd/console v0.0.0-20191206165004-02ecf6a7291e/go.mod h1:8Pf4gM6VEbTNRIT26AyyU7hxdQU3MvAvxVI0sc00XBE=
-github.com/containerd/console v1.0.1/go.mod h1:XUsP6YE/mKtz6bxc+I8UiKKTP04qjQL4qcS3XoQ5xkw=
-github.com/containerd/console v1.0.2/go.mod h1:ytZPjGgY2oeTkAONYafi2kSj0aYggsf8acV1PGKCbzQ=
-github.com/containerd/containerd v1.2.10/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA=
-github.com/containerd/containerd v1.3.0-beta.2.0.20190828155532-0293cbd26c69/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA=
-github.com/containerd/containerd v1.3.0/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA=
-github.com/containerd/containerd v1.3.1-0.20191213020239-082f7e3aed57/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA=
-github.com/containerd/containerd v1.3.2/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA=
-github.com/containerd/containerd v1.4.0-beta.2.0.20200729163537-40b22ef07410/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA=
-github.com/containerd/containerd v1.4.1/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA=
-github.com/containerd/containerd v1.4.3/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA=
-github.com/containerd/containerd v1.5.0-beta.1/go.mod h1:5HfvG1V2FsKesEGQ17k5/T7V960Tmcumvqn8Mc+pCYQ=
-github.com/containerd/containerd v1.5.0-beta.3/go.mod h1:/wr9AVtEM7x9c+n0+stptlo/uBBoBORwEx6ardVcmKU=
-github.com/containerd/containerd v1.5.0-beta.4/go.mod h1:GmdgZd2zA2GYIBZ0w09ZvgqEq8EfBp/m3lcVZIvPHhI=
-github.com/containerd/containerd v1.5.0-rc.0/go.mod h1:V/IXoMqNGgBlabz3tHD2TWDoTJseu1FGOKuoA4nNb2s=
-github.com/containerd/containerd v1.5.1/go.mod h1:0DOxVqwDy2iZvrZp2JUx/E+hS0UNTVn7dJnIOwtYR4g=
-github.com/containerd/containerd v1.5.5/go.mod h1:oSTh0QpT1w6jYcGmbiSbxv9OSQYaa88mPyWIuU79zyo=
-github.com/containerd/continuity v0.0.0-20190426062206-aaeac12a7ffc/go.mod h1:GL3xCUCBDV3CZiTSEKksMWbLE66hEyuu9qyDOOqM47Y=
-github.com/containerd/continuity v0.0.0-20190815185530-f2a389ac0a02/go.mod h1:GL3xCUCBDV3CZiTSEKksMWbLE66hEyuu9qyDOOqM47Y=
-github.com/containerd/continuity v0.0.0-20191127005431-f65d91d395eb/go.mod h1:GL3xCUCBDV3CZiTSEKksMWbLE66hEyuu9qyDOOqM47Y=
-github.com/containerd/continuity v0.0.0-20200710164510-efbc4488d8fe/go.mod h1:cECdGN1O8G9bgKTlLhuPJimka6Xb/Gg7vYzCTNVxhvo=
-github.com/containerd/continuity v0.0.0-20201208142359-180525291bb7/go.mod h1:kR3BEg7bDFaEddKm54WSmrol1fKWDU1nKYkgrcgZT7Y=
-github.com/containerd/continuity v0.0.0-20210208174643-50096c924a4e/go.mod h1:EXlVlkqNba9rJe3j7w3Xa924itAMLgZH4UD/Q4PExuQ=
-github.com/containerd/continuity v0.1.0/go.mod h1:ICJu0PwR54nI0yPEnJ6jcS+J7CZAUXrLh8lPo2knzsM=
-github.com/containerd/fifo v0.0.0-20180307165137-3d5202aec260/go.mod h1:ODA38xgv3Kuk8dQz2ZQXpnv/UZZUHUCL7pnLehbXgQI=
-github.com/containerd/fifo v0.0.0-20190226154929-a9fb20d87448/go.mod h1:ODA38xgv3Kuk8dQz2ZQXpnv/UZZUHUCL7pnLehbXgQI=
-github.com/containerd/fifo v0.0.0-20200410184934-f15a3290365b/go.mod h1:jPQ2IAeZRCYxpS/Cm1495vGFww6ecHmMk1YJH2Q5ln0=
-github.com/containerd/fifo v0.0.0-20201026212402-0724c46b320c/go.mod h1:jPQ2IAeZRCYxpS/Cm1495vGFww6ecHmMk1YJH2Q5ln0=
-github.com/containerd/fifo v0.0.0-20210316144830-115abcc95a1d/go.mod h1:ocF/ME1SX5b1AOlWi9r677YJmCPSwwWnQ9O123vzpE4=
-github.com/containerd/fifo v1.0.0/go.mod h1:ocF/ME1SX5b1AOlWi9r677YJmCPSwwWnQ9O123vzpE4=
-github.com/containerd/go-cni v1.0.1/go.mod h1:+vUpYxKvAF72G9i1WoDOiPGRtQpqsNW/ZHtSlv++smU=
-github.com/containerd/go-cni v1.0.2/go.mod h1:nrNABBHzu0ZwCug9Ije8hL2xBCYh/pjfMb1aZGrrohk=
-github.com/containerd/go-runc v0.0.0-20180907222934-5a6d9f37cfa3/go.mod h1:IV7qH3hrUgRmyYrtgEeGWJfWbgcHL9CSRruz2Vqcph0=
-github.com/containerd/go-runc v0.0.0-20190911050354-e029b79d8cda/go.mod h1:IV7qH3hrUgRmyYrtgEeGWJfWbgcHL9CSRruz2Vqcph0=
-github.com/containerd/go-runc v0.0.0-20200220073739-7016d3ce2328/go.mod h1:PpyHrqVs8FTi9vpyHwPwiNEGaACDxT/N/pLcvMSRA9g=
-github.com/containerd/go-runc v0.0.0-20201020171139-16b287bc67d0/go.mod h1:cNU0ZbCgCQVZK4lgG3P+9tn9/PaJNmoDXPpoJhDR+Ok=
-github.com/containerd/go-runc v1.0.0/go.mod h1:cNU0ZbCgCQVZK4lgG3P+9tn9/PaJNmoDXPpoJhDR+Ok=
-github.com/containerd/imgcrypt v1.0.1/go.mod h1:mdd8cEPW7TPgNG4FpuP3sGBiQ7Yi/zak9TYCG3juvb0=
-github.com/containerd/imgcrypt v1.0.4-0.20210301171431-0ae5c75f59ba/go.mod h1:6TNsg0ctmizkrOgXRNQjAPFWpMYRWuiB6dSF4Pfa5SA=
-github.com/containerd/imgcrypt v1.1.1-0.20210312161619-7ed62a527887/go.mod h1:5AZJNI6sLHJljKuI9IHnw1pWqo/F0nGDOuR9zgTs7ow=
-github.com/containerd/imgcrypt v1.1.1/go.mod h1:xpLnwiQmEUJPvQoAapeb2SNCxz7Xr6PJrXQb0Dpc4ms=
-github.com/containerd/nri v0.0.0-20201007170849-eb1350a75164/go.mod h1:+2wGSDGFYfE5+So4M5syatU0N0f0LbWpuqyMi4/BE8c=
-github.com/containerd/nri v0.0.0-20210316161719-dbaa18c31c14/go.mod h1:lmxnXF6oMkbqs39FiCt1s0R2HSMhcLel9vNL3m4AaeY=
-github.com/containerd/nri v0.1.0/go.mod h1:lmxnXF6oMkbqs39FiCt1s0R2HSMhcLel9vNL3m4AaeY=
-github.com/containerd/ttrpc v0.0.0-20190828154514-0e0f228740de/go.mod h1:PvCDdDGpgqzQIzDW1TphrGLssLDZp2GuS+X5DkEJB8o=
-github.com/containerd/ttrpc v0.0.0-20190828172938-92c8520ef9f8/go.mod h1:PvCDdDGpgqzQIzDW1TphrGLssLDZp2GuS+X5DkEJB8o=
-github.com/containerd/ttrpc v0.0.0-20191028202541-4f1b8fe65a5c/go.mod h1:LPm1u0xBw8r8NOKoOdNMeVHSawSsltak+Ihv+etqsE8=
-github.com/containerd/ttrpc v1.0.1/go.mod h1:UAxOpgT9ziI0gJrmKvgcZivgxOp8iFPSk8httJEt98Y=
-github.com/containerd/ttrpc v1.0.2/go.mod h1:UAxOpgT9ziI0gJrmKvgcZivgxOp8iFPSk8httJEt98Y=
-github.com/containerd/typeurl v0.0.0-20180627222232-a93fcdb778cd/go.mod h1:Cm3kwCdlkCfMSHURc+r6fwoGH6/F1hH3S4sg0rLFWPc=
-github.com/containerd/typeurl v0.0.0-20190911142611-5eb25027c9fd/go.mod h1:GeKYzf2pQcqv7tJ0AoCuuhtnqhva5LNU3U+OyKxxJpk=
-github.com/containerd/typeurl v1.0.1/go.mod h1:TB1hUtrpaiO88KEK56ijojHS1+NeF0izUACaJW2mdXg=
-github.com/containerd/typeurl v1.0.2/go.mod h1:9trJWW2sRlGub4wZJRTW83VtbOLS6hwcDZXTn6oPz9s=
-github.com/containerd/zfs v0.0.0-20200918131355-0a33824f23a2/go.mod h1:8IgZOBdv8fAgXddBT4dBXJPtxyRsejFIpXoklgxgEjw=
-github.com/containerd/zfs v0.0.0-20210301145711-11e8f1707f62/go.mod h1:A9zfAbMlQwE+/is6hi0Xw8ktpL+6glmqZYtevJgaB8Y=
-github.com/containerd/zfs v0.0.0-20210315114300-dde8f0fda960/go.mod h1:m+m51S1DvAP6r3FcmYCp54bQ34pyOwTieQDNRIRHsFY=
-github.com/containerd/zfs v0.0.0-20210324211415-d5c4544f0433/go.mod h1:m+m51S1DvAP6r3FcmYCp54bQ34pyOwTieQDNRIRHsFY=
-github.com/containerd/zfs v1.0.0/go.mod h1:m+m51S1DvAP6r3FcmYCp54bQ34pyOwTieQDNRIRHsFY=
-github.com/containernetworking/cni v0.7.1/go.mod h1:LGwApLUm2FpoOfxTDEeq8T9ipbpZ61X79hmU3w8FmsY=
-github.com/containernetworking/cni v0.8.0/go.mod h1:LGwApLUm2FpoOfxTDEeq8T9ipbpZ61X79hmU3w8FmsY=
-github.com/containernetworking/cni v0.8.1/go.mod h1:LGwApLUm2FpoOfxTDEeq8T9ipbpZ61X79hmU3w8FmsY=
-github.com/containernetworking/plugins v0.8.6/go.mod h1:qnw5mN19D8fIwkqW7oHHYDHVlzhJpcY6TQxn/fUyDDM=
-github.com/containernetworking/plugins v0.9.1/go.mod h1:xP/idU2ldlzN6m4p5LmGiwRDjeJr6FLK6vuiUwoH7P8=
-github.com/containers/ocicrypt v1.0.1/go.mod h1:MeJDzk1RJHv89LjsH0Sp5KTY3ZYkjXO/C+bKAeWFIrc=
-github.com/containers/ocicrypt v1.1.0/go.mod h1:b8AOe0YR67uU8OqfVNcznfFpAzu3rdgUV4GP9qXPfu4=
-github.com/containers/ocicrypt v1.1.1/go.mod h1:Dm55fwWm1YZAjYRaJ94z2mfZikIyIN4B0oB3dj3jFxY=
-github.com/coreos/bbolt v1.3.2/go.mod h1:iRUV2dpdMOn7Bo10OQBFzIJO9kkE559Wcmn+qkEiiKk=
-github.com/coreos/etcd v3.3.10+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE=
-github.com/coreos/go-iptables v0.4.5/go.mod h1:/mVI274lEDI2ns62jHCDnCyBF9Iwsmekav8Dbxlm1MU=
-github.com/coreos/go-iptables v0.5.0/go.mod h1:/mVI274lEDI2ns62jHCDnCyBF9Iwsmekav8Dbxlm1MU=
-github.com/coreos/go-oidc v2.1.0+incompatible/go.mod h1:CgnwVTmzoESiwO9qyAFEMiHoZ1nMCKZlZ9V6mm3/LKc=
-github.com/coreos/go-semver v0.2.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk=
-github.com/coreos/go-semver v0.3.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk=
-github.com/coreos/go-systemd v0.0.0-20161114122254-48702e0da86b/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4=
-github.com/coreos/go-systemd v0.0.0-20180511133405-39ca1b05acc7/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4=
-github.com/coreos/go-systemd v0.0.0-20190321100706-95778dfbb74e/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4=
-github.com/coreos/go-systemd/v22 v22.0.0/go.mod h1:xO0FLkIi5MaZafQlIrOotqXZ90ih+1atmu1JpKERPPk=
-github.com/coreos/go-systemd/v22 v22.1.0/go.mod h1:xO0FLkIi5MaZafQlIrOotqXZ90ih+1atmu1JpKERPPk=
-github.com/coreos/go-systemd/v22 v22.3.2/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc=
-github.com/coreos/pkg v0.0.0-20160727233714-3ac0863d7acf/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA=
-github.com/coreos/pkg v0.0.0-20180928190104-399ea9e2e55f/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA=
-github.com/cpuguy83/go-md2man/v2 v2.0.0-20190314233015-f79a8a8ca69d/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU=
-github.com/cpuguy83/go-md2man/v2 v2.0.0/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU=
-github.com/creack/pty v1.1.7/go.mod h1:lj5s0c3V2DBrqTV7llrYr5NG6My20zk30Fl46Y7DoTY=
+github.com/bsm/ginkgo/v2 v2.12.0 h1:Ny8MWAHyOepLGlLKYmXG4IEkioBysk6GpaRTLC8zwWs=
+github.com/bsm/ginkgo/v2 v2.12.0/go.mod h1:SwYbGRRDovPVboqFv0tPTcG1sN61LM1Z4ARdbAV9g4c=
+github.com/bsm/gomega v1.27.10 h1:yeMWxP2pV2fG3FgAODIY8EiRE3dy0aeFYt4l7wh6yKA=
+github.com/bsm/gomega v1.27.10/go.mod h1:JyEr/xRbxbtgWNi8tIEVPUYZ5Dzef52k01W3YH0H+O0=
+github.com/cespare/xxhash/v2 v2.2.0 h1:DC2CZ1Ep5Y4k3ZQ899DldepgrayRUGE6BBZ/cd9Cj44=
+github.com/cespare/xxhash/v2 v2.2.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
+github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E=
github.com/creack/pty v1.1.11/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E=
-github.com/cyphar/filepath-securejoin v0.2.2/go.mod h1:FpkQEhXnPnOthhzymB7CGsFk2G9VLXONKD9G7QGMM+4=
-github.com/d2g/dhcp4 v0.0.0-20170904100407-a1d1b6c41b1c/go.mod h1:Ct2BUK8SB0YC1SMSibvLzxjeJLnrYEVLULFNiHY9YfQ=
-github.com/d2g/dhcp4client v1.0.0/go.mod h1:j0hNfjhrt2SxUOw55nL0ATM/z4Yt3t2Kd1mW34z5W5s=
-github.com/d2g/dhcp4server v0.0.0-20181031114812-7d4a0a7f59a5/go.mod h1:Eo87+Kg/IX2hfWJfwxMzLyuSZyxSoAug2nGa1G2QAi8=
-github.com/d2g/hardwareaddr v0.0.0-20190221164911-e7d9fbe030e4/go.mod h1:bMl4RjIciD2oAxI7DmWRx6gbeqrkoLqv3MV0vzNad+I=
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
-github.com/denverdino/aliyungo v0.0.0-20190125010748-a747050bb1ba/go.mod h1:dV8lFg6daOBZbT6/BDGIz6Y3WFGn8juu6G+CQ6LHtl0=
-github.com/dgrijalva/jwt-go v0.0.0-20170104182250-a601269ab70c/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ=
-github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ=
github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f h1:lO4WD4F/rVNCu3HqELle0jiPLLBs70cWOduZpkS1E78=
github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f/go.mod h1:cuUVRXasLTGF7a8hSLbxyZXjz+1KgoB3wDUb6vlszIc=
-github.com/dgryski/go-sip13 v0.0.0-20181026042036-e10d5fee7954/go.mod h1:vAd38F8PWV+bWy6jNmig1y/TA+kYO4g3RSRF0IAv0no=
-github.com/dnaeon/go-vcr v1.0.1/go.mod h1:aBB1+wY4s93YsC3HHjMBMrwTj2R9FHDzUr9KyGc8n1E=
-github.com/docker/distribution v0.0.0-20190905152932-14b96e55d84c/go.mod h1:0+TTO4EOBfRPhZXAeF1Vu+W3hHZ8eLp8PgKVZlcvtFY=
-github.com/docker/distribution v2.7.1-0.20190205005809-0d3efadf0154+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w=
-github.com/docker/distribution v2.7.1+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w=
-github.com/docker/distribution v2.8.2+incompatible h1:T3de5rq0dB1j30rp0sA2rER+m322EBzniBPB6ZIzuh8=
-github.com/docker/distribution v2.8.2+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w=
-github.com/docker/docker v20.10.8+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
-github.com/docker/docker v24.0.5-0.20230721180626-a61e2b4c9c5f+incompatible h1:vGLHGz5kxqgyHLchV0OAXgH6j/QjKcZfWeF+1XN2GCg=
-github.com/docker/docker v24.0.5-0.20230721180626-a61e2b4c9c5f+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
+github.com/distribution/reference v0.5.0 h1:/FUIFXtfc/x2gpa5/VGfiGLuOIdYa1t65IKK2OFGvA0=
+github.com/distribution/reference v0.5.0/go.mod h1:BbU0aIcezP1/5jX/8MP0YiH4SdvB5Y4f/wlDRiLyi3E=
+github.com/docker/distribution v2.8.3+incompatible h1:AtKxIZ36LoNK51+Z6RpzLpddBirtxJnzDrHLEKxTAYk=
+github.com/docker/distribution v2.8.3+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w=
+github.com/docker/docker v24.0.7+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
+github.com/docker/docker v24.0.9+incompatible h1:HPGzNmwfLZWdxHqK9/II92pyi1EpYKsAqcl4G0Of9v0=
+github.com/docker/docker v24.0.9+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
github.com/docker/go-connections v0.4.0 h1:El9xVISelRB7BuFusrZozjnkIM5YnzCViNKohAFqRJQ=
github.com/docker/go-connections v0.4.0/go.mod h1:Gbd7IOopHjR8Iph03tsViu4nIes5XhDvyHbTtUxmeec=
-github.com/docker/go-events v0.0.0-20170721190031-9461782956ad/go.mod h1:Uw6UezgYA44ePAFQYUehOuCzmy5zmg/+nl2ZfMWGkpA=
-github.com/docker/go-events v0.0.0-20190806004212-e31b211e4f1c/go.mod h1:Uw6UezgYA44ePAFQYUehOuCzmy5zmg/+nl2ZfMWGkpA=
-github.com/docker/go-metrics v0.0.0-20180209012529-399ea8c73916/go.mod h1:/u0gXw0Gay3ceNrsHubL3BtdOL2fHf93USgMTe0W5dI=
-github.com/docker/go-metrics v0.0.1/go.mod h1:cG1hvH2utMXtqgqqYE9plW6lDxS3/5ayHzueweSI3Vw=
-github.com/docker/go-units v0.4.0 h1:3uh0PgVws3nIA0Q+MwDC8yjEPf9zjRfZZWXZYDct3Tw=
-github.com/docker/go-units v0.4.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk=
-github.com/docker/libtrust v0.0.0-20150114040149-fa567046d9b1/go.mod h1:cyGadeNEkKy96OOhEzfZl+yxihPEzKnqJwvfuSUqbZE=
-github.com/docker/spdystream v0.0.0-20160310174837-449fdfce4d96/go.mod h1:Qh8CwZgvJUkLughtfhJv5dyTYa91l1fOUCrgjqmcifM=
-github.com/docopt/docopt-go v0.0.0-20180111231733-ee0de3bc6815/go.mod h1:WwZ+bS3ebgob9U8Nd0kOddGdZWjyMGR8Wziv+TBNwSE=
-github.com/dustin/go-humanize v0.0.0-20171111073723-bb3d318650d4/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk=
-github.com/dustin/go-humanize v1.0.0/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk=
-github.com/elazarl/goproxy v0.0.0-20180725130230-947c36da3153/go.mod h1:/Zj4wYkgs4iZTTu3o/KG3Itv/qCCa8VVMlb3i9OVuzc=
-github.com/emicklei/go-restful v0.0.0-20170410110728-ff4f55a20633/go.mod h1:otzb+WCGbkyDHkqmQmT5YD2WR4BBwUdeQoFo8l/7tVs=
-github.com/emicklei/go-restful v2.9.5+incompatible/go.mod h1:otzb+WCGbkyDHkqmQmT5YD2WR4BBwUdeQoFo8l/7tVs=
-github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
-github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
-github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98=
-github.com/envoyproxy/go-control-plane v0.9.9-0.20201210154907-fd9021fe5dad/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk=
-github.com/envoyproxy/go-control-plane v0.9.9-0.20210512163311-63b5d3c536b0/go.mod h1:hliV/p42l8fGbc6Y9bQ70uLwIvmJyVE5k4iMKlh8wCQ=
-github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c=
-github.com/evanphx/json-patch v4.9.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk=
-github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4=
+github.com/docker/go-units v0.5.0 h1:69rxXcBk27SvSaaxTtLh/8llcHD8vYHT7WSdRZ/jvr4=
+github.com/docker/go-units v0.5.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk=
github.com/fatih/color v1.10.0 h1:s36xzo75JdqLaaWoiEHk767eHiwo0598uUxyfiPkDsg=
github.com/fatih/color v1.10.0/go.mod h1:ELkj/draVOlAH/xkhN6mQ50Qd0MPOk5AAr3maGEBuJM=
-github.com/form3tech-oss/jwt-go v3.2.2+incompatible/go.mod h1:pbq4aXjuKjdthFRnoDwaVPLA+WlJuPGy+QneDUgJi2k=
-github.com/frankban/quicktest v1.11.3/go.mod h1:wRf/ReqHper53s+kmmSZizM8NamnL3IM0I9ntUbOk+k=
-github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo=
-github.com/fsnotify/fsnotify v1.4.9 h1:hsms1Qyu0jgnwNXIxa+/V/PDsU6CfLf6CNO8H7IWoS4=
-github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ=
-github.com/fullsailor/pkcs7 v0.0.0-20190404230743-d7302db945fa/go.mod h1:KnogPXtdwXqoenmZCw6S+25EAm2MkxbG0deNDu4cbSA=
-github.com/garyburd/redigo v0.0.0-20150301180006-535138d7bcd7/go.mod h1:NR3MbYisc3/PwhQ00EMzDiPmrwpPxAn5GI05/YaO1SY=
-github.com/ghodss/yaml v0.0.0-20150909031657-73d445a93680/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04=
-github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04=
-github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU=
-github.com/go-gl/glfw/v3.3/glfw v0.0.0-20191125211704-12ad95a8df72/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8=
-github.com/go-gl/glfw/v3.3/glfw v0.0.0-20200222043503-6f7a984d4dc4/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8=
-github.com/go-ini/ini v1.25.4/go.mod h1:ByCAeIL28uOIIG0E3PJtZPDL8WnHpFKFOtgjp+3Ies8=
-github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as=
-github.com/go-kit/kit v0.9.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as=
-github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE=
-github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk=
-github.com/go-logr/logr v0.1.0/go.mod h1:ixOQHD9gLJUVQQ2ZOR7zLEifBX6tGkNJF4QyIY7sIas=
-github.com/go-logr/logr v0.2.0/go.mod h1:z6/tIYblkpsD+a4lm/fGIIU9mZ+XfAiaFtq7xTgseGU=
-github.com/go-openapi/jsonpointer v0.19.2/go.mod h1:3akKfEdA7DF1sugOqz1dVQHBcuDBPKZGEoHC/NkiQRg=
-github.com/go-openapi/jsonpointer v0.19.3/go.mod h1:Pl9vOtqEWErmShwVjC8pYs9cog34VGT37dQOVbmoatg=
-github.com/go-openapi/jsonreference v0.19.2/go.mod h1:jMjeRr2HHw6nAVajTXJ4eiUwohSTlpa0o73RUL1owJc=
-github.com/go-openapi/jsonreference v0.19.3/go.mod h1:rjx6GuL8TTa9VaixXglHmQmIL98+wF9xc8zWvFonSJ8=
-github.com/go-openapi/spec v0.19.3/go.mod h1:FpwSN1ksY1eteniUU7X0N/BgJ7a4WvBFVA8Lj9mJglo=
-github.com/go-openapi/swag v0.19.2/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk=
-github.com/go-openapi/swag v0.19.5/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk=
-github.com/go-playground/assert/v2 v2.0.1/go.mod h1:VDjEfimB/XKnb+ZQfWdccd7VUvScMdVu0Titje2rxJ4=
github.com/go-playground/locales v0.13.0 h1:HyWk6mgj5qFqCT5fjGBuRArbVDfE4hi8+e8ceBS/t7Q=
github.com/go-playground/locales v0.13.0/go.mod h1:taPMhCMXrRLJO55olJkUXHZBHCxTMfnGwq/HNwmWNS8=
github.com/go-playground/universal-translator v0.17.0 h1:icxd5fm+REJzpZx7ZfpaD876Lmtgy7VtROAbHHXk8no=
github.com/go-playground/universal-translator v0.17.0/go.mod h1:UkSxE5sNxxRwHyU+Scu5vgOQjsIJAF8j9muTVoKLVtA=
github.com/go-playground/validator/v10 v10.4.1 h1:pH2c5ADXtd66mxoE0Zm9SUhxE20r7aM3F26W0hOn+GE=
github.com/go-playground/validator/v10 v10.4.1/go.mod h1:nlOn6nFhuKACm19sB/8EGNn9GlaMV7XkbRSipzJ0Ii4=
-github.com/go-redis/redis/v8 v8.11.3/go.mod h1:xNJ9xDG09FsIPwh3bWdk+0oDWHbtF9rPN0F/oD9XeKc=
-github.com/go-redis/redis/v8 v8.11.4 h1:kHoYkfZP6+pe04aFTnhDH6GDROa5yJdHJVNxV3F46Tg=
-github.com/go-redis/redis/v8 v8.11.4/go.mod h1:2Z2wHZXdQpCDXEGzqMockDpNyYvi2l4Pxt6RJr792+w=
-github.com/go-sql-driver/mysql v1.5.0/go.mod h1:DCzpHaOWr8IXmIStZouvnhqoel9Qv2LBy8hT2VhHyBg=
-github.com/go-sql-driver/mysql v1.6.0 h1:BCTh4TKNUYmOmMUcQ3IipzF5prigylS7XXjEkfCHuOE=
github.com/go-sql-driver/mysql v1.6.0/go.mod h1:DCzpHaOWr8IXmIStZouvnhqoel9Qv2LBy8hT2VhHyBg=
-github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY=
-github.com/go-task/slim-sprig v0.0.0-20210107165309-348f09dbbbc0/go.mod h1:fyg7847qk6SyHyPtNmDHnmrv/HOrqktSC+C9fM+CJOE=
-github.com/goccy/go-yaml v1.9.5 h1:Eh/+3uk9kLxG4koCX6lRMAPS1OaMSAi+FJcya0INdB0=
-github.com/goccy/go-yaml v1.9.5/go.mod h1:U/jl18uSupI5rdI2jmuCswEA2htH9eXfferR3KfscvA=
-github.com/godbus/dbus v0.0.0-20151105175453-c7fdd8b5cd55/go.mod h1:/YcGZj5zSblfDWMMoOzV4fas9FZnQYTkDnsGvmh2Grw=
-github.com/godbus/dbus v0.0.0-20180201030542-885f9cc04c9c/go.mod h1:/YcGZj5zSblfDWMMoOzV4fas9FZnQYTkDnsGvmh2Grw=
-github.com/godbus/dbus v0.0.0-20190422162347-ade71ed3457e/go.mod h1:bBOAhwG1umN6/6ZUMtDFBMQR8jRg9O75tm9K00oMsK4=
-github.com/godbus/dbus/v5 v5.0.3/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA=
-github.com/godbus/dbus/v5 v5.0.4/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA=
-github.com/gogo/googleapis v1.2.0/go.mod h1:Njal3psf3qN6dwBtQfUmBZh2ybovJ0tlu3o/AC7HYjU=
-github.com/gogo/googleapis v1.4.0/go.mod h1:5YRNX2z1oM5gXdAkurHa942MDgEJyk02w4OecKY87+c=
-github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ=
-github.com/gogo/protobuf v1.2.1/go.mod h1:hp+jE20tsWTFYpLwKvXlhS1hjn+gTNwPg2I6zVXpSg4=
-github.com/gogo/protobuf v1.2.2-0.20190723190241-65acae22fc9d/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXPKa29o=
-github.com/gogo/protobuf v1.3.0/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXPKa29o=
-github.com/gogo/protobuf v1.3.1/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXPKa29o=
+github.com/go-sql-driver/mysql v1.7.1/go.mod h1:OXbVy3sEdcQ2Doequ6Z5BW6fXNQTmx+9S1MCJN5yJMI=
+github.com/go-sql-driver/mysql v1.8.1 h1:LedoTUt/eveggdHS9qUFC1EFSa8bU2+1pZjSRpvNJ1Y=
+github.com/go-sql-driver/mysql v1.8.1/go.mod h1:wEBSXgmK//2ZFJyE+qWnIsVGmvmEKlqwuVSjsCm7DZg=
+github.com/goccy/go-yaml v1.11.3 h1:B3W9IdWbvrUu2OYQGwvU1nZtvMQJPBKgBUuweJjLj6I=
+github.com/goccy/go-yaml v1.11.3/go.mod h1:wKnAMd44+9JAAnGQpWVEgBzGt3YuTaQ4uXoHvE4m7WU=
github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q=
github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q=
-github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q=
-github.com/golang/groupcache v0.0.0-20160516000752-02826c3e7903/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
-github.com/golang/groupcache v0.0.0-20190129154638-5b532d6fd5ef/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
-github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
-github.com/golang/groupcache v0.0.0-20191227052852-215e87163ea7/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
-github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
-github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=
-github.com/golang/mock v1.2.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=
-github.com/golang/mock v1.3.1/go.mod h1:sBzyDLLjw3U8JLTeZvSv8jJB+tU5PVekmnlKIyFUx0Y=
-github.com/golang/mock v1.4.0/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw=
-github.com/golang/mock v1.4.1/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw=
-github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
-github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
-github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
-github.com/golang/protobuf v1.3.3/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw=
-github.com/golang/protobuf v1.3.4/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw=
-github.com/golang/protobuf v1.3.5/go.mod h1:6O5/vntMXwX2lRkT1hjjk0nAC1IDOTvTlVgjlRvqsdk=
-github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8=
-github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA=
-github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs=
-github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w=
-github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0=
-github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QDs8UjoX8=
-github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI=
-github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI=
-github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk=
-github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY=
-github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=
-github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=
-github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M=
github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
-github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
-github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
-github.com/google/go-cmp v0.5.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
-github.com/google/go-cmp v0.5.2/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
-github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
-github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
-github.com/google/go-cmp v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
-github.com/google/go-cmp v0.5.8 h1:e6P7q2lk1O+qJJb4BtCQXlK8vWEO8V1ZeuEdJNOqZyg=
-github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
-github.com/google/gofuzz v1.1.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
-github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs=
-github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc=
-github.com/google/pprof v0.0.0-20190515194954-54271f7e092f/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc=
-github.com/google/pprof v0.0.0-20191218002539-d4f498aebedc/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
-github.com/google/pprof v0.0.0-20200212024743-f11f1df84d12/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
-github.com/google/pprof v0.0.0-20200229191704-1ebb73c60ed3/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
-github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI=
-github.com/google/uuid v1.0.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
-github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
-github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
-github.com/google/uuid v1.2.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
-github.com/google/uuid v1.3.0 h1:t6JiXgmwXMjEs8VusXIJk2BXHsn+wx8BZdTaoZ5fu7I=
-github.com/google/uuid v1.3.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
-github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg=
-github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk=
-github.com/googleapis/gnostic v0.4.1/go.mod h1:LRhVm6pbyptWbWbuZ38d1eyptfvIytN3ir6b65WBswg=
-github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY=
-github.com/gorilla/handlers v0.0.0-20150720190736-60c7bfde3e33/go.mod h1:Qkdc/uu4tH4g6mTK6auzZ766c4CA0Ng8+o/OAirnOIQ=
-github.com/gorilla/mux v1.7.2/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs=
-github.com/gorilla/mux v1.8.0/go.mod h1:DVbg23sWSpFRCP0SfiEN6jmj59UnW/n46BH5rLB71So=
-github.com/gorilla/websocket v0.0.0-20170926233335-4201258b820c/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ=
-github.com/gorilla/websocket v1.4.0/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ=
-github.com/gorilla/websocket v1.4.2/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE=
-github.com/gregjones/httpcache v0.0.0-20180305231024-9cad4c3443a7/go.mod h1:FecbI9+v66THATjSRHfNgh1IVFe/9kFxbXtjV0ctIMA=
-github.com/grpc-ecosystem/go-grpc-middleware v1.0.0/go.mod h1:FiyG127CGDf3tlThmgyCl78X/SZQqEOJBCDaAfeWzPs=
-github.com/grpc-ecosystem/go-grpc-middleware v1.0.1-0.20190118093823-f849b5445de4/go.mod h1:FiyG127CGDf3tlThmgyCl78X/SZQqEOJBCDaAfeWzPs=
-github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0/go.mod h1:8NvIoxWQoOIhqOTXgfV/d3M/q6VIi02HzZEHgUlZvzk=
-github.com/grpc-ecosystem/grpc-gateway v1.9.0/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY=
-github.com/grpc-ecosystem/grpc-gateway v1.9.5/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY=
-github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw=
-github.com/hashicorp/errwrap v0.0.0-20141028054710-7554cd9344ce/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4=
-github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4=
-github.com/hashicorp/go-multierror v0.0.0-20161216184304-ed905158d874/go.mod h1:JMRHfdO9jKNzS/+BTlxCjKNQHg/jZAft8U7LloJvN7I=
-github.com/hashicorp/go-multierror v1.0.0/go.mod h1:dHtQlpGsu+cZNNAkkCN/P3hoUDHhCYQXV3UM06sGGrk=
-github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8=
-github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8=
-github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ=
-github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU=
-github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc=
-github.com/icinga/icinga-testing v0.0.0-20220516144008-9600081b7a69 h1:M5KN3s3TuHpGPnP78h5cFogtQrywapFIaYfvohQHc7I=
-github.com/icinga/icinga-testing v0.0.0-20220516144008-9600081b7a69/go.mod h1:ZP0pyqhmrRwwQ6FpAfz7UZMgmH7i3vOjEOm9JcFwOw0=
-github.com/imdario/mergo v0.3.5/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA=
-github.com/imdario/mergo v0.3.8/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA=
-github.com/imdario/mergo v0.3.10/go.mod h1:jmQim1M+e3UYxmgPu/WyfjB3N3VflVyUjjjwH0dnCYA=
-github.com/imdario/mergo v0.3.11/go.mod h1:jmQim1M+e3UYxmgPu/WyfjB3N3VflVyUjjjwH0dnCYA=
-github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8=
-github.com/j-keck/arping v0.0.0-20160618110441-2cf9dc699c56/go.mod h1:ymszkNOg6tORTn+6F6j+Jc8TOr5osrynvN6ivFWZ2GA=
-github.com/jmespath/go-jmespath v0.0.0-20160202185014-0b12d6b521d8/go.mod h1:Nht3zPeWKUH0NzdCt2Blrr5ys8VGpn0CEB0cQHVjt7k=
-github.com/jmespath/go-jmespath v0.0.0-20160803190731-bd40a432e4c7/go.mod h1:Nht3zPeWKUH0NzdCt2Blrr5ys8VGpn0CEB0cQHVjt7k=
-github.com/jmoiron/sqlx v1.3.4 h1:wv+0IJZfL5z0uZoUjlpKgHkgaFSYD+r9CfrXjEXsO7w=
-github.com/jmoiron/sqlx v1.3.4/go.mod h1:2BljVx/86SuTyjE+aPYlHCTNvZrnJXghYGpNiXLBMCQ=
-github.com/jonboulle/clockwork v0.1.0/go.mod h1:Ii8DK3G1RaLaWxj9trq07+26W01tbo22gdxWY5EU2bo=
-github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU=
-github.com/json-iterator/go v1.1.7/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=
-github.com/json-iterator/go v1.1.10/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=
-github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU=
-github.com/jstemmer/go-junit-report v0.9.1/go.mod h1:Brl9GWCQeLvo8nXZwPNNblvFj/XSXhF0NWZEnDohbsk=
-github.com/jtolds/gls v4.20.0+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU=
-github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w=
-github.com/kisielk/errcheck v1.1.0/go.mod h1:EZBBE59ingxPouuu3KfxchcWSUPOHkagtvWXihfKN4Q=
-github.com/kisielk/errcheck v1.2.0/go.mod h1:/BMXB+zMLi60iA8Vv6Ksmxu/1UDYcXs4uQLJ+jE2L00=
+github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
+github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI=
+github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
+github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0=
+github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
+github.com/icinga/icinga-testing v0.0.0-20240322142451-494ccd6d03e8 h1:PI+39IY1BjN24JC3B6Jy0rhwm3hqC4SnQFxbZjXOaHk=
+github.com/icinga/icinga-testing v0.0.0-20240322142451-494ccd6d03e8/go.mod h1:xjNiwePgnSVKJWPG/iFG7pNOibU/OWp01Zdl08o+EeI=
+github.com/jmoiron/sqlx v1.3.5 h1:vFFPA71p1o5gAeqtEAwLU4dnX2napprKtHr7PYIcN3g=
+github.com/jmoiron/sqlx v1.3.5/go.mod h1:nRVWtLre0KfCLJvgxzCsLVMogSvQ1zNJtpYr2Ccp0mQ=
github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8=
github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
-github.com/klauspost/compress v1.11.3/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs=
-github.com/klauspost/compress v1.11.13/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs=
-github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
-github.com/konsorten/go-windows-terminal-sequences v1.0.2/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
-github.com/konsorten/go-windows-terminal-sequences v1.0.3/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
-github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc=
github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo=
-github.com/kr/pretty v0.2.0/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI=
-github.com/kr/pretty v0.2.1 h1:Fmg33tUaq4/8ym9TJN1x7sLJnHVwhP33CNkpYV/7rwI=
github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI=
+github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE=
+github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk=
github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
-github.com/kr/pty v1.1.5/go.mod h1:9r2w37qlBe7rQ6e1fg1S/9xpWHSnaqNdHD3WcMdbPDA=
-github.com/kr/text v0.1.0 h1:45sCR5RtlFHMR4UwH9sdQ5TC8v0qDQCHnXt+kaKSTVE=
github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
+github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY=
+github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE=
github.com/leodido/go-urn v1.2.0 h1:hpXL4XnriNwQ/ABnpepYM/1vCLWNDfUNts8dX3xTG6Y=
github.com/leodido/go-urn v1.2.0/go.mod h1:+8+nEpDfqqsY+g338gtMEUOtuK+4dEMhiQEgxpxOKII=
github.com/lib/pq v1.2.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo=
-github.com/lib/pq v1.10.4/go.mod h1:AlVN5x4E4T544tWzH6hKfbfQvm3HdbOxrmggDNAPY9o=
-github.com/lib/pq v1.10.5 h1:J+gdV2cUmX7ZqL2B0lFcW0m+egaHC2V3lpO8nWxyYiQ=
-github.com/lib/pq v1.10.5/go.mod h1:AlVN5x4E4T544tWzH6hKfbfQvm3HdbOxrmggDNAPY9o=
-github.com/magiconair/properties v1.8.0/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ=
-github.com/mailru/easyjson v0.0.0-20190614124828-94de47d64c63/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc=
-github.com/mailru/easyjson v0.0.0-20190626092158-b2ccc519800e/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc=
-github.com/mailru/easyjson v0.7.0/go.mod h1:KAzv3t3aY1NaHWoQz1+4F1ccyAH66Jk7yos7ldAVICs=
-github.com/marstr/guid v1.1.0/go.mod h1:74gB1z2wpxxInTG6yaqA7KrtM0NZ+RbrcqDvYHefzho=
-github.com/mattn/go-colorable v0.0.9/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU=
+github.com/lib/pq v1.10.9 h1:YXG7RB+JIjhP29X+OtkiDnYaXQwpS4JEWq7dtCCRUEw=
+github.com/lib/pq v1.10.9/go.mod h1:AlVN5x4E4T544tWzH6hKfbfQvm3HdbOxrmggDNAPY9o=
github.com/mattn/go-colorable v0.1.8 h1:c1ghPdyEDarC70ftn0y+A/Ee++9zz8ljHG1b13eJ0s8=
github.com/mattn/go-colorable v0.1.8/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc=
-github.com/mattn/go-isatty v0.0.4/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4=
github.com/mattn/go-isatty v0.0.12 h1:wuysRhFDzyxgEmMf5xjvJ2M9dZoWAXNNr5LSBS7uHXY=
github.com/mattn/go-isatty v0.0.12/go.mod h1:cbi8OIDigv2wuxKPP5vlRcQ1OAZbq2CE4Kysco4FUpU=
-github.com/mattn/go-runewidth v0.0.2/go.mod h1:LwmH8dsx7+W8Uxz3IHJYH5QSwggIsqBzpuz5H//U1FU=
-github.com/mattn/go-shellwords v1.0.3/go.mod h1:3xCvwCdWdlDJUrvuMn7Wuy9eWs4pE8vqg+NOMyg4B2o=
github.com/mattn/go-sqlite3 v1.14.0/go.mod h1:JIl7NbARA7phWnGvh0LKTyg7S9BA+6gx71ShQilpsus=
-github.com/mattn/go-sqlite3 v1.14.6 h1:dNPt6NO46WmLVt2DLNpwczCmdV5boIZ6g/tlDrlRUbg=
github.com/mattn/go-sqlite3 v1.14.6/go.mod h1:NyWgC/yNuGj7Q9rpYnZvas74GogHl5/Z4A/KQRfk6bU=
-github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0=
-github.com/matttproud/golang_protobuf_extensions v1.0.2-0.20181231171920-c182affec369/go.mod h1:BSXmuO+STAnVfrANrmjBb36TMTDstsz7MSK+HVaYKv4=
-github.com/miekg/pkcs11 v1.0.3/go.mod h1:XsNlhZGX73bx86s2hdc/FuaLm2CPZJemRLMA+WTFxgs=
-github.com/mistifyio/go-zfs v2.1.2-0.20190413222219-f784269be439+incompatible/go.mod h1:8AuVvqP/mXw1px98n46wfvcGfQ4ci2FwoAjKYxuo3Z4=
-github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0=
-github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y=
-github.com/mitchellh/osext v0.0.0-20151018003038-5e2d6d41470f/go.mod h1:OkQIRizQZAeMln+1tSwduZz7+Af5oFlKirV/MSYes2A=
-github.com/moby/locker v1.0.1/go.mod h1:S7SDdo5zpBK84bzzVlKr2V0hz+7x9hWbYC/kq7oQppc=
-github.com/moby/sys/mountinfo v0.4.0/go.mod h1:rEr8tzG/lsIZHBtN/JjGG+LMYx9eXgW2JI+6q0qou+A=
-github.com/moby/sys/mountinfo v0.4.1/go.mod h1:rEr8tzG/lsIZHBtN/JjGG+LMYx9eXgW2JI+6q0qou+A=
-github.com/moby/sys/symlink v0.1.0/go.mod h1:GGDODQmbFOjFsXvfLVn3+ZRxkch54RkSiGqsZeMYowQ=
-github.com/moby/term v0.0.0-20200312100748-672ec06f55cd/go.mod h1:DdlQx2hp0Ss5/fLikoLlEeIYiATotOjgB//nb973jeo=
-github.com/moby/term v0.0.0-20201216013528-df9cb8a40635 h1:rzf0wL0CHVc8CEsgyygG0Mn9CNCCPZqOPaz8RiiHYQk=
-github.com/moby/term v0.0.0-20201216013528-df9cb8a40635/go.mod h1:FBS0z0QWA44HXygs7VXDUOGoN/1TV3RuWkLO04am3wc=
-github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
-github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
-github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0=
-github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0=
+github.com/mattn/go-sqlite3 v1.14.15 h1:vfoHhTN1af61xCRSWzFIWzx2YskyMTwHLrExkBOjvxI=
+github.com/mattn/go-sqlite3 v1.14.15/go.mod h1:2eHXhiwb8IkHr+BDWZGa96P6+rkvnG63S2DGjv9HUNg=
+github.com/moby/term v0.0.0-20220808134915-39b0c02b01ae h1:O4SWKdcHVCvYqyDV+9CJA1fcDN2L11Bule0iFy3YlAI=
+github.com/moby/term v0.0.0-20220808134915-39b0c02b01ae/go.mod h1:E2VnQOmVuvZB6UYnnDB0qG5Nq/1tD9acaOpo6xmt0Kw=
github.com/morikuni/aec v1.0.0 h1:nP9CBfwrvYnBRgY6qfDQkygYDmYwOilePFkwzv4dU8A=
github.com/morikuni/aec v1.0.0/go.mod h1:BbKIizmSmc5MMPqRYbxO4ZU0S0+P200+tUnFx7PXmsc=
-github.com/mrunalp/fileutils v0.5.0/go.mod h1:M1WthSahJixYnrXQl/DFQuteStB1weuxD2QJNHXfbSQ=
-github.com/munnerz/goautoneg v0.0.0-20120707110453-a547fc61f48d/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ=
-github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ=
-github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U=
-github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f/go.mod h1:ZdcZmHo+o7JKHSa8/e818NopupXU1YMK5fe1lsApnBw=
-github.com/ncw/swift v1.0.47/go.mod h1:23YIA4yWVnGwv2dQlN4bB7egfYX6YLn0Yo/S6zZO/ZM=
-github.com/nxadm/tail v1.4.4/go.mod h1:kenIhsEOeOJmVchQTgglprH7qJGnHDVpk1VPCcaMI8A=
-github.com/nxadm/tail v1.4.8 h1:nPr65rt6Y5JFSKQO7qToXr7pePgD6Gwiw05lkbyAQTE=
-github.com/nxadm/tail v1.4.8/go.mod h1:+ncqLTQzXmGhMZNUePPaPqPvBxHAIsmXswZKocGu+AU=
-github.com/oklog/ulid v1.3.1/go.mod h1:CirwcVhetQ6Lv90oh/F+FBtV6XMibvdAFo93nm5qn4U=
-github.com/olekukonko/tablewriter v0.0.0-20170122224234-a0225b3f23b5/go.mod h1:vsDQFd/mU46D+Z4whnwzcISnGGzXWMclvtLoiIKAKIo=
-github.com/onsi/ginkgo v0.0.0-20151202141238-7f8ab55aaf3b/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
-github.com/onsi/ginkgo v0.0.0-20170829012221-11459a886d9c/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
-github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
-github.com/onsi/ginkgo v1.10.1/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
-github.com/onsi/ginkgo v1.10.3/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
-github.com/onsi/ginkgo v1.11.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
-github.com/onsi/ginkgo v1.12.1/go.mod h1:zj2OWP4+oCPe1qIXoGWkgMRwljMUYCdkwsT2108oapk=
-github.com/onsi/ginkgo v1.16.4 h1:29JGrr5oVBm5ulCWet69zQkzWipVXIol6ygQUe/EzNc=
-github.com/onsi/ginkgo v1.16.4/go.mod h1:dX+/inL/fNMqNlz0e9LfyB9TswhZpCVdJM/Z6Vvnwo0=
-github.com/onsi/gomega v0.0.0-20151007035656-2152b45fa28a/go.mod h1:C1qb7wdrVGGVU+Z6iS04AVkA3Q65CEZX59MT0QO5uiA=
-github.com/onsi/gomega v0.0.0-20170829124025-dcabb60a477c/go.mod h1:C1qb7wdrVGGVU+Z6iS04AVkA3Q65CEZX59MT0QO5uiA=
-github.com/onsi/gomega v1.7.0/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY=
-github.com/onsi/gomega v1.7.1/go.mod h1:XdKZgCCFLUoM/7CFJVPcG8C1xQ1AJ0vpAezJrB7JYyY=
-github.com/onsi/gomega v1.10.1/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1ybHNo=
-github.com/onsi/gomega v1.10.3/go.mod h1:V9xEwhxec5O8UDM77eCW8vLymOMltsqPVYWrpDsH8xc=
-github.com/onsi/gomega v1.15.0/go.mod h1:cIuvLEne0aoVhAgh/O6ac0Op8WWw9H6eYCriF+tEHG0=
-github.com/onsi/gomega v1.16.0 h1:6gjqkI8iiRHMvdccRJM8rVKjCWk6ZIm6FTm3ddIe4/c=
-github.com/onsi/gomega v1.16.0/go.mod h1:HnhC7FXeEQY45zxNK3PPoIUhzk/80Xly9PcubAlGdZY=
-github.com/opencontainers/go-digest v0.0.0-20170106003457-a6d0ee40d420/go.mod h1:cMLVZDEM3+U2I4VmLI6N8jQYUd2OVphdqWwCJHrFt2s=
-github.com/opencontainers/go-digest v0.0.0-20180430190053-c9281466c8b2/go.mod h1:cMLVZDEM3+U2I4VmLI6N8jQYUd2OVphdqWwCJHrFt2s=
-github.com/opencontainers/go-digest v1.0.0-rc1/go.mod h1:cMLVZDEM3+U2I4VmLI6N8jQYUd2OVphdqWwCJHrFt2s=
-github.com/opencontainers/go-digest v1.0.0-rc1.0.20180430190053-c9281466c8b2/go.mod h1:cMLVZDEM3+U2I4VmLI6N8jQYUd2OVphdqWwCJHrFt2s=
github.com/opencontainers/go-digest v1.0.0 h1:apOUWs51W5PlhuyGyz9FCeeBIOUDA/6nW8Oi/yOhh5U=
github.com/opencontainers/go-digest v1.0.0/go.mod h1:0JzlMkj0TRzQZfJkVvzbP0HBR3IKzErnv2BNG4W4MAM=
-github.com/opencontainers/image-spec v1.0.0/go.mod h1:BtxoFyWECRxE4U/7sNtV5W15zMzWCbyJoFRP3s7yZA0=
-github.com/opencontainers/image-spec v1.0.1/go.mod h1:BtxoFyWECRxE4U/7sNtV5W15zMzWCbyJoFRP3s7yZA0=
-github.com/opencontainers/image-spec v1.0.2 h1:9yCKha/T5XdGtO0q9Q9a6T5NUCsTn/DrBg0D7ufOcFM=
-github.com/opencontainers/image-spec v1.0.2/go.mod h1:BtxoFyWECRxE4U/7sNtV5W15zMzWCbyJoFRP3s7yZA0=
-github.com/opencontainers/runc v0.0.0-20190115041553-12f6a991201f/go.mod h1:qT5XzbpPznkRYVz/mWwUaVBUv2rmF59PVA73FjuZG0U=
-github.com/opencontainers/runc v0.1.1/go.mod h1:qT5XzbpPznkRYVz/mWwUaVBUv2rmF59PVA73FjuZG0U=
-github.com/opencontainers/runc v1.0.0-rc8.0.20190926000215-3e425f80a8c9/go.mod h1:qT5XzbpPznkRYVz/mWwUaVBUv2rmF59PVA73FjuZG0U=
-github.com/opencontainers/runc v1.0.0-rc9/go.mod h1:qT5XzbpPznkRYVz/mWwUaVBUv2rmF59PVA73FjuZG0U=
-github.com/opencontainers/runc v1.0.0-rc93/go.mod h1:3NOsor4w32B2tC0Zbl8Knk4Wg84SM2ImC1fxBuqJ/H0=
-github.com/opencontainers/runc v1.0.1/go.mod h1:aTaHFFwQXuA71CiyxOdFFIorAoemI04suvGRQFzWTD0=
-github.com/opencontainers/runtime-spec v0.1.2-0.20190507144316-5b71a03e2700/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0=
-github.com/opencontainers/runtime-spec v1.0.1/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0=
-github.com/opencontainers/runtime-spec v1.0.2-0.20190207185410-29686dbc5559/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0=
-github.com/opencontainers/runtime-spec v1.0.2/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0=
-github.com/opencontainers/runtime-spec v1.0.3-0.20200929063507-e6143ca7d51d/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0=
-github.com/opencontainers/runtime-spec v1.0.3-0.20210326190908-1c3f411f0417/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0=
-github.com/opencontainers/runtime-tools v0.0.0-20181011054405-1d69bd0f9c39/go.mod h1:r3f7wjNzSs2extwzU3Y+6pKfobzPh+kKFJ3ofN+3nfs=
-github.com/opencontainers/selinux v1.6.0/go.mod h1:VVGKuOLlE7v4PJyT6h7mNWvq1rzqiriPsEqVhc+svHE=
-github.com/opencontainers/selinux v1.8.0/go.mod h1:RScLhm78qiWa2gbVCcGkC7tCGdgk3ogry1nUQF8Evvo=
-github.com/opencontainers/selinux v1.8.2/go.mod h1:MUIHuUEvKB1wtJjQdOyYRgOnLD2xAPP8dBsCoU0KuF8=
-github.com/pelletier/go-toml v1.2.0/go.mod h1:5z9KED0ma1S8pY6P1sdut58dfprrGBbd/94hg7ilaic=
-github.com/pelletier/go-toml v1.8.1/go.mod h1:T2/BmBdy8dvIRq1a/8aqjN41wvWlN4lrapLU/GW4pbc=
-github.com/peterbourgon/diskv v2.0.1+incompatible/go.mod h1:uqqh8zWWbv1HBMNONnaR/tNboyR3/BZd58JJSHlUSCU=
-github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
-github.com/pkg/errors v0.8.1-0.20171018195549-f15c970de5b7/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
+github.com/opencontainers/image-spec v1.1.0-rc5 h1:Ygwkfw9bpDvs+c9E34SdgGOj41dX/cbdlwvlWt0pnFI=
+github.com/opencontainers/image-spec v1.1.0-rc5/go.mod h1:X4pATf0uXsnn3g5aiGIsVnJBR4mxhKzfwmvK/B2NTm8=
+github.com/pkg/diff v0.0.0-20210226163009-20ebb0f2a09e/go.mod h1:pJLUxLENpZxwdsKMEsNbx1VGcRFpLqf3715MtcvvzbA=
github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4=
github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
-github.com/pquerna/cachecontrol v0.0.0-20171018203845-0dec1b30a021/go.mod h1:prYjPmNq4d1NPVmpShWobRqXY3q7Vp+80DqgxxUrUIA=
-github.com/prometheus/client_golang v0.0.0-20180209125602-c332b6f63c06/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw=
-github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw=
-github.com/prometheus/client_golang v0.9.3/go.mod h1:/TN21ttK/J9q6uSwhBd54HahCDft0ttaMvbicHlPoso=
-github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo=
-github.com/prometheus/client_golang v1.1.0/go.mod h1:I1FGZT9+L76gKKOs5djB6ezCbFQP1xR9D75/vuwEF3g=
-github.com/prometheus/client_golang v1.7.1/go.mod h1:PY5Wy2awLA44sXw4AOSfFBetzPP4j5+D6mVACh+pe2M=
-github.com/prometheus/client_model v0.0.0-20171117100541-99fa1f4be8e5/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo=
-github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo=
-github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
-github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
-github.com/prometheus/client_model v0.2.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
-github.com/prometheus/common v0.0.0-20180110214958-89604d197083/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro=
-github.com/prometheus/common v0.0.0-20181113130724-41aa239b4cce/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro=
-github.com/prometheus/common v0.4.0/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4=
-github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4=
-github.com/prometheus/common v0.6.0/go.mod h1:eBmuwkDJBwy6iBfxCBob6t6dR6ENT/y+J+Zk0j9GMYc=
-github.com/prometheus/common v0.10.0/go.mod h1:Tlit/dnDKsSWFlCLTWaA1cyBgKHSMdTB80sz/V91rCo=
-github.com/prometheus/procfs v0.0.0-20180125133057-cb4147076ac7/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
-github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
-github.com/prometheus/procfs v0.0.0-20190507164030-5867b95ac084/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA=
-github.com/prometheus/procfs v0.0.0-20190522114515-bc1a522cf7b1/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA=
-github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA=
-github.com/prometheus/procfs v0.0.3/go.mod h1:4A/X28fw3Fc593LaREMrKMqOKvUAntwMDaekg4FpcdQ=
-github.com/prometheus/procfs v0.0.5/go.mod h1:4A/X28fw3Fc593LaREMrKMqOKvUAntwMDaekg4FpcdQ=
-github.com/prometheus/procfs v0.0.8/go.mod h1:7Qr8sr6344vo1JqZ6HhLceV9o3AJ1Ff+GxbHq6oeK9A=
-github.com/prometheus/procfs v0.1.3/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU=
-github.com/prometheus/procfs v0.2.0/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU=
-github.com/prometheus/procfs v0.6.0/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA=
-github.com/prometheus/tsdb v0.7.1/go.mod h1:qhTCs0VvXwvX/y3TZrWD7rabWM+ijKTux40TwIPHuXU=
-github.com/rogpeppe/fastuuid v0.0.0-20150106093220-6724a57986af/go.mod h1:XWv6SoW27p1b0cqNHllgS5HIMJraePCO15w5zCzIWYg=
-github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ=
-github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4=
-github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
-github.com/safchain/ethtool v0.0.0-20190326074333-42ed695e3de8/go.mod h1:Z0q5wiBQGYcxhMZ6gUqHn6pYNLypFAvaL3UvgZLR0U4=
-github.com/satori/go.uuid v1.2.0/go.mod h1:dA0hQrYB0VpLJoorglMZABFdXlWrHn1NEOzdhQKdks0=
-github.com/seccomp/libseccomp-golang v0.9.1/go.mod h1:GbW5+tmTXfcxTToHLXlScSlAvWlF4P2Ca7zGrPiEpWo=
-github.com/shurcooL/sanitized_anchor_name v1.0.0/go.mod h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc=
-github.com/sirupsen/logrus v1.0.4-0.20170822132746-89742aefa4b2/go.mod h1:pMByvHTf9Beacp5x1UXfOR9xyW/9antXMhjMPG0dEzc=
-github.com/sirupsen/logrus v1.0.6/go.mod h1:pMByvHTf9Beacp5x1UXfOR9xyW/9antXMhjMPG0dEzc=
-github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo=
-github.com/sirupsen/logrus v1.4.1/go.mod h1:ni0Sbl8bgC9z8RoU9G6nDWqqs/fq4eDPysMBDgk/93Q=
-github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE=
-github.com/sirupsen/logrus v1.6.0/go.mod h1:7uNnSEd1DgxDLC74fIahvMZmmYsHGZGEOFrfsX/uA88=
-github.com/sirupsen/logrus v1.7.0/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0=
-github.com/sirupsen/logrus v1.8.1/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0=
-github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc=
-github.com/smartystreets/goconvey v0.0.0-20190330032615-68dc04aab96a/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA=
-github.com/soheilhy/cmux v0.1.4/go.mod h1:IM3LyeVVIOuxMH7sFAkER9+bJ4dT7Ms6E4xg4kGIyLM=
-github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA=
-github.com/spf13/afero v1.1.2/go.mod h1:j4pytiNVoe2o6bmDsKpLACNPDBIoEAkihy7loJ1B0CQ=
-github.com/spf13/afero v1.2.2/go.mod h1:9ZxEEn6pIJ8Rxe320qSDBk6AsU0r9pR7Q4OcevTdifk=
-github.com/spf13/cast v1.3.0/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE=
-github.com/spf13/cobra v0.0.2-0.20171109065643-2da4a54c5cee/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ=
-github.com/spf13/cobra v0.0.3/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ=
-github.com/spf13/cobra v1.0.0/go.mod h1:/6GTrnGXV9HjY+aR4k0oJ5tcvakLuG6EuKReYlHNrgE=
-github.com/spf13/jwalterweatherman v1.0.0/go.mod h1:cQK4TGJAtQXfYWX+Ddv3mKDzgVb68N+wFjFa4jdeBTo=
-github.com/spf13/pflag v0.0.0-20170130214245-9ff6c6923cff/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4=
-github.com/spf13/pflag v1.0.1-0.20171106142849-4c012f6dcd95/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4=
-github.com/spf13/pflag v1.0.1/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4=
+github.com/redis/go-redis/v9 v9.5.1 h1:H1X4D3yHPaYrkL5X06Wh6xNVM/pX0Ft4RV0vMGvLBh8=
+github.com/redis/go-redis/v9 v9.5.1/go.mod h1:hdY0cQFCN4fnSYT6TkisLufl/4W5UIXyv0b/CLO2V2M=
+github.com/rogpeppe/go-internal v1.9.0/go.mod h1:WtVeX8xhTBvf0smdhujwtBcq4Qrzq/fJaraNFVN+nFs=
+github.com/rogpeppe/go-internal v1.10.0 h1:TMyTOH3F/DB16zRVcYyreMH6GnZZrwQVAoYjRBZyWFQ=
+github.com/rogpeppe/go-internal v1.10.0/go.mod h1:UQnix2H7Ngw/k4C5ijL5+65zddjncjaFoBhdsK/akog=
+github.com/russross/blackfriday v1.6.0/go.mod h1:ti0ldHuxg49ri4ksnFxlkCfN+hvslNlmVHqNRXXJNAY=
+github.com/sirupsen/logrus v1.9.0 h1:trlNQbNUG3OdDrDil03MCb1H2o9nJ1x4/5LYw7byDE0=
+github.com/sirupsen/logrus v1.9.0/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ=
github.com/spf13/pflag v1.0.3/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4=
-github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg=
-github.com/spf13/viper v1.4.0/go.mod h1:PTJ7Z/lr49W6bUbkmS1V3by4uWynFiR9p7+dSq/yZzE=
-github.com/stefanberger/go-pkcs11uri v0.0.0-20201008174630-78d3cae3a980/go.mod h1:AO3tvPzVZ/ayst6UlUKUv6rcPQInYe3IknH3jYhAKu8=
-github.com/stretchr/objx v0.0.0-20180129172003-8a3f7159479f/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
-github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
-github.com/stretchr/objx v0.2.0/go.mod h1:qt09Ya8vawLte6SNmTgCsAVtYtaKzEcn8ATUoHMkEqE=
-github.com/stretchr/testify v0.0.0-20180303142811-b89eecf5ca5d/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
-github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
+github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw=
+github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo=
github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
-github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4=
-github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA=
github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
-github.com/stretchr/testify v1.7.0 h1:nwc3DEeHmmLAfoZucVR881uASk0Mfjw8xYJ99tb5CcY=
github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
-github.com/syndtr/gocapability v0.0.0-20170704070218-db04d3cc01c8/go.mod h1:hkRG7XYTFWNJGYcbNJQlaLq0fg1yr4J4t/NcTQtrfww=
-github.com/syndtr/gocapability v0.0.0-20180916011248-d98352740cb2/go.mod h1:hkRG7XYTFWNJGYcbNJQlaLq0fg1yr4J4t/NcTQtrfww=
-github.com/syndtr/gocapability v0.0.0-20200815063812-42c35b437635/go.mod h1:hkRG7XYTFWNJGYcbNJQlaLq0fg1yr4J4t/NcTQtrfww=
-github.com/tchap/go-patricia v2.2.6+incompatible/go.mod h1:bmLyhP68RS6kStMGxByiQ23RP/odRBOTVjwp2cDyi6I=
-github.com/tmc/grpc-websocket-proxy v0.0.0-20170815181823-89b8d40f7ca8/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U=
-github.com/tmc/grpc-websocket-proxy v0.0.0-20190109142713-0ad062ec5ee5/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U=
-github.com/ugorji/go v1.1.4/go.mod h1:uQMGLiO92mf5W77hV/PUCpI3pbzQx3CRekS0kk+RGrc=
-github.com/urfave/cli v0.0.0-20171014202726-7bc6a0acffa5/go.mod h1:70zkFmudgCuE/ngEzBv17Jvp/497gISqfk5gWijbERA=
-github.com/urfave/cli v1.20.0/go.mod h1:70zkFmudgCuE/ngEzBv17Jvp/497gISqfk5gWijbERA=
-github.com/urfave/cli v1.22.1/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtXRu0=
-github.com/urfave/cli v1.22.2/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtXRu0=
-github.com/vishvananda/netlink v0.0.0-20181108222139-023a6dafdcdf/go.mod h1:+SR5DhBJrl6ZM7CoCKvpw5BKroDKQ+PJqOg65H/2ktk=
-github.com/vishvananda/netlink v1.1.0/go.mod h1:cTgwzPIzzgDAYoQrMm0EdrjRUBkTqKYppBueQtXaqoE=
-github.com/vishvananda/netlink v1.1.1-0.20201029203352-d40f9887b852/go.mod h1:twkDnbuQxJYemMlGd4JFIcuhgX83tXhKS2B/PRMpOho=
-github.com/vishvananda/netns v0.0.0-20180720170159-13995c7128cc/go.mod h1:ZjcWmFBXmLKZu9Nxj3WKYEafiSqer2rnvPr0en9UNpI=
-github.com/vishvananda/netns v0.0.0-20191106174202-0a2b9b5464df/go.mod h1:JP3t17pCcGlemwknint6hfoeCVQrEMVwxRLRjXpq+BU=
-github.com/vishvananda/netns v0.0.0-20200728191858-db3c7e526aae/go.mod h1:DD4vA1DwXk04H54A1oHXtwZmA0grkVMdPxx/VGLCah0=
-github.com/willf/bitset v1.1.11-0.20200630133818-d5bec3311243/go.mod h1:RjeCKbqT1RxIR/KWY6phxZiaY1IyutSBfGjNPySAYV4=
-github.com/willf/bitset v1.1.11/go.mod h1:83CECat5yLh5zVOf4P1ErAgKA5UDvKtgyUABdr3+MjI=
+github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
+github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU=
+github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4=
+github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo=
+github.com/stretchr/testify v1.9.0 h1:HtqpIVDClZ4nwg75+f6Lvsy/wHu+3BoSGCbBAcpTsTg=
+github.com/stretchr/testify v1.9.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY=
github.com/xeipuuv/gojsonpointer v0.0.0-20180127040702-4e3ac2762d5f/go.mod h1:N2zxlSyiKSe5eX1tZViRH5QA0qijqEDrYZiPEAiq3wU=
github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415/go.mod h1:GwrjFmJcFw6At/Gs6z4yjiIwzuJ1/+UwLxMQDVQXShQ=
-github.com/xeipuuv/gojsonschema v0.0.0-20180618132009-1d523034197f/go.mod h1:5yf86TLmAcydyeJq5YvxkGPE2fm/u4myDekKRoLuqhs=
-github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2/go.mod h1:UETIi67q53MR2AWcXfiuqkDkRtnGDLqkBTpCHuJHxtU=
-github.com/xordataexchange/crypt v0.0.3-0.20170626215501-b2862e3d0a77/go.mod h1:aYKd//L2LvnjZzWKhF00oedf4jCCReLcmhLdhm1A27Q=
+github.com/xeipuuv/gojsonschema v1.2.0/go.mod h1:anYRn/JVcOK2ZgGU+IjEV4nwlhoK5sQluxsYJ78Id3Y=
github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k=
-github.com/yvasiyarov/go-metrics v0.0.0-20140926110328-57bccd1ccd43/go.mod h1:aX5oPXxHm3bOH+xeAttToC8pqch2ScQN/JoXYupl6xs=
-github.com/yvasiyarov/gorelic v0.0.0-20141212073537-a9bba5b9ab50/go.mod h1:NUSPSUX/bi6SeDMUh6brw0nXpxHnc96TguQh0+r/ssA=
-github.com/yvasiyarov/newrelic_platform_go v0.0.0-20140908184405-b21fdbd4370f/go.mod h1:GlGEuHIJweS1mbCqG+7vt2nvWLzLLnRHbXz5JKd/Qbg=
-go.etcd.io/bbolt v1.3.2/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU=
-go.etcd.io/bbolt v1.3.3/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU=
-go.etcd.io/bbolt v1.3.5/go.mod h1:G5EMThwa9y8QZGBClrRx5EY+Yw9kAhnjy3bSjsnlVTQ=
-go.etcd.io/etcd v0.5.0-alpha.5.0.20200910180754-dd1b699fc489/go.mod h1:yVHk9ub3CSBatqGNg7GRmsnfLWtoW60w4eDYfh7vHDg=
-go.mozilla.org/pkcs7 v0.0.0-20200128120323-432b2356ecb1/go.mod h1:SNgMg+EgDFwmvSmLRTNKC5fegJjB7v23qTQ0XLGUNHk=
-go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU=
-go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8=
-go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
-go.opencensus.io v0.22.3/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
-go.opentelemetry.io/proto/otlp v0.7.0/go.mod h1:PqfVotwruBrMGOCsRd/89rSnXhoiJIqeYNgFYFoEGnI=
-go.uber.org/atomic v1.3.2/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE=
-go.uber.org/atomic v1.4.0/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE=
-go.uber.org/atomic v1.7.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc=
-go.uber.org/atomic v1.9.0 h1:ECmE8Bn/WFTYwEW/bpKD3M8VtR/zQVbavAoalC1PYyE=
-go.uber.org/atomic v1.9.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc=
-go.uber.org/goleak v1.1.10/go.mod h1:8a7PlsEVH3e/a/GLqe5IIrQx6GzcnRmZEufDUTk4A7A=
-go.uber.org/goleak v1.1.11 h1:wy28qYRKZgnJTxGxvye5/wgWr1EKjmUDGYox5mGlRlI=
-go.uber.org/goleak v1.1.11/go.mod h1:cwTWslyiVhfpKIDGSZEM2HlOvcqm+tG4zioyIeLoqMQ=
-go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0=
-go.uber.org/multierr v1.6.0/go.mod h1:cdWPpRnG4AhwMwsgIHip0KRBQjJy5kYEpYjJxpXp9iU=
-go.uber.org/multierr v1.7.0 h1:zaiO/rmgFjbmCXdSYJWQcdvOCsthmdaHfr3Gm2Kx4Ec=
-go.uber.org/multierr v1.7.0/go.mod h1:7EAYxJLBy9rStEaz58O2t4Uvip6FSURkq8/ppBp95ak=
-go.uber.org/zap v1.10.0/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q=
-go.uber.org/zap v1.19.0/go.mod h1:xg/QME4nWcxGxrpdeYfq7UvYrLh66cuVKdrbD1XF/NI=
-go.uber.org/zap v1.21.0 h1:WefMeulhovoZ2sYXz7st6K0sLj7bBhpiFaud4r4zST8=
-go.uber.org/zap v1.21.0/go.mod h1:wjWOCqI0f2ZZrJF/UufIOkiC8ii6tm1iqIsLo76RfJw=
-golang.org/x/crypto v0.0.0-20171113213409-9f005a07e0d3/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
-golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
-golang.org/x/crypto v0.0.0-20181009213950-7c1a557ab941/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
+github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY=
+go.uber.org/goleak v1.2.0/go.mod h1:XJYK+MuIchqpmGmUSAzotztawfKvYLUIgg7guXrwVUo=
+go.uber.org/goleak v1.2.1/go.mod h1:qlT2yGI9QafXHhZZLxlSuNsMw3FFLxBr+tBRlmO1xH4=
+go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto=
+go.uber.org/goleak v1.3.0/go.mod h1:CoHD4mav9JJNrW/WLlf7HGZPjdw8EucARQHekz1X6bE=
+go.uber.org/multierr v1.10.0/go.mod h1:20+QtiLqy0Nd6FdQB9TLXag12DsQkrbs3htMFfDN80Y=
+go.uber.org/multierr v1.11.0 h1:blXXJkSxSSfBVBlC76pxqeO+LN3aDfLQo+309xJstO0=
+go.uber.org/multierr v1.11.0/go.mod h1:20+QtiLqy0Nd6FdQB9TLXag12DsQkrbs3htMFfDN80Y=
+go.uber.org/zap v1.26.0/go.mod h1:dtElttAiwGvoJ/vj4IwHBS/gXsEu/pZ50mUIRWuG0so=
+go.uber.org/zap v1.27.0 h1:aJMhYGrd5QSmlpLMr2MftRKl7t8J8PTZPA732ud/XR8=
+go.uber.org/zap v1.27.0/go.mod h1:GB2qFLM7cTU87MWRP2mPIjqfIDnGu+VIO4V/SdhGo2E=
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
-golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
-golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
-golang.org/x/crypto v0.0.0-20190611184440-5c40567a22f8/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
-golang.org/x/crypto v0.0.0-20190701094942-4def268fd1a4/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
-golang.org/x/crypto v0.0.0-20200728195943-123391ffb6de/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
-golang.org/x/crypto v0.0.0-20201002170205-7f63de1d35b0/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
-golang.org/x/crypto v0.0.0-20210322153248-0c34fe9e7dc2 h1:It14KIkyBFYkHkwZ7k45minvA9aorojkyjGk9KJ5B/w=
-golang.org/x/crypto v0.0.0-20210322153248-0c34fe9e7dc2/go.mod h1:T9bdIzuCu7OtxOm1hfPfRQxPLYneinmdGuTeoZ9dtd4=
-golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
-golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
-golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8=
-golang.org/x/exp v0.0.0-20190829153037-c13cbed26979/go.mod h1:86+5VVa7VpoJ4kLfm080zCjGlMRFzhUhsZKEZO7MGek=
-golang.org/x/exp v0.0.0-20191030013958-a1ab85dbe136/go.mod h1:JXzH8nQsPlswgeRAPE3MuO9GYsAcnJvJ4vnMwN/5qkY=
-golang.org/x/exp v0.0.0-20191129062945-2f5052295587/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4=
-golang.org/x/exp v0.0.0-20191227195350-da58074b4299/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4=
-golang.org/x/exp v0.0.0-20200119233911-0405dc783f0a/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4=
-golang.org/x/exp v0.0.0-20200207192155-f17229e696bd/go.mod h1:J/WKrq2StrnmMY6+EHIKF9dgMWnmCNThgcyBT1FY9mM=
-golang.org/x/exp v0.0.0-20200224162631-6cc2880d07d6/go.mod h1:3jZMyOhIsHpP37uCMkUooju7aAi5cS1Q23tOzKc+0MU=
+golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
+golang.org/x/crypto v0.1.0/go.mod h1:RecgLatLF4+eUMCP1PoPZQb+cVrJcOPbHkTkbkB9sbw=
+golang.org/x/crypto v0.13.0/go.mod h1:y6Z2r+Rw4iayiXXAIxJIDAJ1zMW4yaTpebo8fPOliYc=
+golang.org/x/crypto v0.16.0 h1:mMMrFzRSCF0GvB7Ne27XVtVAaXLrPmgPC7/v0tkwHaY=
+golang.org/x/crypto v0.16.0/go.mod h1:gCAAfMLgwOJRpTjQ2zCCt2OcSfYMTeZVSRtQlPC7Nq4=
golang.org/x/exp v0.0.0-20221012112151-59b0eab1532e h1:/SJUJZl3kz7J5GzAx5lgaKvqKGd4OfzshwDMr6YJCC4=
golang.org/x/exp v0.0.0-20221012112151-59b0eab1532e/go.mod h1:cyybsKvd6eL0RnXn6p/Grxp8F5bW7iYuBgsNCOHpMYE=
-golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js=
-golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0=
-golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
-golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU=
-golang.org/x/lint v0.0.0-20190301231843-5614ed5bae6f/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
-golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
-golang.org/x/lint v0.0.0-20190409202823-959b441ac422/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
-golang.org/x/lint v0.0.0-20190909230951-414d861bb4ac/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
-golang.org/x/lint v0.0.0-20191125180803-fdd1cda4f05f/go.mod h1:5qLYkcX4OjUUV8bRuDixDT3tpyyb+LUpUlRWLxfhWrs=
-golang.org/x/lint v0.0.0-20200130185559-910be7a94367/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY=
-golang.org/x/lint v0.0.0-20200302205851-738671d3881b/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY=
-golang.org/x/lint v0.0.0-20210508222113-6edffad5e616/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY=
-golang.org/x/mobile v0.0.0-20190312151609-d3739f865fa6/go.mod h1:z+o9i4GpDbdi3rU15maQ/Ox0txvL9dWGYEHz965HBQE=
-golang.org/x/mobile v0.0.0-20190719004257-d2bd2a29d028/go.mod h1:E/iHnbuqvinMTCcRqshq8CkpyQDoeVncDDYHnLhea+o=
-golang.org/x/mod v0.0.0-20190513183733-4bf6d317e70e/go.mod h1:mXi4GBBbnImb6dmsKGUJ2LatrhH/nqhxcFungHvyanc=
-golang.org/x/mod v0.1.0/go.mod h1:0QHyrYULN0/3qlju5TqG8bIK38QM8yzMo5ekMj3DlcY=
-golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg=
-golang.org/x/mod v0.1.1-0.20191107180719-034126e5016b/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg=
golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
+golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4=
+golang.org/x/mod v0.6.0/go.mod h1:4mET923SAdbXp2ki8ey+zGs1SLqsuM2Y0uvdZR/fUNI=
+golang.org/x/mod v0.8.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs=
+golang.org/x/mod v0.12.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs=
+golang.org/x/mod v0.14.0 h1:dGoOF9QVLYng8IHTm7BAyWqCqSheQ5pYWGhzW00YJr0=
+golang.org/x/mod v0.14.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c=
golang.org/x/net v0.0.0-20180218175443-cbe0f9307d01/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
-golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
-golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
-golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
-golang.org/x/net v0.0.0-20181011144130-49bb7cea24b1/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
-golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
-golang.org/x/net v0.0.0-20181220203305-927f97764cc3/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
-golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
-golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
-golang.org/x/net v0.0.0-20190501004415-9ce7a6920f09/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
-golang.org/x/net v0.0.0-20190503192946-f4e77d36d62c/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
-golang.org/x/net v0.0.0-20190522155817-f3200d17e092/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks=
-golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks=
-golang.org/x/net v0.0.0-20190613194153-d28f0bde5980/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
-golang.org/x/net v0.0.0-20190619014844-b5b0513f8c1b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
-golang.org/x/net v0.0.0-20190724013045-ca1201d0de80/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
-golang.org/x/net v0.0.0-20190813141303-74dc4d7220e7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
-golang.org/x/net v0.0.0-20190827160401-ba9fcec4b297/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
-golang.org/x/net v0.0.0-20191004110552-13f9640d40b9/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
-golang.org/x/net v0.0.0-20191209160850-c0dbc17a3553/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
-golang.org/x/net v0.0.0-20200114155413-6afb5195e5aa/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20200202094626-16171245cfb2/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
-golang.org/x/net v0.0.0-20200222125558-5a598a2470a0/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
-golang.org/x/net v0.0.0-20200301022130-244492dfa37a/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20200324143707-d3edc9973b7e/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
-golang.org/x/net v0.0.0-20200520004742-59133d7f0dd7/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
-golang.org/x/net v0.0.0-20200707034311-ab3426394381/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA=
-golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA=
-golang.org/x/net v0.0.0-20201006153459-a7d1128ccaa0/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
-golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
-golang.org/x/net v0.0.0-20201224014010-6772e930b67b/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM=
-golang.org/x/net v0.0.0-20210428140749-89ef3d95e781/go.mod h1:OJAsFXCWl8Ukc7SiCT/9KSuxbyM7479/AVlXFRxuMCk=
-golang.org/x/net v0.0.0-20210813160813-60bc85c4be6d/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
-golang.org/x/net v0.7.0 h1:rJrUqqhjsgNp7KqAIc25s9pZnjU7TUcSY7HcVZjdn1g=
-golang.org/x/net v0.7.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs=
-golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
-golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
-golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
-golang.org/x/oauth2 v0.0.0-20191202225959-858c2ad4c8b6/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
-golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
-golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
-golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
-golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
-golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c=
+golang.org/x/net v0.1.0/go.mod h1:Cx3nUiGt4eDBEyega/BKRp+/AlGL8hYe7U9odMt2Cco=
+golang.org/x/net v0.6.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs=
+golang.org/x/net v0.10.0/go.mod h1:0qNGK6F8kojg2nk9dLZ2mShWaEBan6FAoqfSigmmuDg=
+golang.org/x/net v0.15.0/go.mod h1:idbUs1IY1+zTqbi8yxTbhexhEEk5ur9LInksu6HrEpk=
+golang.org/x/net v0.19.0 h1:zTwKpTd2XuCqf8huc7Fo2iSy+4RHPd10s4KzeTnVr1c=
+golang.org/x/net v0.19.0/go.mod h1:CfAk/cbD4CthTvqiEl8NpboMuiuOYsAr/7NOjZJtv1U=
golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
-golang.org/x/sync v0.0.0-20200625203802-6e8e738ad208/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
-golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
-golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4 h1:uVc8UZUe6tr40fFVnUP5Oj+veunVezqYl9z7DYw9xzw=
golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
-golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
-golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
-golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
-golang.org/x/sys v0.0.0-20181107165924-66b7b1311ac8/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
-golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
+golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.3.0/go.mod h1:FU7BRWz2tNW+3quACPkgCx/L+uEAv1htQ0V83Z9Rj+Y=
+golang.org/x/sync v0.5.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk=
+golang.org/x/sync v0.6.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk=
+golang.org/x/sync v0.7.0 h1:YsImfSBoP9QPYL0xyKJPq0gcaJdG3rInoqxTWbfQu9M=
+golang.org/x/sync v0.7.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk=
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
-golang.org/x/sys v0.0.0-20190312061237-fead79001313/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20190502145724-3ef323f4f1fd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20190507160741-ecd444e8653b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20190514135907-3a4b5fb9f71f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20190522044717-8097e1b27ff5/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20190602015325-4c4f7f33c9ed/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20190606165138-5da285871e9c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20190606203320-7fc4e5ec1444/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20190616124812-15dcb6c0061f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20190624142023-c5567b49c5d0/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20190726091711-fc99dfbffb4e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20190801041406-cbf593c0f2f3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20190812073006-9eafafc0a87e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20190826190057-c7b8b68b1456/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20190904154756-749cb33beabd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20190916202348-b4ddaad3f8a3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20191001151750-bb3f8db39f24/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20191005200804-aed5e4c7ecf9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20191022100944-742c48ecaeb7/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20191026070338-33540a1f6037/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20191115151921-52ab43148777/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20191120155948-bd437916bb0e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20191204072324-ce4227a45e2e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20191210023423-ac6580df4449/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20191228213918-04cbcbbfeed8/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20200106162015-b016eb3dc98e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20200113162924-86b910548bc1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200116001909-b77594299b42/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20200120151820-655fe14d7479/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20200122134326-e047566fdf82/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20200124204421-9fbb57f87de9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20200202164722-d101bd2416d5/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20200212091648-12a6c2dcc1e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20200217220822-9197077df867/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200223170610-d5e6a3e2c0ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20200302150141-5c8b2ff67527/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20200615200032-f1bc736245b1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20200622214017-ed371f2e16b4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20200728102440-3e129f6d46b1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20200817155316-9781c653f443/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20200831180312-196b9ba8737a/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20200909081042-eff7692f9009/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20200916030750-2334cc1a136f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20200922070232-aee5d888a860/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20201112073958-5cba982894dd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20201117170446-d9b008d0a637/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20201201145000-ef89a241ccb3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20201202213521-69691e467435/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20210112080510-489259a85091/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20210324051608-47abb6519492/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20210426230700-d19ff857e887/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
-golang.org/x/sys v0.0.0-20210823070655-63515b42dcdf/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
-golang.org/x/sys v0.5.0 h1:MUK/U/4lj1t1oPg0HfuXDN/Z1wv31ZJ/YcPiGccS4DU=
+golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20210616094352-59db8d763f22/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.12.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.15.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
+golang.org/x/sys v0.16.0 h1:xWw16ngr6ZMtmxDyKyIgsE93KNKz5HKmMa3b8ALHidU=
+golang.org/x/sys v0.16.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
-golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
+golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
+golang.org/x/term v0.1.0/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
+golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k=
+golang.org/x/term v0.8.0/go.mod h1:xPskH00ivmX89bAKVGSKKtLOWNx2+17Eiy94tnKShWo=
+golang.org/x/term v0.12.0/go.mod h1:owVbMEjm3cBLCHdkQu9b1opXd4ETQWc3BhuQGKgXgvU=
+golang.org/x/term v0.15.0/go.mod h1:BDl952bC7+uMoWR75FIrCDx79TPU9oHkTZ9yRbYOrX0=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
-golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
-golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk=
golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
-golang.org/x/text v0.3.4/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
-golang.org/x/text v0.3.5/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
-golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
-golang.org/x/text v0.7.0 h1:4BRB4x83lYWy72KwLD/qYDuTu7q9PjSagHvijDw7cLo=
-golang.org/x/time v0.0.0-20180412165947-fbb02b2291d2/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
-golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
-golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
-golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
-golang.org/x/time v0.0.0-20200630173020-3af7569d3a1e/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
-golang.org/x/time v0.0.0-20210220033141-f8bda1e9f3ba h1:O8mE0/t419eoIwhTFpKVkHiTs/Igowgfkj25AcZrtiE=
-golang.org/x/time v0.0.0-20210220033141-f8bda1e9f3ba/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
-golang.org/x/tools v0.0.0-20180221164845-07fd8470d635/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
+golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ=
+golang.org/x/text v0.4.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8=
+golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8=
+golang.org/x/text v0.9.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8=
+golang.org/x/text v0.13.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE=
+golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU=
+golang.org/x/time v0.3.0 h1:rg5rLMjNzMS1RkNLzCG38eapWhnYLFYXDXj2gOlr8j4=
+golang.org/x/time v0.3.0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
-golang.org/x/tools v0.0.0-20181030221726-6c7e314b6563/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
-golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
-golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY=
golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
-golang.org/x/tools v0.0.0-20190312151545-0bb0c0a6e846/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
-golang.org/x/tools v0.0.0-20190312170243-e65039ee4138/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
-golang.org/x/tools v0.0.0-20190328211700-ab21143f2384/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
-golang.org/x/tools v0.0.0-20190425150028-36563e24a262/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=
-golang.org/x/tools v0.0.0-20190506145303-2d16b83fe98c/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=
-golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=
-golang.org/x/tools v0.0.0-20190606124116-d0a3d012864b/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
-golang.org/x/tools v0.0.0-20190614205625-5aca471b1d59/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
-golang.org/x/tools v0.0.0-20190621195816-6e04913cbbac/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
golang.org/x/tools v0.0.0-20190624222133-a101b041ded4/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
-golang.org/x/tools v0.0.0-20190628153133-6cdbf07be9d0/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
-golang.org/x/tools v0.0.0-20190816200558-6889da9d5479/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
-golang.org/x/tools v0.0.0-20190911174233-4f2ddba30aff/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
-golang.org/x/tools v0.0.0-20191012152004-8de300cfc20a/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
-golang.org/x/tools v0.0.0-20191108193012-7d206e10da11/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
-golang.org/x/tools v0.0.0-20191113191852-77e3bb0ad9e7/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
-golang.org/x/tools v0.0.0-20191115202509-3a792d9c32b2/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
-golang.org/x/tools v0.0.0-20191125144606-a911d9008d1f/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
-golang.org/x/tools v0.0.0-20191130070609-6e064ea0cf2d/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
-golang.org/x/tools v0.0.0-20191216173652-a0e659d51361/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
-golang.org/x/tools v0.0.0-20191227053925-7b8e75db28f4/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
-golang.org/x/tools v0.0.0-20200117161641-43d50277825c/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
-golang.org/x/tools v0.0.0-20200122220014-bf1340f18c4a/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
-golang.org/x/tools v0.0.0-20200130002326-2f3ba24bd6e7/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
-golang.org/x/tools v0.0.0-20200204074204-1cc6d1ef6c74/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
-golang.org/x/tools v0.0.0-20200207183749-b753a1ba74fa/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
-golang.org/x/tools v0.0.0-20200212150539-ea181f53ac56/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
-golang.org/x/tools v0.0.0-20200224181240-023911ca70b2/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
-golang.org/x/tools v0.0.0-20200304193943-95d2e580d8eb/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw=
golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
-golang.org/x/tools v0.0.0-20201224043029-2b0845dc783e/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
golang.org/x/tools v0.1.5/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk=
+golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc=
+golang.org/x/tools v0.2.0/go.mod h1:y4OqIKeOV/fWJetJ8bXPU1sEVniLMIyDAZWeHdV+NTA=
+golang.org/x/tools v0.6.0/go.mod h1:Xwgl3UAJ/d3gWutnCtw505GrjyAbvKui8lOU390QaIU=
+golang.org/x/tools v0.13.0/go.mod h1:HvlwmtVNQAhOuCjW7xxvovg8wbNq7LwfXh/k7wXUl58=
+golang.org/x/tools v0.16.1 h1:TLyB3WofjdOEepBHAU20JdNC1Zbg87elYofWYAY5oZA=
+golang.org/x/tools v0.16.1/go.mod h1:kYVVN6I1mBNoB1OX+noeBjbRk4IUEPa7JJ+TJMEooJ0=
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1 h1:go1bK/D/BFZV2I8cIQd1NKEZ+0owSTG1fDTci4IqFcE=
golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
-google.golang.org/api v0.0.0-20160322025152-9bf6e6e569ff/go.mod h1:4mhQ8q/RsB7i+udVvVy5NUi08OU8ZlA0gRVgrF7VFY0=
-google.golang.org/api v0.4.0/go.mod h1:8k5glujaEP+g9n7WNsDg8QP6cUVNI86fCNMcbazEtwE=
-google.golang.org/api v0.7.0/go.mod h1:WtwebWUNSVBH/HAw79HIFXZNqEvBhG+Ra+ax0hx3E3M=
-google.golang.org/api v0.8.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg=
-google.golang.org/api v0.9.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg=
-google.golang.org/api v0.13.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI=
-google.golang.org/api v0.14.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI=
-google.golang.org/api v0.15.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI=
-google.golang.org/api v0.17.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE=
-google.golang.org/api v0.18.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE=
-google.golang.org/api v0.20.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE=
-google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM=
-google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
-google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
-google.golang.org/appengine v1.6.1/go.mod h1:i06prIuMbXzDqacNJfV5OdTW448YApPu5ww/cMBSeb0=
-google.golang.org/appengine v1.6.5/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc=
-google.golang.org/cloud v0.0.0-20151119220103-975617b05ea8/go.mod h1:0H1ncTHf11KCFhTc/+EFRbzSCOZx+VUbRMk55Yv5MYk=
-google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc=
-google.golang.org/genproto v0.0.0-20190307195333-5fe7a883aa19/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
-google.golang.org/genproto v0.0.0-20190418145605-e7d98fc518a7/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
-google.golang.org/genproto v0.0.0-20190425155659-357c62f0e4bb/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
-google.golang.org/genproto v0.0.0-20190502173448-54afdca5d873/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
-google.golang.org/genproto v0.0.0-20190522204451-c2c4e71fbf69/go.mod h1:z3L6/3dTEVtUr6QSP8miRzeRqwQOioJ9I66odjN4I7s=
-google.golang.org/genproto v0.0.0-20190801165951-fa694d86fc64/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc=
-google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc=
-google.golang.org/genproto v0.0.0-20190911173649-1774047e7e51/go.mod h1:IbNlFCBrqXvoKpeg0TB2l7cyZUmoaFKYIwrEpbDKLA8=
-google.golang.org/genproto v0.0.0-20191108220845-16a3f7862a1a/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=
-google.golang.org/genproto v0.0.0-20191115194625-c23dd37a84c9/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=
-google.golang.org/genproto v0.0.0-20191216164720-4f79533eabd1/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=
-google.golang.org/genproto v0.0.0-20191230161307-f3c370f40bfb/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=
-google.golang.org/genproto v0.0.0-20200115191322-ca5a22157cba/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=
-google.golang.org/genproto v0.0.0-20200117163144-32f20d992d24/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=
-google.golang.org/genproto v0.0.0-20200122232147-0452cf42e150/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=
-google.golang.org/genproto v0.0.0-20200204135345-fa8e72b47b90/go.mod h1:GmwEX6Z4W5gMy59cAlVYjN9JhxgbQH6Gn+gFDQe2lzA=
-google.golang.org/genproto v0.0.0-20200212174721-66ed5ce911ce/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
-google.golang.org/genproto v0.0.0-20200224152610-e50cd9704f63/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
-google.golang.org/genproto v0.0.0-20200305110556-506484158171/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
-google.golang.org/genproto v0.0.0-20200513103714-09dca8ec2884/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
-google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo=
-google.golang.org/genproto v0.0.0-20201110150050-8816d57aaa9a/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
-google.golang.org/genproto v0.0.0-20210824181836-a4879c3d0e89/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY=
-google.golang.org/grpc v0.0.0-20160317175043-d3ddb4469d5a/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw=
-google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c=
-google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38=
-google.golang.org/grpc v1.21.0/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM=
-google.golang.org/grpc v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM=
-google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg=
-google.golang.org/grpc v1.23.1/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg=
-google.golang.org/grpc v1.24.0/go.mod h1:XDChyiUovWa60DnaeDeZmSW86xtLtjtZbwvSiRnRtcA=
-google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY=
-google.golang.org/grpc v1.26.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk=
-google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk=
-google.golang.org/grpc v1.27.1/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk=
-google.golang.org/grpc v1.30.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak=
-google.golang.org/grpc v1.33.1/go.mod h1:fr5YgcSWrqhRRxogOsw7RzIpsmvOZ6IcH4kBYTpR3n0=
-google.golang.org/grpc v1.33.2/go.mod h1:JMHMWHQWaTccqQQlmk3MJZS+GWXOdAesneDmEnv2fbc=
-google.golang.org/grpc v1.36.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU=
-google.golang.org/grpc v1.40.0/go.mod h1:ogyxbiOoUXAkP+4+xa6PZSE9DZgIHtSpzjDTB9KAK34=
-google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8=
-google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0=
-google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM=
-google.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miEFZTKqfCUM6K7xSMQL9OKL/b6hQv+e19PK+JZNE=
-google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo=
-google.golang.org/protobuf v1.22.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
-google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
-google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
-google.golang.org/protobuf v1.24.0/go.mod h1:r/3tXBNzIEhYS9I1OUVjXDlt8tc493IdKGjtUeSXeh4=
-google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c=
-google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw=
-google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc=
-google.golang.org/protobuf v1.27.1/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc=
-gopkg.in/airbrake/gobrake.v2 v2.0.9/go.mod h1:/h5ZAUhDkGaJfjzjKLSjv6zCL6O0LLBxU4K+aSYdM/U=
-gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
-gopkg.in/check.v1 v1.0.0-20141024133853-64131543e789/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
-gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15 h1:YR8cESwS4TdDjEe65xsg0ogRM/Nc3DYOhEAlW+xobZo=
-gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
-gopkg.in/cheggaaa/pb.v1 v1.0.25/go.mod h1:V/YB90LKu/1FcN3WVnfiiE5oMCibMjukxqG/qStrOgw=
-gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI=
-gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys=
-gopkg.in/gemnasium/logrus-airbrake-hook.v2 v2.1.2/go.mod h1:Xk6kEKp8OKb+X14hQBKWaSkCsqBpgog8nAV2xsGOxlo=
-gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw=
-gopkg.in/natefinch/lumberjack.v2 v2.0.0/go.mod h1:l0ndWWf7gzL7RNwBG7wST/UCcT4T24xpD6X8LsfU/+k=
-gopkg.in/resty.v1 v1.12.0/go.mod h1:mDo4pnntr5jdWRML875a/NmxYqAlA73dVijT2AXvQQo=
-gopkg.in/square/go-jose.v2 v2.2.2/go.mod h1:M9dMgbHiYLoDGQrXy7OpJDJWiKiU//h+vD76mk0e1AI=
-gopkg.in/square/go-jose.v2 v2.3.1/go.mod h1:M9dMgbHiYLoDGQrXy7OpJDJWiKiU//h+vD76mk0e1AI=
-gopkg.in/square/go-jose.v2 v2.5.1/go.mod h1:M9dMgbHiYLoDGQrXy7OpJDJWiKiU//h+vD76mk0e1AI=
-gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 h1:uRGJdciOHaEIrze2W8Q3AKkepLTh2hOroT7a+7czfdQ=
-gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw=
-gopkg.in/yaml.v2 v2.0.0-20170812160011-eb3733d160e7/go.mod h1:JAlM8MvJe8wmxCU4Bli9HhUf9+ttbYbLASfIpnQbh74=
-gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
-gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
-gopkg.in/yaml.v2 v2.2.3/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
-gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
-gopkg.in/yaml.v2 v2.2.5/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
-gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
-gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
-gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY=
-gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ=
+gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk=
+gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q=
gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
-gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b h1:h8qDotaEPuJATrMmW04NCwg7v22aHH28wwpauUhK9Oo=
gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
-gotest.tools v2.2.0+incompatible h1:VsBPFP1AI068pPrMxtb/S8Zkgf9xEmTLJjfM+P5UIEo=
-gotest.tools v2.2.0+incompatible/go.mod h1:DsYFclhRJ6vuDpmuTbkuFWG+y2sxOXAzmJt81HFBacw=
+gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
+gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
gotest.tools/v3 v3.0.2/go.mod h1:3SzNCllyD9/Y+b5r9JIKQ474KzkZyqLqEfYqMsX94Bk=
-gotest.tools/v3 v3.0.3 h1:4AuOwCGf4lLR9u3YOe2awrHygurzhO/HeQ6laiA6Sx0=
-gotest.tools/v3 v3.0.3/go.mod h1:Z7Lb0S5l+klDB31fvDQX8ss/FlKDxtlFlw3Oa8Ymbl8=
-honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
-honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
-honnef.co/go/tools v0.0.0-20190418001031-e561f6794a2a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
-honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
-honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg=
-honnef.co/go/tools v0.0.1-2020.1.3/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k=
-k8s.io/api v0.20.1/go.mod h1:KqwcCVogGxQY3nBlRpwt+wpAMF/KjaCc7RpywacvqUo=
-k8s.io/api v0.20.4/go.mod h1:++lNL1AJMkDymriNniQsWRkMDzRaX2Y/POTUi8yvqYQ=
-k8s.io/api v0.20.6/go.mod h1:X9e8Qag6JV/bL5G6bU8sdVRltWKmdHsFUGS3eVndqE8=
-k8s.io/apimachinery v0.20.1/go.mod h1:WlLqWAHZGg07AeltaI0MV5uk1Omp8xaN0JGLY6gkRpU=
-k8s.io/apimachinery v0.20.4/go.mod h1:WlLqWAHZGg07AeltaI0MV5uk1Omp8xaN0JGLY6gkRpU=
-k8s.io/apimachinery v0.20.6/go.mod h1:ejZXtW1Ra6V1O5H8xPBGz+T3+4gfkTCeExAHKU57MAc=
-k8s.io/apiserver v0.20.1/go.mod h1:ro5QHeQkgMS7ZGpvf4tSMx6bBOgPfE+f52KwvXfScaU=
-k8s.io/apiserver v0.20.4/go.mod h1:Mc80thBKOyy7tbvFtB4kJv1kbdD0eIH8k8vianJcbFM=
-k8s.io/apiserver v0.20.6/go.mod h1:QIJXNt6i6JB+0YQRNcS0hdRHJlMhflFmsBDeSgT1r8Q=
-k8s.io/client-go v0.20.1/go.mod h1:/zcHdt1TeWSd5HoUe6elJmHSQ6uLLgp4bIJHVEuy+/Y=
-k8s.io/client-go v0.20.4/go.mod h1:LiMv25ND1gLUdBeYxBIwKpkSC5IsozMMmOOeSJboP+k=
-k8s.io/client-go v0.20.6/go.mod h1:nNQMnOvEUEsOzRRFIIkdmYOjAZrC8bgq0ExboWSU1I0=
-k8s.io/component-base v0.20.1/go.mod h1:guxkoJnNoh8LNrbtiQOlyp2Y2XFCZQmrcg2n/DeYNLk=
-k8s.io/component-base v0.20.4/go.mod h1:t4p9EdiagbVCJKrQ1RsA5/V4rFQNDfRlevJajlGwgjI=
-k8s.io/component-base v0.20.6/go.mod h1:6f1MPBAeI+mvuts3sIdtpjljHWBQ2cIy38oBIWMYnrM=
-k8s.io/cri-api v0.17.3/go.mod h1:X1sbHmuXhwaHs9xxYffLqJogVsnI+f6cPRcgPel7ywM=
-k8s.io/cri-api v0.20.1/go.mod h1:2JRbKt+BFLTjtrILYVqQK5jqhI+XNdF6UiGMgczeBCI=
-k8s.io/cri-api v0.20.4/go.mod h1:2JRbKt+BFLTjtrILYVqQK5jqhI+XNdF6UiGMgczeBCI=
-k8s.io/cri-api v0.20.6/go.mod h1:ew44AjNXwyn1s0U4xCKGodU7J1HzBeZ1MpGrpa5r8Yc=
-k8s.io/gengo v0.0.0-20200413195148-3a45101e95ac/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0=
-k8s.io/klog/v2 v2.0.0/go.mod h1:PBfzABfn139FHAV07az/IF9Wp1bkk3vpT2XSJ76fSDE=
-k8s.io/klog/v2 v2.4.0/go.mod h1:Od+F08eJP+W3HUb4pSrPpgp9DGU4GzlpG/TmITuYh/Y=
-k8s.io/kube-openapi v0.0.0-20201113171705-d219536bb9fd/go.mod h1:WOJ3KddDSol4tAGcJo0Tvi+dK12EcqSLqcWsryKMpfM=
-k8s.io/kubernetes v1.13.0/go.mod h1:ocZa8+6APFNC2tX1DZASIbocyYT5jHzqFVsY5aoB7Jk=
-k8s.io/utils v0.0.0-20201110183641-67b214c5f920/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA=
-rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8=
-rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0=
-rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA=
-sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.0.14/go.mod h1:LEScyzhFmoF5pso/YSeBstl57mOzx9xlU9n85RGrDQg=
-sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.0.15/go.mod h1:LEScyzhFmoF5pso/YSeBstl57mOzx9xlU9n85RGrDQg=
-sigs.k8s.io/structured-merge-diff/v4 v4.0.2/go.mod h1:bJZC9H9iH24zzfZ/41RGcq60oK1F7G282QMXDPYydCw=
-sigs.k8s.io/structured-merge-diff/v4 v4.0.3/go.mod h1:bJZC9H9iH24zzfZ/41RGcq60oK1F7G282QMXDPYydCw=
-sigs.k8s.io/yaml v1.1.0/go.mod h1:UJmg0vDUVViEyp3mgSv9WPwZCDxu4rQW1olrI1uml+o=
-sigs.k8s.io/yaml v1.2.0/go.mod h1:yfXDCHCao9+ENCvLSE62v9VSji2MKu5jeNfTrofGhJc=
+gotest.tools/v3 v3.5.0 h1:Ljk6PdHdOhAb5aDMWXjDLMMhph+BpztA4v1QdqEW2eY=
+gotest.tools/v3 v3.5.0/go.mod h1:isy3WKz7GK6uNw/sbHzfKBLvlvXwUyV06n6brMxxopU=
diff --git a/tests/history_bench_test.go b/tests/history_bench_test.go
index e1ea0d2..7f66acc 100644
--- a/tests/history_bench_test.go
+++ b/tests/history_bench_test.go
@@ -3,10 +3,10 @@ package icingadb_test
import (
"context"
"fmt"
- "github.com/go-redis/redis/v8"
"github.com/google/uuid"
"github.com/icinga/icinga-testing/utils"
"github.com/jmoiron/sqlx"
+ "github.com/redis/go-redis/v9"
"github.com/stretchr/testify/require"
"strconv"
"testing"
@@ -27,10 +27,7 @@ func BenchmarkHistory(b *testing.B) {
}
func benchmarkHistory(b *testing.B, numComments int64) {
- m := it.MysqlDatabase()
- defer m.Cleanup()
- m.ImportIcingaDbSchema()
-
+ rdb := getDatabase(b)
r := it.RedisServer()
defer r.Cleanup()
n := it.Icinga2Node("master")
@@ -39,8 +36,8 @@ func benchmarkHistory(b *testing.B, numComments int64) {
err := n.Reload()
require.NoError(b, err, "icinga2 should reload without error")
- db, err := sqlx.Connect("mysql", m.DSN())
- require.NoError(b, err, "connecting to mysql")
+ db, err := sqlx.Connect(rdb.Driver(), rdb.DSN())
+ require.NoError(b, err, "connecting to database")
defer func() { _ = db.Close() }()
redisClient := r.Open()
@@ -97,7 +94,7 @@ func benchmarkHistory(b *testing.B, numComments int64) {
b.Logf("current stream length: %d", lastPending)
b.StartTimer()
- idb := it.IcingaDbInstance(r, m)
+ idb := it.IcingaDbInstance(r, rdb)
defer idb.Cleanup()
ticker := time.NewTicker(5 * time.Millisecond)
diff --git a/tests/history_test.go b/tests/history_test.go
index e720f02..8590bc3 100644
--- a/tests/history_test.go
+++ b/tests/history_test.go
@@ -6,16 +6,16 @@ import (
_ "embed"
"encoding/json"
"fmt"
- "github.com/go-redis/redis/v8"
"github.com/icinga/icinga-testing/services"
"github.com/icinga/icinga-testing/utils"
"github.com/icinga/icinga-testing/utils/eventually"
"github.com/icinga/icinga-testing/utils/pki"
"github.com/jmoiron/sqlx"
+ "github.com/redis/go-redis/v9"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"go.uber.org/zap"
- "io/ioutil"
+ "io"
"math"
"net/http"
"sort"
@@ -143,7 +143,7 @@ func testHistory(t *testing.T, numNodes int) {
}, 15*time.Second, 200*time.Millisecond)
db, err := sqlx.Connect(rdb.Driver(), rdb.DSN())
- require.NoError(t, err, "connecting to mysql")
+ require.NoError(t, err, "connecting to database")
t.Cleanup(func() { _ = db.Close() })
client := nodes[0].IcingaClient
@@ -717,7 +717,7 @@ func processCheckResult(t *testing.T, client *utils.Icinga2Client, hostname stri
response, err := client.PostJson("/v1/actions/process-check-result", bytes.NewBuffer(reqBody))
require.NoError(t, err, "process-check-result")
if !assert.Equal(t, 200, response.StatusCode, "process-check-result") {
- body, err := ioutil.ReadAll(response.Body)
+ body, err := io.ReadAll(response.Body)
require.NoError(t, err, "reading process-check-result response")
it.Logger(t).Error("process-check-result", zap.ByteString("api-response", body))
t.FailNow()
diff --git a/tests/internal/utils/redis.go b/tests/internal/utils/redis.go
index da3615d..eaf2c2b 100644
--- a/tests/internal/utils/redis.go
+++ b/tests/internal/utils/redis.go
@@ -4,8 +4,8 @@ import (
"context"
"encoding/hex"
"encoding/json"
- "github.com/go-redis/redis/v8"
"github.com/icinga/icinga-testing/services"
+ "github.com/redis/go-redis/v9"
"github.com/stretchr/testify/require"
"testing"
"time"
diff --git a/tests/object_sync_test.go b/tests/object_sync_test.go
index e2455ec..23d2c96 100644
--- a/tests/object_sync_test.go
+++ b/tests/object_sync_test.go
@@ -6,13 +6,13 @@ import (
"database/sql"
_ "embed"
"fmt"
- "github.com/go-redis/redis/v8"
"github.com/icinga/icinga-testing/services"
"github.com/icinga/icinga-testing/utils"
"github.com/icinga/icinga-testing/utils/eventually"
localutils "github.com/icinga/icingadb/tests/internal/utils"
"github.com/icinga/icingadb/tests/internal/value"
"github.com/jmoiron/sqlx"
+ "github.com/redis/go-redis/v9"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"go.uber.org/zap"
diff --git a/tests/sla_test.go b/tests/sla_test.go
index 6fa3a0e..597f5a0 100644
--- a/tests/sla_test.go
+++ b/tests/sla_test.go
@@ -18,15 +18,14 @@ import (
)
func TestSla(t *testing.T) {
- m := it.MysqlDatabaseT(t)
- m.ImportIcingaDbSchema()
+ rdb := getDatabase(t)
r := it.RedisServerT(t)
i := it.Icinga2NodeT(t, "master")
i.EnableIcingaDb(r)
err := i.Reload()
require.NoError(t, err, "icinga2 should reload without error")
- it.IcingaDbInstanceT(t, r, m)
+ it.IcingaDbInstanceT(t, r, rdb)
client := i.ApiClient()
@@ -109,8 +108,8 @@ func TestSla(t *testing.T) {
assert.Equal(t, 3, len(stateChanges), "there should be three hard state changes")
- db, err := sqlx.Connect("mysql", m.DSN())
- require.NoError(t, err, "connecting to mysql")
+ db, err := sqlx.Connect(rdb.Driver(), rdb.DSN())
+ require.NoError(t, err, "connecting to database")
defer func() { _ = db.Close() }()
type Row struct {
@@ -248,8 +247,8 @@ func TestSla(t *testing.T) {
End int64 `db:"downtime_end"`
}
- db, err := sqlx.Connect("mysql", m.DSN())
- require.NoError(t, err, "connecting to mysql")
+ db, err := sqlx.Connect(rdb.Driver(), rdb.DSN())
+ require.NoError(t, err, "connecting to database")
defer func() { _ = db.Close() }()
if !o.Fixed {
diff --git a/tests/sql/sla_test.go b/tests/sql/sla_test.go
index 8a89850..de2bace 100644
--- a/tests/sql/sla_test.go
+++ b/tests/sql/sla_test.go
@@ -48,6 +48,42 @@ func TestSla(t *testing.T) {
End: 2000,
Expected: 60.0,
}, {
+ Name: "MultipleStateChangesDecimalsOddNumbers",
+ // Test flapping again, also that calculations are rounded correctly including decimal places.
+ Events: []SlaHistoryEvent{
+ &State{Time: 1000, State: 2, PreviousState: 99}, // -2.3%
+ &State{Time: 1023, State: 0, PreviousState: 2},
+ &State{Time: 1100, State: 2, PreviousState: 0}, // -14.2%
+ &State{Time: 1242, State: 0, PreviousState: 2},
+ &State{Time: 1300, State: 2, PreviousState: 0}, // -0.7%
+ &State{Time: 1307, State: 0, PreviousState: 2},
+ &State{Time: 1400, State: 2, PreviousState: 0}, // -26.6%
+ &State{Time: 1666, State: 0, PreviousState: 2},
+ },
+ Start: 1000,
+ End: 2000,
+ Expected: 56.2,
+ }, {
+ Name: "MultipleStateChangesDecimalsFractionOneThird",
+ // Test decimal representation of a fraction including precision and scale.
+ Events: []SlaHistoryEvent{
+ &State{Time: 1000, State: 2, PreviousState: 99}, // -33.3..%
+ &State{Time: 1100, State: 0, PreviousState: 2},
+ },
+ Start: 1000,
+ End: 1300,
+ Expected: 66.6667,
+ }, {
+ Name: "MultipleStateChangesDecimalsFractionSeventhPart",
+ // Test decimal representation of a fraction including precision and scale.
+ Events: []SlaHistoryEvent{
+ &State{Time: 1000, State: 2, PreviousState: 99}, // -85.7142..%
+ &State{Time: 1600, State: 0, PreviousState: 2},
+ },
+ Start: 1000,
+ End: 1700,
+ Expected: 14.2857,
+ }, {
Name: "OverlappingDowntimesAndProblems",
// SLA should be 90%:
// 1000..1100: OK, no downtime