Diffstat (limited to 'vendor/memchr')
-rw-r--r--  vendor/memchr/.cargo-checksum.json  2
-rw-r--r--  vendor/memchr/Cargo.toml  29
-rw-r--r--  vendor/memchr/README.md  25
-rw-r--r--  vendor/memchr/build.rs  88
-rwxr-xr-x  vendor/memchr/scripts/make-byte-frequency-table  74
-rw-r--r--  vendor/memchr/src/arch/aarch64/memchr.rs  137
-rw-r--r--  vendor/memchr/src/arch/aarch64/mod.rs  7
-rw-r--r--  vendor/memchr/src/arch/aarch64/neon/memchr.rs  1031
-rw-r--r--  vendor/memchr/src/arch/aarch64/neon/mod.rs  6
-rw-r--r--  vendor/memchr/src/arch/aarch64/neon/packedpair.rs  236
-rw-r--r--  vendor/memchr/src/arch/all/memchr.rs  1015
-rw-r--r--  vendor/memchr/src/arch/all/mod.rs  236
-rw-r--r--  vendor/memchr/src/arch/all/packedpair/default_rank.rs (renamed from vendor/memchr/src/memmem/byte_frequencies.rs)  2
-rw-r--r--  vendor/memchr/src/arch/all/packedpair/mod.rs  359
-rw-r--r--  vendor/memchr/src/arch/all/rabinkarp.rs  390
-rw-r--r--  vendor/memchr/src/arch/all/shiftor.rs  89
-rw-r--r--  vendor/memchr/src/arch/all/twoway.rs (renamed from vendor/memchr/src/memmem/twoway.rs)  327
-rw-r--r--  vendor/memchr/src/arch/generic/memchr.rs  1214
-rw-r--r--  vendor/memchr/src/arch/generic/mod.rs  14
-rw-r--r--  vendor/memchr/src/arch/generic/packedpair.rs  317
-rw-r--r--  vendor/memchr/src/arch/mod.rs  16
-rw-r--r--  vendor/memchr/src/arch/wasm32/memchr.rs  137
-rw-r--r--  vendor/memchr/src/arch/wasm32/mod.rs  7
-rw-r--r--  vendor/memchr/src/arch/wasm32/simd128/memchr.rs  1020
-rw-r--r--  vendor/memchr/src/arch/wasm32/simd128/mod.rs  6
-rw-r--r--  vendor/memchr/src/arch/wasm32/simd128/packedpair.rs  229
-rw-r--r--  vendor/memchr/src/arch/x86_64/avx2/memchr.rs  1352
-rw-r--r--  vendor/memchr/src/arch/x86_64/avx2/mod.rs  6
-rw-r--r--  vendor/memchr/src/arch/x86_64/avx2/packedpair.rs  272
-rw-r--r--  vendor/memchr/src/arch/x86_64/memchr.rs  335
-rw-r--r--  vendor/memchr/src/arch/x86_64/mod.rs  8
-rw-r--r--  vendor/memchr/src/arch/x86_64/sse2/memchr.rs  1077
-rw-r--r--  vendor/memchr/src/arch/x86_64/sse2/mod.rs  6
-rw-r--r--  vendor/memchr/src/arch/x86_64/sse2/packedpair.rs  232
-rw-r--r--  vendor/memchr/src/cow.rs  62
-rw-r--r--  vendor/memchr/src/ext.rs  52
-rw-r--r--  vendor/memchr/src/lib.rs  90
-rw-r--r--  vendor/memchr/src/macros.rs  20
-rw-r--r--  vendor/memchr/src/memchr.rs  903
-rw-r--r--  vendor/memchr/src/memchr/c.rs  44
-rw-r--r--  vendor/memchr/src/memchr/fallback.rs  329
-rw-r--r--  vendor/memchr/src/memchr/iter.rs  173
-rw-r--r--  vendor/memchr/src/memchr/mod.rs  410
-rw-r--r--  vendor/memchr/src/memchr/naive.rs  25
-rw-r--r--  vendor/memchr/src/memchr/x86/avx.rs  755
-rw-r--r--  vendor/memchr/src/memchr/x86/mod.rs  148
-rw-r--r--  vendor/memchr/src/memchr/x86/sse2.rs  791
-rw-r--r--  vendor/memchr/src/memchr/x86/sse42.rs  72
-rw-r--r--  vendor/memchr/src/memmem/genericsimd.rs  266
-rw-r--r--  vendor/memchr/src/memmem/mod.rs  830
-rw-r--r--  vendor/memchr/src/memmem/prefilter/fallback.rs  122
-rw-r--r--  vendor/memchr/src/memmem/prefilter/genericsimd.rs  207
-rw-r--r--  vendor/memchr/src/memmem/prefilter/mod.rs  570
-rw-r--r--  vendor/memchr/src/memmem/prefilter/wasm.rs  39
-rw-r--r--  vendor/memchr/src/memmem/prefilter/x86/avx.rs  46
-rw-r--r--  vendor/memchr/src/memmem/prefilter/x86/mod.rs  5
-rw-r--r--  vendor/memchr/src/memmem/prefilter/x86/sse.rs  42
-rw-r--r--  vendor/memchr/src/memmem/rabinkarp.rs  233
-rw-r--r--  vendor/memchr/src/memmem/rarebytes.rs  136
-rw-r--r--  vendor/memchr/src/memmem/searcher.rs  1030
-rw-r--r--  vendor/memchr/src/memmem/util.rs  88
-rw-r--r--  vendor/memchr/src/memmem/vector.rs  131
-rw-r--r--  vendor/memchr/src/memmem/wasm.rs  75
-rw-r--r--  vendor/memchr/src/memmem/x86/avx.rs  139
-rw-r--r--  vendor/memchr/src/memmem/x86/mod.rs  2
-rw-r--r--  vendor/memchr/src/memmem/x86/sse.rs  89
-rw-r--r--  vendor/memchr/src/tests/memchr/iter.rs  230
-rw-r--r--  vendor/memchr/src/tests/memchr/memchr.rs  134
-rw-r--r--  vendor/memchr/src/tests/memchr/mod.rs  314
-rw-r--r--  vendor/memchr/src/tests/memchr/naive.rs  33
-rw-r--r--  vendor/memchr/src/tests/memchr/prop.rs  321
-rw-r--r--  vendor/memchr/src/tests/memchr/simple.rs  23
-rw-r--r--  vendor/memchr/src/tests/memchr/testdata.rs  351
-rw-r--r--  vendor/memchr/src/tests/mod.rs  18
-rw-r--r--  vendor/memchr/src/tests/packedpair.rs  216
-rw-r--r--  vendor/memchr/src/tests/substring/mod.rs  232
-rw-r--r--  vendor/memchr/src/tests/substring/naive.rs  45
-rw-r--r--  vendor/memchr/src/tests/substring/prop.rs  126
-rw-r--r--  vendor/memchr/src/vector.rs  515
79 files changed, 13978 insertions(+), 6805 deletions(-)
diff --git a/vendor/memchr/.cargo-checksum.json b/vendor/memchr/.cargo-checksum.json
index f49638022..9ebbb0ab0 100644
--- a/vendor/memchr/.cargo-checksum.json
+++ b/vendor/memchr/.cargo-checksum.json
@@ -1 +1 @@
-{"files":{"COPYING":"01c266bced4a434da0051174d6bee16a4c82cf634e2679b6155d40d75012390f","Cargo.toml":"fdeda7d32fa12e4a1589d13c74ae5fd4f1065d0219ba73f8492e28248d84d146","LICENSE-MIT":"0f96a83840e146e43c0ec96a22ec1f392e0680e6c1226e6f3ba87e0740af850f","README.md":"51d941627e004588863b137918e908e34c4d599d12e03afd3e489e2bb61e3704","UNLICENSE":"7e12e5df4bae12cb21581ba157ced20e1986a0508dd10d0e8a4ab9a4cf94e85c","build.rs":"5638d9b60d40f44db96767ce32246de42158571364cce92531a85307ac7eda6c","rustfmt.toml":"1ca600239a27401c4a43f363cf3f38183a212affc1f31bff3ae93234bbaec228","scripts/make-byte-frequency-table":"21d1ded41fe5a780507bb88e1910d471b4081cc626a48891a408712e45b7b2bf","src/cow.rs":"a23c3b009e5215b5c3ac46627a5dd844235bef0136d76b3fc1eeeb744565c125","src/lib.rs":"9430cd37b13399df8f8c27a752ccdf6422a563e24171d1b4802424f9193a8f37","src/memchr/c.rs":"34f7caf79316f4b03908832fdbd4aff367f2bc30eae291478cc5a0a108ce6e76","src/memchr/fallback.rs":"48764f18b7ff1f00a9ac1c4ed8ec96ad11f7b09b2d062a8ed3fe81160add627d","src/memchr/iter.rs":"61463e7fa22ca8f212c2cbfb882af0c87b0fb1bc6b4676678a4822a581ec1037","src/memchr/mod.rs":"d5bfc881c7c089e1a0825209a4d21c3f792f38c6f16f3bc715d0d539477376b6","src/memchr/naive.rs":"c7453bc99cc4e58eb37cf5a50c88688833e50a270ee1849baefddb8acc0ccd94","src/memchr/x86/avx.rs":"3c2750174ce7ff033daa4096e7961bbee9a2da898068266b27dee22ef8cfddad","src/memchr/x86/mod.rs":"a642d5aefdb7452ead4ab7946b5c6cfb6cc6df636dcd0ebbd6f5e6e1ac8305c0","src/memchr/x86/sse2.rs":"79ede1aba71a655e86eb5873d682c5da26933bffa4fffd7042a2313f18cf4675","src/memchr/x86/sse42.rs":"de4c6f354dbfec170876cddb8d9157b35928f96ed2339a0c5d094cc953a2f52d","src/memmem/byte_frequencies.rs":"2fb85b381c038c1e44ce94294531cdcd339dca48b1e61f41455666e802cbbc9e","src/memmem/genericsimd.rs":"9ce7283db0994438eb6df2bea6ad984e80512b6f643ebae7ae7d82eb5d39fa11","src/memmem/mod.rs":"949fb8e11a23030d59b34fd8c7c196150f133e909a8448705c77a751c436907d","src/memmem/prefilter/fallback.rs":"d32248c41aa09701c2410c52f948bbe009dd1b13a01b444ce0fb8c4b4e404ede","src/memmem/prefilter/genericsimd.rs":"57d5523cf0299b37ef1dd1b351e3d387d5070f2f7ecffc9a9ca66528101ebd3f","src/memmem/prefilter/mod.rs":"ad8b4ac72c025f11d6b641c5fc0888468112758dcdc6bb72b43f932d2005ea4e","src/memmem/prefilter/wasm.rs":"14f684412fca35445a94760a6973d772dfd22d329ebae3b52b525d2a1f3acd63","src/memmem/prefilter/x86/avx.rs":"e344cae36a88b59c07a1c1d395edeb9c636a399e1528ce69b2bc7c94d8d8bb0b","src/memmem/prefilter/x86/mod.rs":"df2d84b23b22574383c281d33671a121b5faf7b1a48dd6f67c3085cd02cd4498","src/memmem/prefilter/x86/sse.rs":"daa648fc2a90d37299803a80d632e8a47a30ce8719d0ac2a2ea2cde3b30b6fef","src/memmem/rabinkarp.rs":"9b44eb092524a51792eba4deaca6c6d3cbc51db98cb548ea4fa7e5d8988cc71a","src/memmem/rarebytes.rs":"571082c71fc3dca5e4304171d41fb3c44e241df6dcd88bac4d7a15b52f9521e0","src/memmem/twoway.rs":"102f8bbb29696d5656cd2f5a1769a3af96d044fb09972881455cfb6424d6b50a","src/memmem/util.rs":"0194d40b912137e2352863af9cc1c0273baf97fdf6b27799628680846c06febd","src/memmem/vector.rs":"96e6f45f8ad11a822c4f18393839225d7f40f898ad657e109ba1b3288af0ef8f","src/memmem/wasm.rs":"87da03c964f054db30cc972d07a74e8902ec1248e2338ecd1dbac430f43fffc2","src/memmem/x86/avx.rs":"de85dbc415603c844baf94fbc92d676a738dd4b99246be468bd5f7be5921b25f","src/memmem/x86/mod.rs":"5012fca41b91caf229278aa221e8dd514ede497fe4938d64562d03fef2fc46e6","src/memmem/x86/sse.rs":"148a40c0952aca8b16d9eb3e724a5b9b60693bc7b2bcc5209bcc43c94faf560a","src/tests/memchr/iter.rs":"b68c7ecdb6222c5dbf61212e6863f78f98ad343868a74cb8612692fc790240b2","src/tests/memchr/mem
chr.rs":"09589c5899324c9b26ea4513c80389a2ffdf6ddc460031e2ca8da43bd493ae3f","src/tests/memchr/mod.rs":"29e0855f946c7babf603b3d610a29235a56a26a4c867fef0768542388eac4c95","src/tests/memchr/simple.rs":"b9997903ede972272c01c1750522a20692a28488cc7c5cf745ea83ff96d65fe3","src/tests/memchr/testdata.rs":"3e34377fe60eca3687d1ebc66127bd631af27ceaccc8f08806a293199b69a83f","src/tests/mod.rs":"9054a2a2f9af140f305ca29155d942fafbac9fb0874067611adc8a5990546be4","src/tests/x86_64-soft_float.json":"c0e416487fe9b4809534edb7db2a9eff3453dc40d9f1e23362c37f45a77ec717"},"package":"2dffe52ecf27772e601905b7522cb4ef790d2cc203488bbd0e2fe85fcb74566d"} \ No newline at end of file
+{"files":{"COPYING":"01c266bced4a434da0051174d6bee16a4c82cf634e2679b6155d40d75012390f","Cargo.toml":"ec5b6a33c10b0c455159687c391ee3bbcda6c272156d965203c06f5385b4401d","LICENSE-MIT":"0f96a83840e146e43c0ec96a22ec1f392e0680e6c1226e6f3ba87e0740af850f","README.md":"b77bcf5362c0b4d5500f6bcc488747daf0020bc83862b02ca20e4d8113cfb5b2","UNLICENSE":"7e12e5df4bae12cb21581ba157ced20e1986a0508dd10d0e8a4ab9a4cf94e85c","rustfmt.toml":"1ca600239a27401c4a43f363cf3f38183a212affc1f31bff3ae93234bbaec228","src/arch/aarch64/memchr.rs":"5bb70f915084e629d940dbc322f5b9096b2e658cf63fea8a2f6e7550412e73a0","src/arch/aarch64/mod.rs":"44cd1a614bd66f1e66fc86c541d3c3b8d3a14a644c13e8bf816df3f555eac2d4","src/arch/aarch64/neon/memchr.rs":"e8c00b8fb2c7e2711832ae3cedefe59f32ebedd7dfa4d0ec6de2a566c979daea","src/arch/aarch64/neon/mod.rs":"eab6d56c2b2354db4ee395f40282cd49f97e2ab853547be5de6e65fbe1b2f634","src/arch/aarch64/neon/packedpair.rs":"fbdfdbfaf7b76b234db261fbe55a55c4479d32cdc65a654d60417c2d1c237849","src/arch/all/memchr.rs":"916d058b8b75b795996608b59767e348c6d5ae50061719c595ea409aefca36b9","src/arch/all/mod.rs":"b3190c1c48b23efe4b5e320aebdf8f0260a2cb2f63c0deba95ad0ef8219695d7","src/arch/all/packedpair/default_rank.rs":"abffd1b5b8b7a3be95c03dd1105b905c246a379854dc56f1e846ea7c4408f2c7","src/arch/all/packedpair/mod.rs":"292b66042c5b5c78bba33db6526aeae6904db803d601fcdd29032b87b3eb3754","src/arch/all/rabinkarp.rs":"236f69c04b90c14c253ae6c8d9b78150b4a56df75bb50af6d63b15145668b7cc","src/arch/all/shiftor.rs":"0d79117f52a1e4795843603a3bb0b45397df4ad5e4184bbc923658dab9dc3b5f","src/arch/all/twoway.rs":"47c97a265bfbafde90a618946643d3e97dfd9a85f01aa4ac758cd4c1573a450d","src/arch/generic/memchr.rs":"88290761bab740878401e914d71866da6501cdcef53d1249ec6fda4c7f9c12ae","src/arch/generic/mod.rs":"1dd75f61e0ea2563b8205a08aaa7b55500130aa331d18b9e9f995724b66c7a39","src/arch/generic/packedpair.rs":"a4a6efb29877ced9cf4c4e5ae9f36a79f019a16b831f2b9424899a1513d458ad","src/arch/mod.rs":"6dbd9e0b1b89fecb9faac5df6edfc87e24607e9099136aa831f3f056b14e22db","src/arch/wasm32/memchr.rs":"bfaaeca702cc32e605a06d5078d26ac59263d3c4eb04f9756e6be5e2850c3d0d","src/arch/wasm32/mod.rs":"a20377aa8fe07d68594879101dc73061e4f51d9c8d812b593b1f376e3c8add79","src/arch/wasm32/simd128/memchr.rs":"bac2c4c43fe710c83a6f2b1118fede043be89dd821d4b532907f129f09fdb5cf","src/arch/wasm32/simd128/mod.rs":"c157b373faedbfd65323be432e25bc411d97aa1b7bc58e76048614c7b2bf3bf6","src/arch/wasm32/simd128/packedpair.rs":"47e7875f1a0b502f3f30ddfd9257ed7ad4568fb7d968b5e6c01ba9e2aab2a459","src/arch/x86_64/avx2/memchr.rs":"576ec0c30f49874f7fd9f6caeb490d56132c0fbbaa4d877b1aa532cafce19323","src/arch/x86_64/avx2/mod.rs":"0033d1b712d0b10f0f273ef9aa8caa53e05e49f4c56a64f39af0b9df97eec584","src/arch/x86_64/avx2/packedpair.rs":"87b69cb4301815906127db4f6370f572c7c5d5dad35c0946c00ad888dbcaec8c","src/arch/x86_64/memchr.rs":"99a1dbe4156d498e6f910d06d3d3b31e7f6d06dff7d13a4c51b33a02b7e2fba9","src/arch/x86_64/mod.rs":"61b2aa876942fd3e78714c2ae21e356c8634545c06995020f443fa50218df027","src/arch/x86_64/sse2/memchr.rs":"68fc3b8f9eddf82192979c3aa11e5141f085cbb993c49c340558719a904679dc","src/arch/x86_64/sse2/mod.rs":"38b70ae52a64ec974dbb91d04d6ca8013d9e06d1fe4af852206bbc2faf1c59aa","src/arch/x86_64/sse2/packedpair.rs":"241ea981d8eea6024769f1c9375f726a9bb9700160c5857781d4befd9f5ef55d","src/cow.rs":"34eddd02cb82cc2d5a2c640891d64efe332dabcc1eea5115764200d8f46b66f7","src/ext.rs":"c472bcc41a7ef48980d976e954e87ef9fdfdfd30ac0199b959cc7e5b9d563ab3","src/lib.rs":"4e56613894535a80d669dda77697fb8c23769ede5e3fcedc5d999617b5b1d094","sr
c/macros.rs":"3e4b39252bfa471fad384160a43f113ebfec7bec46a85d16f006622881dd2081","src/memchr.rs":"36f1c03304261877cd7f75c7ed8f7daff7a5c570cedce375e38e9b1ca44467f7","src/memmem/mod.rs":"7297b60ac7d7cb50ba44bf58a7cde7d32819237f2ec45f311d20bfe9379d4c78","src/memmem/searcher.rs":"69c38fb33d8f1a2a26769a81e514428240c8f8f15cea5302873d90b80391dd89","src/tests/memchr/mod.rs":"269f8e4b4f7f5ea458f27a3c174eb1020ffb2484eeba9464170beb51747df69b","src/tests/memchr/naive.rs":"6a0bee033e5edfb5b1d5769a5fa1c78388f7e9ff7bb91cb67f0ad029289e00e7","src/tests/memchr/prop.rs":"7bf7435087fbf08c5014c216b76575349735590d6b1d0e448921a1dc17bc0ea7","src/tests/mod.rs":"7cec8f809e279310a465c6a7725087970f219a676cc76c83de30c695bb490740","src/tests/packedpair.rs":"b02ec4fbb61a8653cb5f2268c31bc9168b8043347f2abdcc74081acf83b98e15","src/tests/substring/mod.rs":"c7660d10749363ac4687e7da2b5fda60768230425df8ba416c0c28b8d56a5c74","src/tests/substring/naive.rs":"df6f55d165382b8a53762ba4c324926cac13ebc62cde1805f4ce08740b326483","src/tests/substring/prop.rs":"38c15992609b5681a95d838ae6f2933e00a1219f2c971bfba245f96e0729fcdc","src/tests/x86_64-soft_float.json":"c0e416487fe9b4809534edb7db2a9eff3453dc40d9f1e23362c37f45a77ec717","src/vector.rs":"ef823ae8c54053780a0e7aeaee14b6c6ac2aea4567bf701ae8be137806c6d293"},"package":"5486aed0026218e61b8a01d5fbd5a0a134649abb71a0e53b7bc088529dced86e"} \ No newline at end of file
diff --git a/vendor/memchr/Cargo.toml b/vendor/memchr/Cargo.toml
index 630195281..b43898e29 100644
--- a/vendor/memchr/Cargo.toml
+++ b/vendor/memchr/Cargo.toml
@@ -10,17 +10,21 @@
# See Cargo.toml.orig for the original contents.
[package]
-edition = "2018"
+edition = "2021"
+rust-version = "1.60"
name = "memchr"
-version = "2.5.0"
+version = "2.6.2"
authors = [
"Andrew Gallant <jamslam@gmail.com>",
"bluss",
]
exclude = [
"/bench",
+ "/benchmarks",
"/.github",
"/fuzz",
+ "/scripts",
+ "/tmp",
]
description = "Safe interface to memchr."
homepage = "https://github.com/BurntSushi/memchr"
@@ -33,18 +37,21 @@ keywords = [
"strchr",
"string",
]
-license = "Unlicense/MIT"
+license = "Unlicense OR MIT"
repository = "https://github.com/BurntSushi/memchr"
+[package.metadata.docs.rs]
+rustdoc-args = ["--generate-link-to-definition"]
+
[profile.bench]
-debug = true
+debug = 2
[profile.release]
-debug = true
+debug = 2
[profile.test]
opt-level = 3
-debug = true
+debug = 2
[lib]
name = "memchr"
@@ -59,20 +66,22 @@ version = "1.0.0"
optional = true
package = "rustc-std-workspace-core"
-[dependencies.libc]
-version = "0.2.18"
+[dependencies.log]
+version = "0.4.20"
optional = true
-default-features = false
[dev-dependencies.quickcheck]
version = "1.0.3"
default-features = false
[features]
+alloc = []
default = ["std"]
+libc = []
+logging = ["dep:log"]
rustc-dep-of-std = [
"core",
"compiler_builtins",
]
-std = []
+std = ["alloc"]
use_std = ["std"]
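The new `logging` feature above pulls in `log` as an optional dependency. A minimal sketch of how a consumer might surface those messages, assuming `env_logger` as the `log` backend (the backend choice and the consumer code are illustrative, not part of this diff):

```rust
// Cargo.toml (consumer, assumed):
//   memchr = { version = "2.6", features = ["logging"] }
//   env_logger = "0.10"
fn main() {
    // With RUST_LOG=debug set, memchr's internal `debug!` calls (e.g.
    // which searcher implementation was chosen) appear on stderr.
    env_logger::init();
    let haystack = b"aqua vitae";
    assert_eq!(memchr::memchr(b'v', haystack), Some(5));
}
```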
diff --git a/vendor/memchr/README.md b/vendor/memchr/README.md
index 77a7a0f5b..54dabd585 100644
--- a/vendor/memchr/README.md
+++ b/vendor/memchr/README.md
@@ -35,30 +35,19 @@ memchr links to the standard library by default, but you can disable the
memchr = { version = "2", default-features = false }
```
-On x86 platforms, when the `std` feature is disabled, the SSE2 accelerated
-implementations will be used. When `std` is enabled, AVX accelerated
+On `x86_64` platforms, when the `std` feature is disabled, the SSE2 accelerated
+implementations will be used. When `std` is enabled, AVX2 accelerated
implementations will be used if the CPU is determined to support it at runtime.
-### Using libc
-
-`memchr` is a routine that is part of libc, although this crate does not use
-libc by default. Instead, it uses its own routines, which are either vectorized
-or generic fallback routines. In general, these should be competitive with
-what's in libc, although this has not been tested for all architectures. If
-using `memchr` from libc is desirable and a vectorized routine is not otherwise
-available in this crate, then enabling the `libc` feature will use libc's
-version of `memchr`.
-
-The rest of the functions in this crate, e.g., `memchr2` or `memrchr3` and the
-substring search routines, will always use the implementations in this crate.
-One exception to this is `memrchr`, which is an extension in `libc` found on
-Linux. On Linux, `memrchr` is used in precisely the same scenario as `memchr`,
-as described above.
+SIMD accelerated routines are also available on the `wasm32` and `aarch64`
+targets. The `std` feature is not required to use them.
+When a SIMD version is not available, this crate falls back to
+[SWAR](https://en.wikipedia.org/wiki/SWAR) techniques.
### Minimum Rust version policy
-This crate's minimum supported `rustc` version is `1.41.1`.
+This crate's minimum supported `rustc` version is `1.60.0`.
The current policy is that the minimum Rust version required to use this crate
can be increased in minor version updates. For example, if `crate 1.0` requires
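The README changes describe behavior rather than API; for reference, a minimal sketch of the crate's core entry points, which also work with `default-features = false` since none of them require `std`:

```rust
fn main() {
    let haystack = b"the quick brown fox";
    // First occurrence of a single byte.
    assert_eq!(memchr::memchr(b'q', haystack), Some(4));
    // Last occurrence of a single byte.
    assert_eq!(memchr::memrchr(b'o', haystack), Some(17));
    // Substring search.
    assert_eq!(memchr::memmem::find(haystack, b"brown"), Some(10));
}
```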
diff --git a/vendor/memchr/build.rs b/vendor/memchr/build.rs
deleted file mode 100644
index 584a60856..000000000
--- a/vendor/memchr/build.rs
+++ /dev/null
@@ -1,88 +0,0 @@
-use std::env;
-
-fn main() {
- enable_simd_optimizations();
- enable_libc();
-}
-
-// This adds various simd cfgs if this compiler and target support it.
-//
-// This can be disabled with RUSTFLAGS="--cfg memchr_disable_auto_simd", but
-// this is generally only intended for testing.
-//
-// On targets which don't feature SSE2, this is disabled, as LLVM wouldn't know
-// how to work with SSE2 operands. Enabling SSE4.2 and AVX on SSE2-only targets
-// is not a problem. In that case, the fastest option will be chosen at
-// runtime.
-fn enable_simd_optimizations() {
- if is_env_set("CARGO_CFG_MEMCHR_DISABLE_AUTO_SIMD") {
- return;
- }
- let arch = env::var("CARGO_CFG_TARGET_ARCH").unwrap();
- match &arch[..] {
- "x86_64" => {
- if !target_has_feature("sse2") {
- return;
- }
- println!("cargo:rustc-cfg=memchr_runtime_simd");
- println!("cargo:rustc-cfg=memchr_runtime_sse2");
- println!("cargo:rustc-cfg=memchr_runtime_sse42");
- println!("cargo:rustc-cfg=memchr_runtime_avx");
- }
- "wasm32" | "wasm64" => {
- if !target_has_feature("simd128") {
- return;
- }
- println!("cargo:rustc-cfg=memchr_runtime_simd");
- println!("cargo:rustc-cfg=memchr_runtime_wasm128");
- }
- _ => {}
- }
-}
-
-// This adds a `memchr_libc` cfg if and only if libc can be used, if no other
-// better option is available.
-//
-// This could be performed in the source code, but it's simpler to do it once
-// here and consolidate it into one cfg knob.
-//
-// Basically, we use libc only if its enabled and if we aren't targeting a
-// known bad platform. For example, wasm32 doesn't have a libc and the
-// performance of memchr on Windows is seemingly worse than the fallback
-// implementation.
-fn enable_libc() {
- const NO_ARCH: &'static [&'static str] = &["wasm32", "windows"];
- const NO_ENV: &'static [&'static str] = &["sgx"];
-
- if !is_feature_set("LIBC") {
- return;
- }
-
- let arch = match env::var("CARGO_CFG_TARGET_ARCH") {
- Err(_) => return,
- Ok(arch) => arch,
- };
- let env = match env::var("CARGO_CFG_TARGET_ENV") {
- Err(_) => return,
- Ok(env) => env,
- };
- if NO_ARCH.contains(&&*arch) || NO_ENV.contains(&&*env) {
- return;
- }
-
- println!("cargo:rustc-cfg=memchr_libc");
-}
-
-fn is_feature_set(name: &str) -> bool {
- is_env_set(&format!("CARGO_FEATURE_{}", name))
-}
-
-fn is_env_set(name: &str) -> bool {
- env::var_os(name).is_some()
-}
-
-fn target_has_feature(feature: &str) -> bool {
- env::var("CARGO_CFG_TARGET_FEATURE")
- .map(|features| features.contains(feature))
- .unwrap_or(false)
-}
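The deleted build script emitted `memchr_runtime_*` cfgs; in 2.6 the equivalent decision moves into the source itself via `target_feature` cfgs (see the `defraw!` macro in the new `aarch64` wrapper below). A compilable sketch of that pattern, not the crate's actual code:

```rust
// One of the two cfg'd blocks is stripped before type checking, so the
// surviving block becomes the function's tail expression.
fn find_byte(needle: u8, haystack: &[u8]) -> Option<usize> {
    #[cfg(target_feature = "neon")]
    {
        // On aarch64 with neon, the real crate dispatches to its
        // vectorized searcher here; this scalar scan just stands in.
        haystack.iter().position(|&b| b == needle)
    }
    #[cfg(not(target_feature = "neon"))]
    {
        // Portable scalar fallback.
        haystack.iter().position(|&b| b == needle)
    }
}

fn main() {
    assert_eq!(find_byte(b'x', b"box"), Some(2));
}
```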
diff --git a/vendor/memchr/scripts/make-byte-frequency-table b/vendor/memchr/scripts/make-byte-frequency-table
deleted file mode 100755
index 37eeca7b7..000000000
--- a/vendor/memchr/scripts/make-byte-frequency-table
+++ /dev/null
@@ -1,74 +0,0 @@
-#!/usr/bin/env python
-
-# This does simple normalized frequency analysis on UTF-8 encoded text. The
-# result of the analysis is translated to a ranked list, where every byte is
-# assigned a rank. This list is written to src/freqs.rs.
-#
-# Currently, the frequencies are generated from the following corpuses:
-#
-# * The CIA world fact book
-# * The source code of rustc
-# * Septuaginta
-
-from __future__ import absolute_import, division, print_function
-
-import argparse
-from collections import Counter
-import sys
-
-preamble = '''
-// NOTE: The following code was generated by "scripts/frequencies.py", do not
-// edit directly
-'''.lstrip()
-
-
-def eprint(*args, **kwargs):
- kwargs['file'] = sys.stderr
- print(*args, **kwargs)
-
-
-def main():
- p = argparse.ArgumentParser()
- p.add_argument('corpus', metavar='FILE', nargs='+')
- args = p.parse_args()
-
- # Get frequency counts of each byte.
- freqs = Counter()
- for i in range(0, 256):
- freqs[i] = 0
-
- eprint('reading entire corpus into memory')
- corpus = []
- for fpath in args.corpus:
- corpus.append(open(fpath, 'rb').read())
-
- eprint('computing byte frequencies')
- for c in corpus:
- for byte in c:
- freqs[byte] += 1.0 / float(len(c))
-
- eprint('writing Rust code')
- # Get the rank of each byte. A lower rank => lower relative frequency.
- rank = [0] * 256
- for i, (byte, _) in enumerate(freqs.most_common()):
- # print(byte)
- rank[byte] = 255 - i
-
- # Forcefully set the highest rank possible for bytes that start multi-byte
- # UTF-8 sequences. The idea here is that a continuation byte will be more
- # discerning in a homogenous haystack.
- for byte in range(0xC0, 0xFF + 1):
- rank[byte] = 255
-
- # Now write Rust.
- olines = ['pub const BYTE_FREQUENCIES: [u8; 256] = [']
- for byte in range(256):
- olines.append(' %3d, // %r' % (rank[byte], chr(byte)))
- olines.append('];')
-
- print(preamble)
- print('\n'.join(olines))
-
-
-if __name__ == '__main__':
- main()
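For reference, the ranking step the deleted script performed, sketched in Rust (a hypothetical helper, not part of the crate; the script's actual output now lives in `src/arch/all/packedpair/default_rank.rs`):

```rust
// Normalize per-corpus byte frequencies, then rank bytes 0..=255 by
// ascending frequency (lower rank = rarer byte = better prefilter pick).
fn byte_ranks(corpora: &[Vec<u8>]) -> [u8; 256] {
    let mut freqs = [0f64; 256];
    for corpus in corpora {
        let len = corpus.len() as f64;
        for &b in corpus {
            freqs[b as usize] += 1.0 / len; // normalized per corpus
        }
    }
    // Sort byte values by ascending frequency; position becomes the rank.
    let mut order: Vec<u8> = (0u8..=255).collect();
    order.sort_by(|&a, &b| {
        freqs[a as usize].partial_cmp(&freqs[b as usize]).unwrap()
    });
    let mut rank = [0u8; 256];
    for (i, &b) in order.iter().enumerate() {
        rank[b as usize] = i as u8;
    }
    // Like the script, force bytes that start multi-byte UTF-8 sequences
    // to the highest rank so they are never preferred as "rare".
    for b in 0xC0..=0xFF {
        rank[b as usize] = 255;
    }
    rank
}
```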
diff --git a/vendor/memchr/src/arch/aarch64/memchr.rs b/vendor/memchr/src/arch/aarch64/memchr.rs
new file mode 100644
index 000000000..e0053b2a2
--- /dev/null
+++ b/vendor/memchr/src/arch/aarch64/memchr.rs
@@ -0,0 +1,137 @@
+/*!
+Wrapper routines for `memchr` and friends.
+
+These routines choose the best implementation at compile time. (This is
+different from `x86_64` because it is expected that `neon` is almost always
+available for `aarch64` targets.)
+*/
+
+macro_rules! defraw {
+ ($ty:ident, $find:ident, $start:ident, $end:ident, $($needles:ident),+) => {{
+ #[cfg(target_feature = "neon")]
+ {
+ use crate::arch::aarch64::neon::memchr::$ty;
+
+ debug!("chose neon for {}", stringify!($ty));
+ debug_assert!($ty::is_available());
+            // SAFETY: We know that neon memchr is always available whenever
+ // code is compiled for `aarch64` with the `neon` target feature
+ // enabled.
+ $ty::new_unchecked($($needles),+).$find($start, $end)
+ }
+ #[cfg(not(target_feature = "neon"))]
+ {
+ use crate::arch::all::memchr::$ty;
+
+ debug!(
+ "no neon feature available, using fallback for {}",
+ stringify!($ty),
+ );
+ $ty::new($($needles),+).$find($start, $end)
+ }
+ }}
+}
+
+/// memchr, but using raw pointers to represent the haystack.
+///
+/// # Safety
+///
+/// Pointers must be valid. See `One::find_raw`.
+#[inline(always)]
+pub(crate) unsafe fn memchr_raw(
+ n1: u8,
+ start: *const u8,
+ end: *const u8,
+) -> Option<*const u8> {
+ defraw!(One, find_raw, start, end, n1)
+}
+
+/// memrchr, but using raw pointers to represent the haystack.
+///
+/// # Safety
+///
+/// Pointers must be valid. See `One::rfind_raw`.
+#[inline(always)]
+pub(crate) unsafe fn memrchr_raw(
+ n1: u8,
+ start: *const u8,
+ end: *const u8,
+) -> Option<*const u8> {
+ defraw!(One, rfind_raw, start, end, n1)
+}
+
+/// memchr2, but using raw pointers to represent the haystack.
+///
+/// # Safety
+///
+/// Pointers must be valid. See `Two::find_raw`.
+#[inline(always)]
+pub(crate) unsafe fn memchr2_raw(
+ n1: u8,
+ n2: u8,
+ start: *const u8,
+ end: *const u8,
+) -> Option<*const u8> {
+ defraw!(Two, find_raw, start, end, n1, n2)
+}
+
+/// memrchr2, but using raw pointers to represent the haystack.
+///
+/// # Safety
+///
+/// Pointers must be valid. See `Two::rfind_raw`.
+#[inline(always)]
+pub(crate) unsafe fn memrchr2_raw(
+ n1: u8,
+ n2: u8,
+ start: *const u8,
+ end: *const u8,
+) -> Option<*const u8> {
+ defraw!(Two, rfind_raw, start, end, n1, n2)
+}
+
+/// memchr3, but using raw pointers to represent the haystack.
+///
+/// # Safety
+///
+/// Pointers must be valid. See `Three::find_raw`.
+#[inline(always)]
+pub(crate) unsafe fn memchr3_raw(
+ n1: u8,
+ n2: u8,
+ n3: u8,
+ start: *const u8,
+ end: *const u8,
+) -> Option<*const u8> {
+ defraw!(Three, find_raw, start, end, n1, n2, n3)
+}
+
+/// memrchr3, but using raw pointers to represent the haystack.
+///
+/// # Safety
+///
+/// Pointers must be valid. See `Three::rfind_raw`.
+#[inline(always)]
+pub(crate) unsafe fn memrchr3_raw(
+ n1: u8,
+ n2: u8,
+ n3: u8,
+ start: *const u8,
+ end: *const u8,
+) -> Option<*const u8> {
+ defraw!(Three, rfind_raw, start, end, n1, n2, n3)
+}
+
+/// Count all matching bytes, but using raw pointers to represent the haystack.
+///
+/// # Safety
+///
+/// Pointers must be valid. See `One::count_raw`.
+#[inline(always)]
+pub(crate) unsafe fn count_raw(
+ n1: u8,
+ start: *const u8,
+ end: *const u8,
+) -> usize {
+ defraw!(One, count_raw, start, end, n1)
+}
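These wrappers sit on top of the public searcher types added below in `src/arch/aarch64/neon/memchr.rs`. A usage sketch of that API on an `aarch64` target (per the docs in this diff, the constructor returns `None` when neon is unavailable):

```rust
// Only compiles when targeting aarch64.
use memchr::arch::aarch64::neon::memchr::One;

fn main() {
    let haystack = b"berries and cherries";
    if let Some(searcher) = One::new(b'r') {
        assert_eq!(searcher.find(haystack), Some(2));
        assert_eq!(searcher.rfind(haystack), Some(16));
        // Counting matches directly is typically faster than repeated finds.
        assert_eq!(searcher.count(haystack), 4);
    }
}
```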
diff --git a/vendor/memchr/src/arch/aarch64/mod.rs b/vendor/memchr/src/arch/aarch64/mod.rs
new file mode 100644
index 000000000..7b3291257
--- /dev/null
+++ b/vendor/memchr/src/arch/aarch64/mod.rs
@@ -0,0 +1,7 @@
+/*!
+Vector algorithms for the `aarch64` target.
+*/
+
+pub mod neon;
+
+pub(crate) mod memchr;
diff --git a/vendor/memchr/src/arch/aarch64/neon/memchr.rs b/vendor/memchr/src/arch/aarch64/neon/memchr.rs
new file mode 100644
index 000000000..5fcc76237
--- /dev/null
+++ b/vendor/memchr/src/arch/aarch64/neon/memchr.rs
@@ -0,0 +1,1031 @@
+/*!
+This module defines 128-bit vector implementations of `memchr` and friends.
+
+The main types in this module are [`One`], [`Two`] and [`Three`]. They are for
+searching for one, two or three distinct bytes, respectively, in a haystack.
+Each type also has corresponding double ended iterators. These searchers are
+typically much faster than scalar routines accomplishing the same task.
+
+The `One` searcher also provides a [`One::count`] routine for efficiently
+counting the number of times a single byte occurs in a haystack. This is
+useful, for example, for counting the number of lines in a haystack. This
+routine exists because it is usually faster, especially with a high match
+count, than using [`One::find`] repeatedly. ([`OneIter`] specializes its
+`Iterator::count` implementation to use this routine.)
+
+Only one, two and three bytes are supported because three bytes is about
+the point where one sees diminishing returns. Beyond this point, it's
+probably (but not necessarily) better to just use a simple `[bool; 256]` array
+or similar. However, it depends mightily on the specific workload and the
+expected match frequency.
+*/
+
+use core::arch::aarch64::uint8x16_t;
+
+use crate::{arch::generic::memchr as generic, ext::Pointer, vector::Vector};
+
+/// Finds all occurrences of a single byte in a haystack.
+#[derive(Clone, Copy, Debug)]
+pub struct One(generic::One<uint8x16_t>);
+
+impl One {
+ /// Create a new searcher that finds occurrences of the needle byte given.
+ ///
+ /// This particular searcher is specialized to use neon vector instructions
+ /// that typically make it quite fast.
+ ///
+ /// If neon is unavailable in the current environment, then `None` is
+ /// returned.
+ #[inline]
+ pub fn new(needle: u8) -> Option<One> {
+ if One::is_available() {
+ // SAFETY: we check that neon is available above.
+ unsafe { Some(One::new_unchecked(needle)) }
+ } else {
+ None
+ }
+ }
+
+ /// Create a new finder specific to neon vectors and routines without
+ /// checking that neon is available.
+ ///
+ /// # Safety
+ ///
+ /// Callers must guarantee that it is safe to execute `neon` instructions
+ /// in the current environment.
+ ///
+ /// Note that it is a common misconception that if one compiles for an
+    /// `aarch64` target, then they therefore automatically have access to neon
+ /// instructions. While this is almost always the case, it isn't true in
+ /// 100% of cases.
+ #[target_feature(enable = "neon")]
+ #[inline]
+ pub unsafe fn new_unchecked(needle: u8) -> One {
+ One(generic::One::new(needle))
+ }
+
+ /// Returns true when this implementation is available in the current
+ /// environment.
+ ///
+ /// When this is true, it is guaranteed that [`One::new`] will return
+ /// a `Some` value. Similarly, when it is false, it is guaranteed that
+ /// `One::new` will return a `None` value.
+ ///
+ /// Note also that for the lifetime of a single program, if this returns
+ /// true then it will always return true.
+ #[inline]
+ pub fn is_available() -> bool {
+ #[cfg(target_feature = "neon")]
+ {
+ true
+ }
+ #[cfg(not(target_feature = "neon"))]
+ {
+ false
+ }
+ }
+
+ /// Return the first occurrence of one of the needle bytes in the given
+ /// haystack. If no such occurrence exists, then `None` is returned.
+ ///
+ /// The occurrence is reported as an offset into `haystack`. Its maximum
+ /// value is `haystack.len() - 1`.
+ #[inline]
+ pub fn find(&self, haystack: &[u8]) -> Option<usize> {
+ // SAFETY: `find_raw` guarantees that if a pointer is returned, it
+ // falls within the bounds of the start and end pointers.
+ unsafe {
+ generic::search_slice_with_raw(haystack, |s, e| {
+ self.find_raw(s, e)
+ })
+ }
+ }
+
+ /// Return the last occurrence of one of the needle bytes in the given
+ /// haystack. If no such occurrence exists, then `None` is returned.
+ ///
+ /// The occurrence is reported as an offset into `haystack`. Its maximum
+ /// value is `haystack.len() - 1`.
+ #[inline]
+ pub fn rfind(&self, haystack: &[u8]) -> Option<usize> {
+ // SAFETY: `rfind_raw` guarantees that if a pointer is returned, it
+ // falls within the bounds of the start and end pointers.
+ unsafe {
+ generic::search_slice_with_raw(haystack, |s, e| {
+ self.rfind_raw(s, e)
+ })
+ }
+ }
+
+ /// Counts all occurrences of this byte in the given haystack.
+ #[inline]
+ pub fn count(&self, haystack: &[u8]) -> usize {
+ // SAFETY: All of our pointers are derived directly from a borrowed
+ // slice, which is guaranteed to be valid.
+ unsafe {
+ let start = haystack.as_ptr();
+ let end = start.add(haystack.len());
+ self.count_raw(start, end)
+ }
+ }
+
+ /// Like `find`, but accepts and returns raw pointers.
+ ///
+ /// When a match is found, the pointer returned is guaranteed to be
+ /// `>= start` and `< end`.
+ ///
+ /// This routine is useful if you're already using raw pointers and would
+ /// like to avoid converting back to a slice before executing a search.
+ ///
+ /// # Safety
+ ///
+ /// * Both `start` and `end` must be valid for reads.
+ /// * Both `start` and `end` must point to an initialized value.
+ /// * Both `start` and `end` must point to the same allocated object and
+ /// must either be in bounds or at most one byte past the end of the
+ /// allocated object.
+ /// * Both `start` and `end` must be _derived from_ a pointer to the same
+ /// object.
+ /// * The distance between `start` and `end` must not overflow `isize`.
+ /// * The distance being in bounds must not rely on "wrapping around" the
+ /// address space.
+ ///
+ /// Note that callers may pass a pair of pointers such that `start >= end`.
+ /// In that case, `None` will always be returned.
+ #[inline]
+ pub unsafe fn find_raw(
+ &self,
+ start: *const u8,
+ end: *const u8,
+ ) -> Option<*const u8> {
+ if start >= end {
+ return None;
+ }
+ if end.distance(start) < uint8x16_t::BYTES {
+ // SAFETY: We require the caller to pass valid start/end pointers.
+ return generic::fwd_byte_by_byte(start, end, |b| {
+ b == self.0.needle1()
+ });
+ }
+ // SAFETY: Building a `One` means it's safe to call 'neon' routines.
+ // Also, we've checked that our haystack is big enough to run on the
+ // vector routine. Pointer validity is caller's responsibility.
+ self.find_raw_impl(start, end)
+ }
+
+ /// Like `rfind`, but accepts and returns raw pointers.
+ ///
+ /// When a match is found, the pointer returned is guaranteed to be
+ /// `>= start` and `< end`.
+ ///
+ /// This routine is useful if you're already using raw pointers and would
+ /// like to avoid converting back to a slice before executing a search.
+ ///
+ /// # Safety
+ ///
+ /// * Both `start` and `end` must be valid for reads.
+ /// * Both `start` and `end` must point to an initialized value.
+ /// * Both `start` and `end` must point to the same allocated object and
+ /// must either be in bounds or at most one byte past the end of the
+ /// allocated object.
+ /// * Both `start` and `end` must be _derived from_ a pointer to the same
+ /// object.
+ /// * The distance between `start` and `end` must not overflow `isize`.
+ /// * The distance being in bounds must not rely on "wrapping around" the
+ /// address space.
+ ///
+ /// Note that callers may pass a pair of pointers such that `start >= end`.
+ /// In that case, `None` will always be returned.
+ #[inline]
+ pub unsafe fn rfind_raw(
+ &self,
+ start: *const u8,
+ end: *const u8,
+ ) -> Option<*const u8> {
+ if start >= end {
+ return None;
+ }
+ if end.distance(start) < uint8x16_t::BYTES {
+ // SAFETY: We require the caller to pass valid start/end pointers.
+ return generic::rev_byte_by_byte(start, end, |b| {
+ b == self.0.needle1()
+ });
+ }
+ // SAFETY: Building a `One` means it's safe to call 'neon' routines.
+ // Also, we've checked that our haystack is big enough to run on the
+ // vector routine. Pointer validity is caller's responsibility.
+ self.rfind_raw_impl(start, end)
+ }
+
+ /// Like `count`, but accepts and returns raw pointers.
+ ///
+ /// This routine is useful if you're already using raw pointers and would
+ /// like to avoid converting back to a slice before executing a search.
+ ///
+ /// # Safety
+ ///
+ /// * Both `start` and `end` must be valid for reads.
+ /// * Both `start` and `end` must point to an initialized value.
+ /// * Both `start` and `end` must point to the same allocated object and
+ /// must either be in bounds or at most one byte past the end of the
+ /// allocated object.
+ /// * Both `start` and `end` must be _derived from_ a pointer to the same
+ /// object.
+ /// * The distance between `start` and `end` must not overflow `isize`.
+ /// * The distance being in bounds must not rely on "wrapping around" the
+ /// address space.
+ ///
+ /// Note that callers may pass a pair of pointers such that `start >= end`.
+ /// In that case, `None` will always be returned.
+ #[inline]
+ pub unsafe fn count_raw(&self, start: *const u8, end: *const u8) -> usize {
+ if start >= end {
+ return 0;
+ }
+ if end.distance(start) < uint8x16_t::BYTES {
+ // SAFETY: We require the caller to pass valid start/end pointers.
+ return generic::count_byte_by_byte(start, end, |b| {
+ b == self.0.needle1()
+ });
+ }
+ // SAFETY: Building a `One` means it's safe to call 'neon' routines.
+ // Also, we've checked that our haystack is big enough to run on the
+ // vector routine. Pointer validity is caller's responsibility.
+ self.count_raw_impl(start, end)
+ }
+
+ /// Execute a search using neon vectors and routines.
+ ///
+ /// # Safety
+ ///
+ /// Same as [`One::find_raw`], except the distance between `start` and
+ /// `end` must be at least the size of a neon vector (in bytes).
+ ///
+ /// (The target feature safety obligation is automatically fulfilled by
+ /// virtue of being a method on `One`, which can only be constructed
+ /// when it is safe to call `neon` routines.)
+ #[target_feature(enable = "neon")]
+ #[inline]
+ unsafe fn find_raw_impl(
+ &self,
+ start: *const u8,
+ end: *const u8,
+ ) -> Option<*const u8> {
+ self.0.find_raw(start, end)
+ }
+
+ /// Execute a search using neon vectors and routines.
+ ///
+ /// # Safety
+ ///
+ /// Same as [`One::rfind_raw`], except the distance between `start` and
+ /// `end` must be at least the size of a neon vector (in bytes).
+ ///
+ /// (The target feature safety obligation is automatically fulfilled by
+ /// virtue of being a method on `One`, which can only be constructed
+ /// when it is safe to call `neon` routines.)
+ #[target_feature(enable = "neon")]
+ #[inline]
+ unsafe fn rfind_raw_impl(
+ &self,
+ start: *const u8,
+ end: *const u8,
+ ) -> Option<*const u8> {
+ self.0.rfind_raw(start, end)
+ }
+
+ /// Execute a count using neon vectors and routines.
+ ///
+ /// # Safety
+ ///
+ /// Same as [`One::count_raw`], except the distance between `start` and
+ /// `end` must be at least the size of a neon vector (in bytes).
+ ///
+ /// (The target feature safety obligation is automatically fulfilled by
+ /// virtue of being a method on `One`, which can only be constructed
+ /// when it is safe to call `neon` routines.)
+ #[target_feature(enable = "neon")]
+ #[inline]
+ unsafe fn count_raw_impl(
+ &self,
+ start: *const u8,
+ end: *const u8,
+ ) -> usize {
+ self.0.count_raw(start, end)
+ }
+
+ /// Returns an iterator over all occurrences of the needle byte in the
+ /// given haystack.
+ ///
+ /// The iterator returned implements `DoubleEndedIterator`. This means it
+ /// can also be used to find occurrences in reverse order.
+ #[inline]
+ pub fn iter<'a, 'h>(&'a self, haystack: &'h [u8]) -> OneIter<'a, 'h> {
+ OneIter { searcher: self, it: generic::Iter::new(haystack) }
+ }
+}
+
+/// An iterator over all occurrences of a single byte in a haystack.
+///
+/// This iterator implements `DoubleEndedIterator`, which means it can also be
+/// used to find occurrences in reverse order.
+///
+/// This iterator is created by the [`One::iter`] method.
+///
+/// The lifetime parameters are as follows:
+///
+/// * `'a` refers to the lifetime of the underlying [`One`] searcher.
+/// * `'h` refers to the lifetime of the haystack being searched.
+#[derive(Clone, Debug)]
+pub struct OneIter<'a, 'h> {
+ searcher: &'a One,
+ it: generic::Iter<'h>,
+}
+
+impl<'a, 'h> Iterator for OneIter<'a, 'h> {
+ type Item = usize;
+
+ #[inline]
+ fn next(&mut self) -> Option<usize> {
+ // SAFETY: We rely on the generic iterator to provide valid start
+ // and end pointers, but we guarantee that any pointer returned by
+ // 'find_raw' falls within the bounds of the start and end pointer.
+ unsafe { self.it.next(|s, e| self.searcher.find_raw(s, e)) }
+ }
+
+ #[inline]
+ fn count(self) -> usize {
+ self.it.count(|s, e| {
+ // SAFETY: We rely on our generic iterator to return valid start
+ // and end pointers.
+ unsafe { self.searcher.count_raw(s, e) }
+ })
+ }
+
+ #[inline]
+ fn size_hint(&self) -> (usize, Option<usize>) {
+ self.it.size_hint()
+ }
+}
+
+impl<'a, 'h> DoubleEndedIterator for OneIter<'a, 'h> {
+ #[inline]
+ fn next_back(&mut self) -> Option<usize> {
+ // SAFETY: We rely on the generic iterator to provide valid start
+ // and end pointers, but we guarantee that any pointer returned by
+ // 'rfind_raw' falls within the bounds of the start and end pointer.
+ unsafe { self.it.next_back(|s, e| self.searcher.rfind_raw(s, e)) }
+ }
+}
+
+impl<'a, 'h> core::iter::FusedIterator for OneIter<'a, 'h> {}
+
+/// Finds all occurrences of two bytes in a haystack.
+///
+/// That is, this reports matches of one of two possible bytes. For example,
+/// searching for `a` or `b` in `afoobar` would report matches at offsets `0`,
+/// `4` and `5`.
+#[derive(Clone, Copy, Debug)]
+pub struct Two(generic::Two<uint8x16_t>);
+
+impl Two {
+ /// Create a new searcher that finds occurrences of the needle bytes given.
+ ///
+ /// This particular searcher is specialized to use neon vector instructions
+ /// that typically make it quite fast.
+ ///
+ /// If neon is unavailable in the current environment, then `None` is
+ /// returned.
+ #[inline]
+ pub fn new(needle1: u8, needle2: u8) -> Option<Two> {
+ if Two::is_available() {
+ // SAFETY: we check that neon is available above.
+ unsafe { Some(Two::new_unchecked(needle1, needle2)) }
+ } else {
+ None
+ }
+ }
+
+ /// Create a new finder specific to neon vectors and routines without
+ /// checking that neon is available.
+ ///
+ /// # Safety
+ ///
+ /// Callers must guarantee that it is safe to execute `neon` instructions
+ /// in the current environment.
+ ///
+ /// Note that it is a common misconception that if one compiles for an
+    /// `aarch64` target, then they therefore automatically have access to neon
+ /// instructions. While this is almost always the case, it isn't true in
+ /// 100% of cases.
+ #[target_feature(enable = "neon")]
+ #[inline]
+ pub unsafe fn new_unchecked(needle1: u8, needle2: u8) -> Two {
+ Two(generic::Two::new(needle1, needle2))
+ }
+
+ /// Returns true when this implementation is available in the current
+ /// environment.
+ ///
+ /// When this is true, it is guaranteed that [`Two::new`] will return
+ /// a `Some` value. Similarly, when it is false, it is guaranteed that
+ /// `Two::new` will return a `None` value.
+ ///
+ /// Note also that for the lifetime of a single program, if this returns
+ /// true then it will always return true.
+ #[inline]
+ pub fn is_available() -> bool {
+ #[cfg(target_feature = "neon")]
+ {
+ true
+ }
+ #[cfg(not(target_feature = "neon"))]
+ {
+ false
+ }
+ }
+
+ /// Return the first occurrence of one of the needle bytes in the given
+ /// haystack. If no such occurrence exists, then `None` is returned.
+ ///
+ /// The occurrence is reported as an offset into `haystack`. Its maximum
+ /// value is `haystack.len() - 1`.
+ #[inline]
+ pub fn find(&self, haystack: &[u8]) -> Option<usize> {
+ // SAFETY: `find_raw` guarantees that if a pointer is returned, it
+ // falls within the bounds of the start and end pointers.
+ unsafe {
+ generic::search_slice_with_raw(haystack, |s, e| {
+ self.find_raw(s, e)
+ })
+ }
+ }
+
+ /// Return the last occurrence of one of the needle bytes in the given
+ /// haystack. If no such occurrence exists, then `None` is returned.
+ ///
+ /// The occurrence is reported as an offset into `haystack`. Its maximum
+ /// value is `haystack.len() - 1`.
+ #[inline]
+ pub fn rfind(&self, haystack: &[u8]) -> Option<usize> {
+ // SAFETY: `rfind_raw` guarantees that if a pointer is returned, it
+ // falls within the bounds of the start and end pointers.
+ unsafe {
+ generic::search_slice_with_raw(haystack, |s, e| {
+ self.rfind_raw(s, e)
+ })
+ }
+ }
+
+ /// Like `find`, but accepts and returns raw pointers.
+ ///
+ /// When a match is found, the pointer returned is guaranteed to be
+ /// `>= start` and `< end`.
+ ///
+ /// This routine is useful if you're already using raw pointers and would
+ /// like to avoid converting back to a slice before executing a search.
+ ///
+ /// # Safety
+ ///
+ /// * Both `start` and `end` must be valid for reads.
+ /// * Both `start` and `end` must point to an initialized value.
+ /// * Both `start` and `end` must point to the same allocated object and
+ /// must either be in bounds or at most one byte past the end of the
+ /// allocated object.
+ /// * Both `start` and `end` must be _derived from_ a pointer to the same
+ /// object.
+ /// * The distance between `start` and `end` must not overflow `isize`.
+ /// * The distance being in bounds must not rely on "wrapping around" the
+ /// address space.
+ ///
+ /// Note that callers may pass a pair of pointers such that `start >= end`.
+ /// In that case, `None` will always be returned.
+ #[inline]
+ pub unsafe fn find_raw(
+ &self,
+ start: *const u8,
+ end: *const u8,
+ ) -> Option<*const u8> {
+ if start >= end {
+ return None;
+ }
+ if end.distance(start) < uint8x16_t::BYTES {
+ // SAFETY: We require the caller to pass valid start/end pointers.
+ return generic::fwd_byte_by_byte(start, end, |b| {
+ b == self.0.needle1() || b == self.0.needle2()
+ });
+ }
+ // SAFETY: Building a `Two` means it's safe to call 'neon' routines.
+ // Also, we've checked that our haystack is big enough to run on the
+ // vector routine. Pointer validity is caller's responsibility.
+ self.find_raw_impl(start, end)
+ }
+
+ /// Like `rfind`, but accepts and returns raw pointers.
+ ///
+ /// When a match is found, the pointer returned is guaranteed to be
+ /// `>= start` and `< end`.
+ ///
+ /// This routine is useful if you're already using raw pointers and would
+ /// like to avoid converting back to a slice before executing a search.
+ ///
+ /// # Safety
+ ///
+ /// * Both `start` and `end` must be valid for reads.
+ /// * Both `start` and `end` must point to an initialized value.
+ /// * Both `start` and `end` must point to the same allocated object and
+ /// must either be in bounds or at most one byte past the end of the
+ /// allocated object.
+ /// * Both `start` and `end` must be _derived from_ a pointer to the same
+ /// object.
+ /// * The distance between `start` and `end` must not overflow `isize`.
+ /// * The distance being in bounds must not rely on "wrapping around" the
+ /// address space.
+ ///
+ /// Note that callers may pass a pair of pointers such that `start >= end`.
+ /// In that case, `None` will always be returned.
+ #[inline]
+ pub unsafe fn rfind_raw(
+ &self,
+ start: *const u8,
+ end: *const u8,
+ ) -> Option<*const u8> {
+ if start >= end {
+ return None;
+ }
+ if end.distance(start) < uint8x16_t::BYTES {
+ // SAFETY: We require the caller to pass valid start/end pointers.
+ return generic::rev_byte_by_byte(start, end, |b| {
+ b == self.0.needle1() || b == self.0.needle2()
+ });
+ }
+ // SAFETY: Building a `Two` means it's safe to call 'neon' routines.
+ // Also, we've checked that our haystack is big enough to run on the
+ // vector routine. Pointer validity is caller's responsibility.
+ self.rfind_raw_impl(start, end)
+ }
+
+ /// Execute a search using neon vectors and routines.
+ ///
+ /// # Safety
+ ///
+ /// Same as [`Two::find_raw`], except the distance between `start` and
+ /// `end` must be at least the size of a neon vector (in bytes).
+ ///
+ /// (The target feature safety obligation is automatically fulfilled by
+ /// virtue of being a method on `Two`, which can only be constructed
+ /// when it is safe to call `neon` routines.)
+ #[target_feature(enable = "neon")]
+ #[inline]
+ unsafe fn find_raw_impl(
+ &self,
+ start: *const u8,
+ end: *const u8,
+ ) -> Option<*const u8> {
+ self.0.find_raw(start, end)
+ }
+
+ /// Execute a search using neon vectors and routines.
+ ///
+ /// # Safety
+ ///
+ /// Same as [`Two::rfind_raw`], except the distance between `start` and
+ /// `end` must be at least the size of a neon vector (in bytes).
+ ///
+ /// (The target feature safety obligation is automatically fulfilled by
+ /// virtue of being a method on `Two`, which can only be constructed
+ /// when it is safe to call `neon` routines.)
+ #[target_feature(enable = "neon")]
+ #[inline]
+ unsafe fn rfind_raw_impl(
+ &self,
+ start: *const u8,
+ end: *const u8,
+ ) -> Option<*const u8> {
+ self.0.rfind_raw(start, end)
+ }
+
+ /// Returns an iterator over all occurrences of the needle bytes in the
+ /// given haystack.
+ ///
+ /// The iterator returned implements `DoubleEndedIterator`. This means it
+ /// can also be used to find occurrences in reverse order.
+ #[inline]
+ pub fn iter<'a, 'h>(&'a self, haystack: &'h [u8]) -> TwoIter<'a, 'h> {
+ TwoIter { searcher: self, it: generic::Iter::new(haystack) }
+ }
+}
+
+/// An iterator over all occurrences of two possible bytes in a haystack.
+///
+/// This iterator implements `DoubleEndedIterator`, which means it can also be
+/// used to find occurrences in reverse order.
+///
+/// This iterator is created by the [`Two::iter`] method.
+///
+/// The lifetime parameters are as follows:
+///
+/// * `'a` refers to the lifetime of the underlying [`Two`] searcher.
+/// * `'h` refers to the lifetime of the haystack being searched.
+#[derive(Clone, Debug)]
+pub struct TwoIter<'a, 'h> {
+ searcher: &'a Two,
+ it: generic::Iter<'h>,
+}
+
+impl<'a, 'h> Iterator for TwoIter<'a, 'h> {
+ type Item = usize;
+
+ #[inline]
+ fn next(&mut self) -> Option<usize> {
+ // SAFETY: We rely on the generic iterator to provide valid start
+ // and end pointers, but we guarantee that any pointer returned by
+ // 'find_raw' falls within the bounds of the start and end pointer.
+ unsafe { self.it.next(|s, e| self.searcher.find_raw(s, e)) }
+ }
+
+ #[inline]
+ fn size_hint(&self) -> (usize, Option<usize>) {
+ self.it.size_hint()
+ }
+}
+
+impl<'a, 'h> DoubleEndedIterator for TwoIter<'a, 'h> {
+ #[inline]
+ fn next_back(&mut self) -> Option<usize> {
+ // SAFETY: We rely on the generic iterator to provide valid start
+ // and end pointers, but we guarantee that any pointer returned by
+ // 'rfind_raw' falls within the bounds of the start and end pointer.
+ unsafe { self.it.next_back(|s, e| self.searcher.rfind_raw(s, e)) }
+ }
+}
+
+impl<'a, 'h> core::iter::FusedIterator for TwoIter<'a, 'h> {}
+
+/// Finds all occurrences of three bytes in a haystack.
+///
+/// That is, this reports matches of one of three possible bytes. For example,
+/// searching for `a`, `b` or `o` in `afoobar` would report matches at offsets
+/// `0`, `2`, `3`, `4` and `5`.
+#[derive(Clone, Copy, Debug)]
+pub struct Three(generic::Three<uint8x16_t>);
+
+impl Three {
+ /// Create a new searcher that finds occurrences of the needle bytes given.
+ ///
+ /// This particular searcher is specialized to use neon vector instructions
+ /// that typically make it quite fast.
+ ///
+ /// If neon is unavailable in the current environment, then `None` is
+ /// returned.
+ #[inline]
+ pub fn new(needle1: u8, needle2: u8, needle3: u8) -> Option<Three> {
+ if Three::is_available() {
+ // SAFETY: we check that neon is available above.
+ unsafe { Some(Three::new_unchecked(needle1, needle2, needle3)) }
+ } else {
+ None
+ }
+ }
+
+ /// Create a new finder specific to neon vectors and routines without
+ /// checking that neon is available.
+ ///
+ /// # Safety
+ ///
+ /// Callers must guarantee that it is safe to execute `neon` instructions
+ /// in the current environment.
+ ///
+ /// Note that it is a common misconception that if one compiles for an
+    /// `aarch64` target, then they therefore automatically have access to neon
+ /// instructions. While this is almost always the case, it isn't true in
+ /// 100% of cases.
+ #[target_feature(enable = "neon")]
+ #[inline]
+ pub unsafe fn new_unchecked(
+ needle1: u8,
+ needle2: u8,
+ needle3: u8,
+ ) -> Three {
+ Three(generic::Three::new(needle1, needle2, needle3))
+ }
+
+ /// Returns true when this implementation is available in the current
+ /// environment.
+ ///
+ /// When this is true, it is guaranteed that [`Three::new`] will return
+ /// a `Some` value. Similarly, when it is false, it is guaranteed that
+ /// `Three::new` will return a `None` value.
+ ///
+ /// Note also that for the lifetime of a single program, if this returns
+ /// true then it will always return true.
+ #[inline]
+ pub fn is_available() -> bool {
+ #[cfg(target_feature = "neon")]
+ {
+ true
+ }
+ #[cfg(not(target_feature = "neon"))]
+ {
+ false
+ }
+ }
+
+ /// Return the first occurrence of one of the needle bytes in the given
+ /// haystack. If no such occurrence exists, then `None` is returned.
+ ///
+ /// The occurrence is reported as an offset into `haystack`. Its maximum
+ /// value is `haystack.len() - 1`.
+ #[inline]
+ pub fn find(&self, haystack: &[u8]) -> Option<usize> {
+ // SAFETY: `find_raw` guarantees that if a pointer is returned, it
+ // falls within the bounds of the start and end pointers.
+ unsafe {
+ generic::search_slice_with_raw(haystack, |s, e| {
+ self.find_raw(s, e)
+ })
+ }
+ }
+
+ /// Return the last occurrence of one of the needle bytes in the given
+ /// haystack. If no such occurrence exists, then `None` is returned.
+ ///
+ /// The occurrence is reported as an offset into `haystack`. Its maximum
+ /// value is `haystack.len() - 1`.
+ #[inline]
+ pub fn rfind(&self, haystack: &[u8]) -> Option<usize> {
+ // SAFETY: `rfind_raw` guarantees that if a pointer is returned, it
+ // falls within the bounds of the start and end pointers.
+ unsafe {
+ generic::search_slice_with_raw(haystack, |s, e| {
+ self.rfind_raw(s, e)
+ })
+ }
+ }
+
+ /// Like `find`, but accepts and returns raw pointers.
+ ///
+ /// When a match is found, the pointer returned is guaranteed to be
+ /// `>= start` and `< end`.
+ ///
+ /// This routine is useful if you're already using raw pointers and would
+ /// like to avoid converting back to a slice before executing a search.
+ ///
+ /// # Safety
+ ///
+ /// * Both `start` and `end` must be valid for reads.
+ /// * Both `start` and `end` must point to an initialized value.
+ /// * Both `start` and `end` must point to the same allocated object and
+ /// must either be in bounds or at most one byte past the end of the
+ /// allocated object.
+ /// * Both `start` and `end` must be _derived from_ a pointer to the same
+ /// object.
+ /// * The distance between `start` and `end` must not overflow `isize`.
+ /// * The distance being in bounds must not rely on "wrapping around" the
+ /// address space.
+ ///
+ /// Note that callers may pass a pair of pointers such that `start >= end`.
+ /// In that case, `None` will always be returned.
+ #[inline]
+ pub unsafe fn find_raw(
+ &self,
+ start: *const u8,
+ end: *const u8,
+ ) -> Option<*const u8> {
+ if start >= end {
+ return None;
+ }
+ if end.distance(start) < uint8x16_t::BYTES {
+ // SAFETY: We require the caller to pass valid start/end pointers.
+ return generic::fwd_byte_by_byte(start, end, |b| {
+ b == self.0.needle1()
+ || b == self.0.needle2()
+ || b == self.0.needle3()
+ });
+ }
+ // SAFETY: Building a `Three` means it's safe to call 'neon' routines.
+ // Also, we've checked that our haystack is big enough to run on the
+ // vector routine. Pointer validity is caller's responsibility.
+ self.find_raw_impl(start, end)
+ }
+
+ /// Like `rfind`, but accepts and returns raw pointers.
+ ///
+ /// When a match is found, the pointer returned is guaranteed to be
+ /// `>= start` and `< end`.
+ ///
+ /// This routine is useful if you're already using raw pointers and would
+ /// like to avoid converting back to a slice before executing a search.
+ ///
+ /// # Safety
+ ///
+ /// * Both `start` and `end` must be valid for reads.
+ /// * Both `start` and `end` must point to an initialized value.
+ /// * Both `start` and `end` must point to the same allocated object and
+ /// must either be in bounds or at most one byte past the end of the
+ /// allocated object.
+ /// * Both `start` and `end` must be _derived from_ a pointer to the same
+ /// object.
+ /// * The distance between `start` and `end` must not overflow `isize`.
+ /// * The distance being in bounds must not rely on "wrapping around" the
+ /// address space.
+ ///
+ /// Note that callers may pass a pair of pointers such that `start >= end`.
+ /// In that case, `None` will always be returned.
+ #[inline]
+ pub unsafe fn rfind_raw(
+ &self,
+ start: *const u8,
+ end: *const u8,
+ ) -> Option<*const u8> {
+ if start >= end {
+ return None;
+ }
+ if end.distance(start) < uint8x16_t::BYTES {
+ // SAFETY: We require the caller to pass valid start/end pointers.
+ return generic::rev_byte_by_byte(start, end, |b| {
+ b == self.0.needle1()
+ || b == self.0.needle2()
+ || b == self.0.needle3()
+ });
+ }
+ // SAFETY: Building a `Three` means it's safe to call 'neon' routines.
+ // Also, we've checked that our haystack is big enough to run on the
+ // vector routine. Pointer validity is caller's responsibility.
+ self.rfind_raw_impl(start, end)
+ }
+
+ /// Execute a search using neon vectors and routines.
+ ///
+ /// # Safety
+ ///
+ /// Same as [`Three::find_raw`], except the distance between `start` and
+ /// `end` must be at least the size of a neon vector (in bytes).
+ ///
+ /// (The target feature safety obligation is automatically fulfilled by
+ /// virtue of being a method on `Three`, which can only be constructed
+ /// when it is safe to call `neon` routines.)
+ #[target_feature(enable = "neon")]
+ #[inline]
+ unsafe fn find_raw_impl(
+ &self,
+ start: *const u8,
+ end: *const u8,
+ ) -> Option<*const u8> {
+ self.0.find_raw(start, end)
+ }
+
+ /// Execute a search using neon vectors and routines.
+ ///
+ /// # Safety
+ ///
+ /// Same as [`Three::rfind_raw`], except the distance between `start` and
+ /// `end` must be at least the size of a neon vector (in bytes).
+ ///
+ /// (The target feature safety obligation is automatically fulfilled by
+ /// virtue of being a method on `Three`, which can only be constructed
+ /// when it is safe to call `neon` routines.)
+ #[target_feature(enable = "neon")]
+ #[inline]
+ unsafe fn rfind_raw_impl(
+ &self,
+ start: *const u8,
+ end: *const u8,
+ ) -> Option<*const u8> {
+ self.0.rfind_raw(start, end)
+ }
+
+    /// Returns an iterator over all occurrences of the needle bytes in the
+    /// given haystack.
+ ///
+ /// The iterator returned implements `DoubleEndedIterator`. This means it
+ /// can also be used to find occurrences in reverse order.
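+    ///
+    /// # Example
+    ///
+    /// A minimal sketch (illustrative only, assuming an `aarch64` target
+    /// where this module is available):
+    ///
+    /// ```ignore
+    /// use memchr::arch::aarch64::neon::memchr::Three;
+    ///
+    /// let three = Three::new(b'a', b'b', b'c').unwrap();
+    /// let matches: Vec<usize> = three.iter(b"xbyaz").collect();
+    /// assert_eq!(vec![1, 3], matches);
+    /// ```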
+ #[inline]
+ pub fn iter<'a, 'h>(&'a self, haystack: &'h [u8]) -> ThreeIter<'a, 'h> {
+ ThreeIter { searcher: self, it: generic::Iter::new(haystack) }
+ }
+}
+
+/// An iterator over all occurrences of three possible bytes in a haystack.
+///
+/// This iterator implements `DoubleEndedIterator`, which means it can also be
+/// used to find occurrences in reverse order.
+///
+/// This iterator is created by the [`Three::iter`] method.
+///
+/// The lifetime parameters are as follows:
+///
+/// * `'a` refers to the lifetime of the underlying [`Three`] searcher.
+/// * `'h` refers to the lifetime of the haystack being searched.
+#[derive(Clone, Debug)]
+pub struct ThreeIter<'a, 'h> {
+ searcher: &'a Three,
+ it: generic::Iter<'h>,
+}
+
+impl<'a, 'h> Iterator for ThreeIter<'a, 'h> {
+ type Item = usize;
+
+ #[inline]
+ fn next(&mut self) -> Option<usize> {
+ // SAFETY: We rely on the generic iterator to provide valid start
+ // and end pointers, but we guarantee that any pointer returned by
+ // 'find_raw' falls within the bounds of the start and end pointer.
+ unsafe { self.it.next(|s, e| self.searcher.find_raw(s, e)) }
+ }
+
+ #[inline]
+ fn size_hint(&self) -> (usize, Option<usize>) {
+ self.it.size_hint()
+ }
+}
+
+impl<'a, 'h> DoubleEndedIterator for ThreeIter<'a, 'h> {
+ #[inline]
+ fn next_back(&mut self) -> Option<usize> {
+ // SAFETY: We rely on the generic iterator to provide valid start
+ // and end pointers, but we guarantee that any pointer returned by
+ // 'rfind_raw' falls within the bounds of the start and end pointer.
+ unsafe { self.it.next_back(|s, e| self.searcher.rfind_raw(s, e)) }
+ }
+}
+
+impl<'a, 'h> core::iter::FusedIterator for ThreeIter<'a, 'h> {}
+
+#[cfg(test)]
+mod tests {
+ use super::*;
+
+ define_memchr_quickcheck!(super);
+
+ #[test]
+ fn forward_one() {
+ crate::tests::memchr::Runner::new(1).forward_iter(
+ |haystack, needles| {
+ Some(One::new(needles[0])?.iter(haystack).collect())
+ },
+ )
+ }
+
+ #[test]
+ fn reverse_one() {
+ crate::tests::memchr::Runner::new(1).reverse_iter(
+ |haystack, needles| {
+ Some(One::new(needles[0])?.iter(haystack).rev().collect())
+ },
+ )
+ }
+
+ #[test]
+ fn count_one() {
+ crate::tests::memchr::Runner::new(1).count_iter(|haystack, needles| {
+ Some(One::new(needles[0])?.iter(haystack).count())
+ })
+ }
+
+ #[test]
+ fn forward_two() {
+ crate::tests::memchr::Runner::new(2).forward_iter(
+ |haystack, needles| {
+ let n1 = needles.get(0).copied()?;
+ let n2 = needles.get(1).copied()?;
+ Some(Two::new(n1, n2)?.iter(haystack).collect())
+ },
+ )
+ }
+
+ #[test]
+ fn reverse_two() {
+ crate::tests::memchr::Runner::new(2).reverse_iter(
+ |haystack, needles| {
+ let n1 = needles.get(0).copied()?;
+ let n2 = needles.get(1).copied()?;
+ Some(Two::new(n1, n2)?.iter(haystack).rev().collect())
+ },
+ )
+ }
+
+ #[test]
+ fn forward_three() {
+ crate::tests::memchr::Runner::new(3).forward_iter(
+ |haystack, needles| {
+ let n1 = needles.get(0).copied()?;
+ let n2 = needles.get(1).copied()?;
+ let n3 = needles.get(2).copied()?;
+ Some(Three::new(n1, n2, n3)?.iter(haystack).collect())
+ },
+ )
+ }
+
+ #[test]
+ fn reverse_three() {
+ crate::tests::memchr::Runner::new(3).reverse_iter(
+ |haystack, needles| {
+ let n1 = needles.get(0).copied()?;
+ let n2 = needles.get(1).copied()?;
+ let n3 = needles.get(2).copied()?;
+ Some(Three::new(n1, n2, n3)?.iter(haystack).rev().collect())
+ },
+ )
+ }
+}
diff --git a/vendor/memchr/src/arch/aarch64/neon/mod.rs b/vendor/memchr/src/arch/aarch64/neon/mod.rs
new file mode 100644
index 000000000..ccf9cf81f
--- /dev/null
+++ b/vendor/memchr/src/arch/aarch64/neon/mod.rs
@@ -0,0 +1,6 @@
+/*!
+Algorithms for the `aarch64` target using 128-bit vectors via NEON.
+*/
+
+pub mod memchr;
+pub mod packedpair;
diff --git a/vendor/memchr/src/arch/aarch64/neon/packedpair.rs b/vendor/memchr/src/arch/aarch64/neon/packedpair.rs
new file mode 100644
index 000000000..6884882df
--- /dev/null
+++ b/vendor/memchr/src/arch/aarch64/neon/packedpair.rs
@@ -0,0 +1,236 @@
+/*!
+A 128-bit vector implementation of the "packed pair" SIMD algorithm.
+
+The "packed pair" algorithm is based on the [generic SIMD] algorithm. The main
+difference is that it (by default) uses a background distribution of byte
+frequencies to heuristically select the pair of bytes to search for.
+
+[generic SIMD]: http://0x80.pl/articles/simd-strfind.html#first-and-last
+*/
+
+use core::arch::aarch64::uint8x16_t;
+
+use crate::arch::{all::packedpair::Pair, generic::packedpair};
+
+/// A "packed pair" finder that uses 128-bit vector operations.
+///
+/// This finder picks two bytes that it believes have high predictive power
+/// for indicating an overall match of a needle. Depending on whether
+/// `Finder::find` or `Finder::find_prefilter` is used, it reports offsets
+/// where the needle matches or could match. In the prefilter case, candidates
+/// are reported whenever the [`Pair`] of bytes given matches.
+#[derive(Clone, Copy, Debug)]
+pub struct Finder(packedpair::Finder<uint8x16_t>);
+
+/// A "packed pair" finder that uses 128-bit vector operations.
+///
+/// This finder picks two bytes that it believes have high predictive power
+/// for indicating an overall match of a needle. Depending on whether
+/// `Finder::find` or `Finder::find_prefilter` is used, it reports offsets
+/// where the needle matches or could match. In the prefilter case, candidates
+/// are reported whenever the [`Pair`] of bytes given matches.
+impl Finder {
+ /// Create a new pair searcher. The searcher returned can either report
+ /// exact matches of `needle` or act as a prefilter and report candidate
+ /// positions of `needle`.
+ ///
+ /// If neon is unavailable in the current environment or if a [`Pair`]
+ /// could not be constructed from the needle given, then `None` is
+ /// returned.
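+    ///
+    /// # Example
+    ///
+    /// A minimal sketch (illustrative only, assuming an `aarch64` target
+    /// where this module is available and a haystack that meets
+    /// [`Finder::min_haystack_len`]):
+    ///
+    /// ```ignore
+    /// use memchr::arch::aarch64::neon::packedpair::Finder;
+    ///
+    /// let finder = Finder::new(b"needle").unwrap();
+    /// let haystack = b"haystack with a needle in it";
+    /// assert_eq!(Some(16), finder.find(haystack, b"needle"));
+    /// ```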
+ #[inline]
+ pub fn new(needle: &[u8]) -> Option<Finder> {
+ Finder::with_pair(needle, Pair::new(needle)?)
+ }
+
+ /// Create a new "packed pair" finder using the pair of bytes given.
+ ///
+ /// This constructor permits callers to control precisely which pair of
+ /// bytes is used as a predicate.
+ ///
+ /// If neon is unavailable in the current environment, then `None` is
+ /// returned.
+ #[inline]
+ pub fn with_pair(needle: &[u8], pair: Pair) -> Option<Finder> {
+ if Finder::is_available() {
+            // SAFETY: we check that neon is available above. We are also
+            // guaranteed to have needle.len() > 1 because we have a valid
+            // Pair.
+ unsafe { Some(Finder::with_pair_impl(needle, pair)) }
+ } else {
+ None
+ }
+ }
+
+ /// Create a new `Finder` specific to neon vectors and routines.
+ ///
+ /// # Safety
+ ///
+ /// Same as the safety for `packedpair::Finder::new`, and callers must also
+ /// ensure that neon is available.
+ #[target_feature(enable = "neon")]
+ #[inline]
+ unsafe fn with_pair_impl(needle: &[u8], pair: Pair) -> Finder {
+ let finder = packedpair::Finder::<uint8x16_t>::new(needle, pair);
+ Finder(finder)
+ }
+
+ /// Returns true when this implementation is available in the current
+ /// environment.
+ ///
+ /// When this is true, it is guaranteed that [`Finder::with_pair`] will
+ /// return a `Some` value. Similarly, when it is false, it is guaranteed
+ /// that `Finder::with_pair` will return a `None` value. Notice that this
+ /// does not guarantee that [`Finder::new`] will return a `Finder`. Namely,
+ /// even when `Finder::is_available` is true, it is not guaranteed that a
+ /// valid [`Pair`] can be found from the needle given.
+ ///
+ /// Note also that for the lifetime of a single program, if this returns
+ /// true then it will always return true.
+ #[inline]
+ pub fn is_available() -> bool {
+ #[cfg(target_feature = "neon")]
+ {
+ true
+ }
+ #[cfg(not(target_feature = "neon"))]
+ {
+ false
+ }
+ }
+
+ /// Execute a search using neon vectors and routines.
+ ///
+ /// # Panics
+ ///
+ /// When `haystack.len()` is less than [`Finder::min_haystack_len`].
+ #[inline]
+ pub fn find(&self, haystack: &[u8], needle: &[u8]) -> Option<usize> {
+ // SAFETY: Building a `Finder` means it's safe to call 'neon' routines.
+ unsafe { self.find_impl(haystack, needle) }
+ }
+
+    /// Execute a prefilter search using neon vectors and routines.
+ ///
+ /// # Panics
+ ///
+ /// When `haystack.len()` is less than [`Finder::min_haystack_len`].
+ #[inline]
+ pub fn find_prefilter(&self, haystack: &[u8]) -> Option<usize> {
+ // SAFETY: Building a `Finder` means it's safe to call 'neon' routines.
+ unsafe { self.find_prefilter_impl(haystack) }
+ }
+
+ /// Execute a search using neon vectors and routines.
+ ///
+ /// # Panics
+ ///
+ /// When `haystack.len()` is less than [`Finder::min_haystack_len`].
+ ///
+ /// # Safety
+ ///
+ /// (The target feature safety obligation is automatically fulfilled by
+ /// virtue of being a method on `Finder`, which can only be constructed
+ /// when it is safe to call `neon` routines.)
+ #[target_feature(enable = "neon")]
+ #[inline]
+ unsafe fn find_impl(
+ &self,
+ haystack: &[u8],
+ needle: &[u8],
+ ) -> Option<usize> {
+ self.0.find(haystack, needle)
+ }
+
+ /// Execute a prefilter search using neon vectors and routines.
+ ///
+ /// # Panics
+ ///
+ /// When `haystack.len()` is less than [`Finder::min_haystack_len`].
+ ///
+ /// # Safety
+ ///
+ /// (The target feature safety obligation is automatically fulfilled by
+ /// virtue of being a method on `Finder`, which can only be constructed
+ /// when it is safe to call `neon` routines.)
+ #[target_feature(enable = "neon")]
+ #[inline]
+ unsafe fn find_prefilter_impl(&self, haystack: &[u8]) -> Option<usize> {
+ self.0.find_prefilter(haystack)
+ }
+
+ /// Returns the pair of offsets (into the needle) used to check as a
+ /// predicate before confirming whether a needle exists at a particular
+ /// position.
+ #[inline]
+ pub fn pair(&self) -> &Pair {
+ self.0.pair()
+ }
+
+ /// Returns the minimum haystack length that this `Finder` can search.
+ ///
+ /// Using a haystack with length smaller than this in a search will result
+ /// in a panic. The reason for this restriction is that this finder is
+ /// meant to be a low-level component that is part of a larger substring
+ /// strategy. In that sense, it avoids trying to handle all cases and
+ /// instead only handles the cases that it can handle very well.
+ #[inline]
+ pub fn min_haystack_len(&self) -> usize {
+ self.0.min_haystack_len()
+ }
+}
+
+#[cfg(test)]
+mod tests {
+ use super::*;
+
+ fn find(haystack: &[u8], needle: &[u8]) -> Option<Option<usize>> {
+ let f = Finder::new(needle)?;
+ if haystack.len() < f.min_haystack_len() {
+ return None;
+ }
+ Some(f.find(haystack, needle))
+ }
+
+ define_substring_forward_quickcheck!(find);
+
+ #[test]
+ fn forward_substring() {
+ crate::tests::substring::Runner::new().fwd(find).run()
+ }
+
+ #[test]
+ fn forward_packedpair() {
+ fn find(
+ haystack: &[u8],
+ needle: &[u8],
+ index1: u8,
+ index2: u8,
+ ) -> Option<Option<usize>> {
+ let pair = Pair::with_indices(needle, index1, index2)?;
+ let f = Finder::with_pair(needle, pair)?;
+ if haystack.len() < f.min_haystack_len() {
+ return None;
+ }
+ Some(f.find(haystack, needle))
+ }
+ crate::tests::packedpair::Runner::new().fwd(find).run()
+ }
+
+ #[test]
+ fn forward_packedpair_prefilter() {
+ fn find(
+ haystack: &[u8],
+ needle: &[u8],
+ index1: u8,
+ index2: u8,
+ ) -> Option<Option<usize>> {
+ let pair = Pair::with_indices(needle, index1, index2)?;
+ let f = Finder::with_pair(needle, pair)?;
+ if haystack.len() < f.min_haystack_len() {
+ return None;
+ }
+ Some(f.find_prefilter(haystack))
+ }
+ crate::tests::packedpair::Runner::new().fwd(find).run()
+ }
+}
diff --git a/vendor/memchr/src/arch/all/memchr.rs b/vendor/memchr/src/arch/all/memchr.rs
new file mode 100644
index 000000000..b211ad9ec
--- /dev/null
+++ b/vendor/memchr/src/arch/all/memchr.rs
@@ -0,0 +1,1015 @@
+/*!
+Provides architecture independent implementations of `memchr` and friends.
+
+The main types in this module are [`One`], [`Two`] and [`Three`]. They are for
+searching for one, two or three distinct bytes, respectively, in a haystack.
+Each type also has corresponding double ended iterators. These searchers
+are typically slower than hand-coded vector routines accomplishing the same
+task, but are also typically faster than naive scalar code. These routines
+effectively work by treating a `usize` as a vector of 8-bit lanes, and thus
+achieve some level of data parallelism even without explicit vector support.
+
+The `One` searcher also provides a [`One::count`] routine for efficiently
+counting the number of times a single byte occurs in a haystack. This is
+useful, for example, for counting the number of lines in a haystack. This
+routine exists because it is usually faster, especially with a high match
+count, than using [`One::find`] repeatedly. ([`OneIter`] specializes its
+`Iterator::count` implementation to use this routine.)
+
+Only one, two and three bytes are supported because three bytes is about
+the point where one sees diminishing returns. Beyond this point, it's
+probably (but not necessarily) better to just use a simple `[bool; 256]`
+array or similar. However, it depends mightily on the specific workload and
+the expected match frequency.
+*/
+
+use crate::{arch::generic::memchr as generic, ext::Pointer};
+
+/// The number of bytes in a single `usize` value.
+const USIZE_BYTES: usize = (usize::BITS / 8) as usize;
+/// The bits that must be zero for a `*const usize` to be properly aligned.
+const USIZE_ALIGN: usize = USIZE_BYTES - 1;
+
+/// Finds all occurrences of a single byte in a haystack.
+#[derive(Clone, Copy, Debug)]
+pub struct One {
+ s1: u8,
+ v1: usize,
+}
+
+impl One {
+ /// The number of bytes we examine per each iteration of our search loop.
+ const LOOP_BYTES: usize = 2 * USIZE_BYTES;
+
+ /// Create a new searcher that finds occurrences of the byte given.
+ #[inline]
+ pub fn new(needle: u8) -> One {
+ One { s1: needle, v1: splat(needle) }
+ }
+
+ /// A test-only routine so that we can bundle a bunch of quickcheck
+ /// properties into a single macro. Basically, this provides a constructor
+ /// that makes it identical to most other memchr implementations, which
+ /// have fallible constructors.
+ #[cfg(test)]
+ pub(crate) fn try_new(needle: u8) -> Option<One> {
+ Some(One::new(needle))
+ }
+
+ /// Return the first occurrence of the needle in the given haystack. If no
+ /// such occurrence exists, then `None` is returned.
+ ///
+ /// The occurrence is reported as an offset into `haystack`. Its maximum
+ /// value for a non-empty haystack is `haystack.len() - 1`.
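+    ///
+    /// # Example
+    ///
+    /// A brief sketch of the expected behavior (the public path is assumed
+    /// to be `memchr::arch::all::memchr::One`):
+    ///
+    /// ```
+    /// use memchr::arch::all::memchr::One;
+    ///
+    /// let one = One::new(b'k');
+    /// assert_eq!(Some(7), one.find(b"haystack"));
+    /// ```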
+ #[inline]
+ pub fn find(&self, haystack: &[u8]) -> Option<usize> {
+ // SAFETY: `find_raw` guarantees that if a pointer is returned, it
+ // falls within the bounds of the start and end pointers.
+ unsafe {
+ generic::search_slice_with_raw(haystack, |s, e| {
+ self.find_raw(s, e)
+ })
+ }
+ }
+
+ /// Return the last occurrence of the needle in the given haystack. If no
+ /// such occurrence exists, then `None` is returned.
+ ///
+ /// The occurrence is reported as an offset into `haystack`. Its maximum
+ /// value for a non-empty haystack is `haystack.len() - 1`.
+ #[inline]
+ pub fn rfind(&self, haystack: &[u8]) -> Option<usize> {
+ // SAFETY: `find_raw` guarantees that if a pointer is returned, it
+ // falls within the bounds of the start and end pointers.
+ unsafe {
+ generic::search_slice_with_raw(haystack, |s, e| {
+ self.rfind_raw(s, e)
+ })
+ }
+ }
+
+ /// Counts all occurrences of this byte in the given haystack.
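+    ///
+    /// # Example
+    ///
+    /// A brief sketch (same path assumption as for [`One::find`]):
+    ///
+    /// ```
+    /// use memchr::arch::all::memchr::One;
+    ///
+    /// assert_eq!(3, One::new(b'a').count(b"banana"));
+    /// ```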
+ #[inline]
+ pub fn count(&self, haystack: &[u8]) -> usize {
+ // SAFETY: All of our pointers are derived directly from a borrowed
+ // slice, which is guaranteed to be valid.
+ unsafe {
+ let start = haystack.as_ptr();
+ let end = start.add(haystack.len());
+ self.count_raw(start, end)
+ }
+ }
+
+ /// Like `find`, but accepts and returns raw pointers.
+ ///
+ /// When a match is found, the pointer returned is guaranteed to be
+ /// `>= start` and `< end`.
+ ///
+ /// This routine is useful if you're already using raw pointers and would
+ /// like to avoid converting back to a slice before executing a search.
+ ///
+ /// # Safety
+ ///
+ /// * Both `start` and `end` must be valid for reads.
+ /// * Both `start` and `end` must point to an initialized value.
+ /// * Both `start` and `end` must point to the same allocated object and
+ /// must either be in bounds or at most one byte past the end of the
+ /// allocated object.
+ /// * Both `start` and `end` must be _derived from_ a pointer to the same
+ /// object.
+ /// * The distance between `start` and `end` must not overflow `isize`.
+ /// * The distance being in bounds must not rely on "wrapping around" the
+ /// address space.
+ ///
+ /// Note that callers may pass a pair of pointers such that `start >= end`.
+ /// In that case, `None` will always be returned.
+ #[inline]
+ pub unsafe fn find_raw(
+ &self,
+ start: *const u8,
+ end: *const u8,
+ ) -> Option<*const u8> {
+ if start >= end {
+ return None;
+ }
+ let confirm = |b| self.confirm(b);
+ let len = end.distance(start);
+ if len < USIZE_BYTES {
+ return generic::fwd_byte_by_byte(start, end, confirm);
+ }
+
+ // The start of the search may not be aligned to `*const usize`,
+ // so we do an unaligned load here.
+ let chunk = start.cast::<usize>().read_unaligned();
+ if self.has_needle(chunk) {
+ return generic::fwd_byte_by_byte(start, end, confirm);
+ }
+
+ // And now we start our search at a guaranteed aligned position.
+        // The first iteration of the loop below will overlap with the
+        // unaligned chunk above in cases where the search starts at an
+        // unaligned offset, but that's okay since we only get here if the
+        // check above didn't find a match.
+ let mut cur =
+ start.add(USIZE_BYTES - (start.as_usize() & USIZE_ALIGN));
+ debug_assert!(cur > start);
+ if len <= One::LOOP_BYTES {
+ return generic::fwd_byte_by_byte(cur, end, confirm);
+ }
+ debug_assert!(end.sub(One::LOOP_BYTES) >= start);
+ while cur <= end.sub(One::LOOP_BYTES) {
+ debug_assert_eq!(0, cur.as_usize() % USIZE_BYTES);
+
+ let a = cur.cast::<usize>().read();
+ let b = cur.add(USIZE_BYTES).cast::<usize>().read();
+ if self.has_needle(a) || self.has_needle(b) {
+ break;
+ }
+ cur = cur.add(One::LOOP_BYTES);
+ }
+ generic::fwd_byte_by_byte(cur, end, confirm)
+ }
+
+ /// Like `rfind`, but accepts and returns raw pointers.
+ ///
+ /// When a match is found, the pointer returned is guaranteed to be
+ /// `>= start` and `< end`.
+ ///
+ /// This routine is useful if you're already using raw pointers and would
+ /// like to avoid converting back to a slice before executing a search.
+ ///
+ /// # Safety
+ ///
+ /// * Both `start` and `end` must be valid for reads.
+ /// * Both `start` and `end` must point to an initialized value.
+ /// * Both `start` and `end` must point to the same allocated object and
+ /// must either be in bounds or at most one byte past the end of the
+ /// allocated object.
+ /// * Both `start` and `end` must be _derived from_ a pointer to the same
+ /// object.
+ /// * The distance between `start` and `end` must not overflow `isize`.
+ /// * The distance being in bounds must not rely on "wrapping around" the
+ /// address space.
+ ///
+ /// Note that callers may pass a pair of pointers such that `start >= end`.
+ /// In that case, `None` will always be returned.
+ #[inline]
+ pub unsafe fn rfind_raw(
+ &self,
+ start: *const u8,
+ end: *const u8,
+ ) -> Option<*const u8> {
+ if start >= end {
+ return None;
+ }
+ let confirm = |b| self.confirm(b);
+ let len = end.distance(start);
+ if len < USIZE_BYTES {
+ return generic::rev_byte_by_byte(start, end, confirm);
+ }
+
+ let chunk = end.sub(USIZE_BYTES).cast::<usize>().read_unaligned();
+ if self.has_needle(chunk) {
+ return generic::rev_byte_by_byte(start, end, confirm);
+ }
+
+ let mut cur = end.sub(end.as_usize() & USIZE_ALIGN);
+ debug_assert!(start <= cur && cur <= end);
+ if len <= One::LOOP_BYTES {
+ return generic::rev_byte_by_byte(start, cur, confirm);
+ }
+ while cur >= start.add(One::LOOP_BYTES) {
+ debug_assert_eq!(0, cur.as_usize() % USIZE_BYTES);
+
+ let a = cur.sub(2 * USIZE_BYTES).cast::<usize>().read();
+ let b = cur.sub(1 * USIZE_BYTES).cast::<usize>().read();
+ if self.has_needle(a) || self.has_needle(b) {
+ break;
+ }
+ cur = cur.sub(One::LOOP_BYTES);
+ }
+ generic::rev_byte_by_byte(start, cur, confirm)
+ }
+
+ /// Counts all occurrences of this byte in the given haystack represented
+ /// by raw pointers.
+ ///
+ /// This routine is useful if you're already using raw pointers and would
+ /// like to avoid converting back to a slice before executing a search.
+ ///
+ /// # Safety
+ ///
+ /// * Both `start` and `end` must be valid for reads.
+ /// * Both `start` and `end` must point to an initialized value.
+ /// * Both `start` and `end` must point to the same allocated object and
+ /// must either be in bounds or at most one byte past the end of the
+ /// allocated object.
+ /// * Both `start` and `end` must be _derived from_ a pointer to the same
+ /// object.
+ /// * The distance between `start` and `end` must not overflow `isize`.
+ /// * The distance being in bounds must not rely on "wrapping around" the
+ /// address space.
+ ///
+ /// Note that callers may pass a pair of pointers such that `start >= end`.
+ /// In that case, `0` will always be returned.
+ #[inline]
+ pub unsafe fn count_raw(&self, start: *const u8, end: *const u8) -> usize {
+ if start >= end {
+ return 0;
+ }
+ let confirm = |b| self.confirm(b);
+ let len = end.distance(start);
+ if len < USIZE_BYTES {
+ return generic::count_byte_by_byte(start, end, confirm);
+ }
+
+        // Start the main search at a guaranteed aligned position.
+        let mut cur =
+            start.add(USIZE_BYTES - (start.as_usize() & USIZE_ALIGN));
+        debug_assert!(cur > start);
+        // Count any matching bytes that occur before the first aligned
+        // boundary.
+ let mut count = generic::count_byte_by_byte(start, cur, confirm);
+ if len <= One::LOOP_BYTES {
+ return count + generic::count_byte_by_byte(cur, end, confirm);
+ }
+ debug_assert!(end.sub(One::LOOP_BYTES) >= start);
+ while cur <= end.sub(One::LOOP_BYTES) {
+ debug_assert_eq!(0, cur.as_usize() % USIZE_BYTES);
+
+ let a = cur.cast::<usize>().read();
+ let b = cur.add(USIZE_BYTES).cast::<usize>().read();
+ count += self.count_bytes(a);
+ count += self.count_bytes(b);
+ cur = cur.add(One::LOOP_BYTES);
+ }
+ count += generic::count_byte_by_byte(cur, end, confirm);
+ count
+ }
+
+ /// Returns an iterator over all occurrences of the needle byte in the
+ /// given haystack.
+ ///
+ /// The iterator returned implements `DoubleEndedIterator`. This means it
+ /// can also be used to find occurrences in reverse order.
+ pub fn iter<'a, 'h>(&'a self, haystack: &'h [u8]) -> OneIter<'a, 'h> {
+ OneIter { searcher: self, it: generic::Iter::new(haystack) }
+ }
+
+ #[inline(always)]
+ fn has_needle(&self, chunk: usize) -> bool {
+ has_zero_byte(self.v1 ^ chunk)
+ }
+
+ #[inline(always)]
+ fn count_bytes(&self, chunk: usize) -> usize {
+ count_bytes(self.v1 ^ chunk)
+ }
+
+ #[inline(always)]
+ fn confirm(&self, haystack_byte: u8) -> bool {
+ self.s1 == haystack_byte
+ }
+}
+
+/// An iterator over all occurrences of a single byte in a haystack.
+///
+/// This iterator implements `DoubleEndedIterator`, which means it can also be
+/// used to find occurrences in reverse order.
+///
+/// This iterator is created by the [`One::iter`] method.
+///
+/// The lifetime parameters are as follows:
+///
+/// * `'a` refers to the lifetime of the underlying [`One`] searcher.
+/// * `'h` refers to the lifetime of the haystack being searched.
+#[derive(Clone, Debug)]
+pub struct OneIter<'a, 'h> {
+ /// The underlying memchr searcher.
+ searcher: &'a One,
+ /// Generic iterator implementation.
+ it: generic::Iter<'h>,
+}
+
+impl<'a, 'h> Iterator for OneIter<'a, 'h> {
+ type Item = usize;
+
+ #[inline]
+ fn next(&mut self) -> Option<usize> {
+ // SAFETY: We rely on the generic iterator to provide valid start
+ // and end pointers, but we guarantee that any pointer returned by
+ // 'find_raw' falls within the bounds of the start and end pointer.
+ unsafe { self.it.next(|s, e| self.searcher.find_raw(s, e)) }
+ }
+
+ #[inline]
+ fn count(self) -> usize {
+ self.it.count(|s, e| {
+ // SAFETY: We rely on our generic iterator to return valid start
+ // and end pointers.
+ unsafe { self.searcher.count_raw(s, e) }
+ })
+ }
+
+ #[inline]
+ fn size_hint(&self) -> (usize, Option<usize>) {
+ self.it.size_hint()
+ }
+}
+
+impl<'a, 'h> DoubleEndedIterator for OneIter<'a, 'h> {
+ #[inline]
+ fn next_back(&mut self) -> Option<usize> {
+ // SAFETY: We rely on the generic iterator to provide valid start
+ // and end pointers, but we guarantee that any pointer returned by
+ // 'rfind_raw' falls within the bounds of the start and end pointer.
+ unsafe { self.it.next_back(|s, e| self.searcher.rfind_raw(s, e)) }
+ }
+}
+
+/// Finds all occurrences of two bytes in a haystack.
+///
+/// That is, this reports matches of one of two possible bytes. For example,
+/// searching for `a` or `b` in `afoobar` would report matches at offsets `0`,
+/// `4` and `5`.
+#[derive(Clone, Copy, Debug)]
+pub struct Two {
+ s1: u8,
+ s2: u8,
+ v1: usize,
+ v2: usize,
+}
+
+impl Two {
+ /// Create a new searcher that finds occurrences of the two needle bytes
+ /// given.
+ #[inline]
+ pub fn new(needle1: u8, needle2: u8) -> Two {
+ Two {
+ s1: needle1,
+ s2: needle2,
+ v1: splat(needle1),
+ v2: splat(needle2),
+ }
+ }
+
+ /// A test-only routine so that we can bundle a bunch of quickcheck
+ /// properties into a single macro. Basically, this provides a constructor
+ /// that makes it identical to most other memchr implementations, which
+ /// have fallible constructors.
+ #[cfg(test)]
+ pub(crate) fn try_new(needle1: u8, needle2: u8) -> Option<Two> {
+ Some(Two::new(needle1, needle2))
+ }
+
+ /// Return the first occurrence of one of the needle bytes in the given
+ /// haystack. If no such occurrence exists, then `None` is returned.
+ ///
+ /// The occurrence is reported as an offset into `haystack`. Its maximum
+ /// value for a non-empty haystack is `haystack.len() - 1`.
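+    ///
+    /// # Example
+    ///
+    /// A brief sketch (the public path is assumed to be
+    /// `memchr::arch::all::memchr::Two`):
+    ///
+    /// ```
+    /// use memchr::arch::all::memchr::Two;
+    ///
+    /// let two = Two::new(b'z', b'q');
+    /// assert_eq!(Some(2), two.find(b"xyz"));
+    /// ```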
+ #[inline]
+ pub fn find(&self, haystack: &[u8]) -> Option<usize> {
+ // SAFETY: `find_raw` guarantees that if a pointer is returned, it
+ // falls within the bounds of the start and end pointers.
+ unsafe {
+ generic::search_slice_with_raw(haystack, |s, e| {
+ self.find_raw(s, e)
+ })
+ }
+ }
+
+ /// Return the last occurrence of one of the needle bytes in the given
+ /// haystack. If no such occurrence exists, then `None` is returned.
+ ///
+ /// The occurrence is reported as an offset into `haystack`. Its maximum
+ /// value for a non-empty haystack is `haystack.len() - 1`.
+ #[inline]
+ pub fn rfind(&self, haystack: &[u8]) -> Option<usize> {
+ // SAFETY: `find_raw` guarantees that if a pointer is returned, it
+ // falls within the bounds of the start and end pointers.
+ unsafe {
+ generic::search_slice_with_raw(haystack, |s, e| {
+ self.rfind_raw(s, e)
+ })
+ }
+ }
+
+ /// Like `find`, but accepts and returns raw pointers.
+ ///
+ /// When a match is found, the pointer returned is guaranteed to be
+ /// `>= start` and `< end`.
+ ///
+ /// This routine is useful if you're already using raw pointers and would
+ /// like to avoid converting back to a slice before executing a search.
+ ///
+ /// # Safety
+ ///
+ /// * Both `start` and `end` must be valid for reads.
+ /// * Both `start` and `end` must point to an initialized value.
+ /// * Both `start` and `end` must point to the same allocated object and
+ /// must either be in bounds or at most one byte past the end of the
+ /// allocated object.
+ /// * Both `start` and `end` must be _derived from_ a pointer to the same
+ /// object.
+ /// * The distance between `start` and `end` must not overflow `isize`.
+ /// * The distance being in bounds must not rely on "wrapping around" the
+ /// address space.
+ ///
+ /// Note that callers may pass a pair of pointers such that `start >= end`.
+ /// In that case, `None` will always be returned.
+ #[inline]
+ pub unsafe fn find_raw(
+ &self,
+ start: *const u8,
+ end: *const u8,
+ ) -> Option<*const u8> {
+ if start >= end {
+ return None;
+ }
+ let confirm = |b| self.confirm(b);
+ let len = end.distance(start);
+ if len < USIZE_BYTES {
+ return generic::fwd_byte_by_byte(start, end, confirm);
+ }
+
+ // The start of the search may not be aligned to `*const usize`,
+ // so we do an unaligned load here.
+ let chunk = start.cast::<usize>().read_unaligned();
+ if self.has_needle(chunk) {
+ return generic::fwd_byte_by_byte(start, end, confirm);
+ }
+
+ // And now we start our search at a guaranteed aligned position.
+        // The first iteration of the loop below will overlap with the
+        // unaligned chunk above in cases where the search starts at an
+        // unaligned offset, but that's okay since we only get here if the
+        // check above didn't find a match.
+ let mut cur =
+ start.add(USIZE_BYTES - (start.as_usize() & USIZE_ALIGN));
+ debug_assert!(cur > start);
+ debug_assert!(end.sub(USIZE_BYTES) >= start);
+ while cur <= end.sub(USIZE_BYTES) {
+ debug_assert_eq!(0, cur.as_usize() % USIZE_BYTES);
+
+ let chunk = cur.cast::<usize>().read();
+ if self.has_needle(chunk) {
+ break;
+ }
+ cur = cur.add(USIZE_BYTES);
+ }
+ generic::fwd_byte_by_byte(cur, end, confirm)
+ }
+
+ /// Like `rfind`, but accepts and returns raw pointers.
+ ///
+ /// When a match is found, the pointer returned is guaranteed to be
+ /// `>= start` and `< end`.
+ ///
+ /// This routine is useful if you're already using raw pointers and would
+ /// like to avoid converting back to a slice before executing a search.
+ ///
+ /// # Safety
+ ///
+ /// * Both `start` and `end` must be valid for reads.
+ /// * Both `start` and `end` must point to an initialized value.
+ /// * Both `start` and `end` must point to the same allocated object and
+ /// must either be in bounds or at most one byte past the end of the
+ /// allocated object.
+ /// * Both `start` and `end` must be _derived from_ a pointer to the same
+ /// object.
+ /// * The distance between `start` and `end` must not overflow `isize`.
+ /// * The distance being in bounds must not rely on "wrapping around" the
+ /// address space.
+ ///
+ /// Note that callers may pass a pair of pointers such that `start >= end`.
+ /// In that case, `None` will always be returned.
+ #[inline]
+ pub unsafe fn rfind_raw(
+ &self,
+ start: *const u8,
+ end: *const u8,
+ ) -> Option<*const u8> {
+ if start >= end {
+ return None;
+ }
+ let confirm = |b| self.confirm(b);
+ let len = end.distance(start);
+ if len < USIZE_BYTES {
+ return generic::rev_byte_by_byte(start, end, confirm);
+ }
+
+ let chunk = end.sub(USIZE_BYTES).cast::<usize>().read_unaligned();
+ if self.has_needle(chunk) {
+ return generic::rev_byte_by_byte(start, end, confirm);
+ }
+
+ let mut cur = end.sub(end.as_usize() & USIZE_ALIGN);
+ debug_assert!(start <= cur && cur <= end);
+ while cur >= start.add(USIZE_BYTES) {
+ debug_assert_eq!(0, cur.as_usize() % USIZE_BYTES);
+
+ let chunk = cur.sub(USIZE_BYTES).cast::<usize>().read();
+ if self.has_needle(chunk) {
+ break;
+ }
+ cur = cur.sub(USIZE_BYTES);
+ }
+ generic::rev_byte_by_byte(start, cur, confirm)
+ }
+
+ /// Returns an iterator over all occurrences of one of the needle bytes in
+ /// the given haystack.
+ ///
+ /// The iterator returned implements `DoubleEndedIterator`. This means it
+ /// can also be used to find occurrences in reverse order.
+ pub fn iter<'a, 'h>(&'a self, haystack: &'h [u8]) -> TwoIter<'a, 'h> {
+ TwoIter { searcher: self, it: generic::Iter::new(haystack) }
+ }
+
+ #[inline(always)]
+ fn has_needle(&self, chunk: usize) -> bool {
+ has_zero_byte(self.v1 ^ chunk) || has_zero_byte(self.v2 ^ chunk)
+ }
+
+ #[inline(always)]
+ fn confirm(&self, haystack_byte: u8) -> bool {
+ self.s1 == haystack_byte || self.s2 == haystack_byte
+ }
+}
+
+/// An iterator over all occurrences of two possible bytes in a haystack.
+///
+/// This iterator implements `DoubleEndedIterator`, which means it can also be
+/// used to find occurrences in reverse order.
+///
+/// This iterator is created by the [`Two::iter`] method.
+///
+/// The lifetime parameters are as follows:
+///
+/// * `'a` refers to the lifetime of the underlying [`Two`] searcher.
+/// * `'h` refers to the lifetime of the haystack being searched.
+#[derive(Clone, Debug)]
+pub struct TwoIter<'a, 'h> {
+ /// The underlying memchr searcher.
+ searcher: &'a Two,
+ /// Generic iterator implementation.
+ it: generic::Iter<'h>,
+}
+
+impl<'a, 'h> Iterator for TwoIter<'a, 'h> {
+ type Item = usize;
+
+ #[inline]
+ fn next(&mut self) -> Option<usize> {
+ // SAFETY: We rely on the generic iterator to provide valid start
+ // and end pointers, but we guarantee that any pointer returned by
+ // 'find_raw' falls within the bounds of the start and end pointer.
+ unsafe { self.it.next(|s, e| self.searcher.find_raw(s, e)) }
+ }
+
+ #[inline]
+ fn size_hint(&self) -> (usize, Option<usize>) {
+ self.it.size_hint()
+ }
+}
+
+impl<'a, 'h> DoubleEndedIterator for TwoIter<'a, 'h> {
+ #[inline]
+ fn next_back(&mut self) -> Option<usize> {
+ // SAFETY: We rely on the generic iterator to provide valid start
+ // and end pointers, but we guarantee that any pointer returned by
+ // 'rfind_raw' falls within the bounds of the start and end pointer.
+ unsafe { self.it.next_back(|s, e| self.searcher.rfind_raw(s, e)) }
+ }
+}
+
+/// Finds all occurrences of three bytes in a haystack.
+///
+/// That is, this reports matches of one of three possible bytes. For example,
+/// searching for `a`, `b` or `o` in `afoobar` would report matches at offsets
+/// `0`, `2`, `3`, `4` and `5`.
+#[derive(Clone, Copy, Debug)]
+pub struct Three {
+ s1: u8,
+ s2: u8,
+ s3: u8,
+ v1: usize,
+ v2: usize,
+ v3: usize,
+}
+
+impl Three {
+ /// Create a new searcher that finds occurrences of the three needle bytes
+ /// given.
+ #[inline]
+ pub fn new(needle1: u8, needle2: u8, needle3: u8) -> Three {
+ Three {
+ s1: needle1,
+ s2: needle2,
+ s3: needle3,
+ v1: splat(needle1),
+ v2: splat(needle2),
+ v3: splat(needle3),
+ }
+ }
+
+ /// A test-only routine so that we can bundle a bunch of quickcheck
+ /// properties into a single macro. Basically, this provides a constructor
+ /// that makes it identical to most other memchr implementations, which
+ /// have fallible constructors.
+ #[cfg(test)]
+ pub(crate) fn try_new(
+ needle1: u8,
+ needle2: u8,
+ needle3: u8,
+ ) -> Option<Three> {
+ Some(Three::new(needle1, needle2, needle3))
+ }
+
+ /// Return the first occurrence of one of the needle bytes in the given
+ /// haystack. If no such occurrence exists, then `None` is returned.
+ ///
+ /// The occurrence is reported as an offset into `haystack`. Its maximum
+ /// value for a non-empty haystack is `haystack.len() - 1`.
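+    ///
+    /// # Example
+    ///
+    /// A brief sketch (the public path is assumed to be
+    /// `memchr::arch::all::memchr::Three`):
+    ///
+    /// ```
+    /// use memchr::arch::all::memchr::Three;
+    ///
+    /// let three = Three::new(b'a', b'b', b'c');
+    /// assert_eq!(Some(2), three.find(b"xxcba"));
+    /// ```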
+ #[inline]
+ pub fn find(&self, haystack: &[u8]) -> Option<usize> {
+ // SAFETY: `find_raw` guarantees that if a pointer is returned, it
+ // falls within the bounds of the start and end pointers.
+ unsafe {
+ generic::search_slice_with_raw(haystack, |s, e| {
+ self.find_raw(s, e)
+ })
+ }
+ }
+
+ /// Return the last occurrence of one of the needle bytes in the given
+ /// haystack. If no such occurrence exists, then `None` is returned.
+ ///
+ /// The occurrence is reported as an offset into `haystack`. Its maximum
+ /// value for a non-empty haystack is `haystack.len() - 1`.
+ #[inline]
+ pub fn rfind(&self, haystack: &[u8]) -> Option<usize> {
+ // SAFETY: `find_raw` guarantees that if a pointer is returned, it
+ // falls within the bounds of the start and end pointers.
+ unsafe {
+ generic::search_slice_with_raw(haystack, |s, e| {
+ self.rfind_raw(s, e)
+ })
+ }
+ }
+
+ /// Like `find`, but accepts and returns raw pointers.
+ ///
+ /// When a match is found, the pointer returned is guaranteed to be
+ /// `>= start` and `< end`.
+ ///
+ /// This routine is useful if you're already using raw pointers and would
+ /// like to avoid converting back to a slice before executing a search.
+ ///
+ /// # Safety
+ ///
+ /// * Both `start` and `end` must be valid for reads.
+ /// * Both `start` and `end` must point to an initialized value.
+ /// * Both `start` and `end` must point to the same allocated object and
+ /// must either be in bounds or at most one byte past the end of the
+ /// allocated object.
+ /// * Both `start` and `end` must be _derived from_ a pointer to the same
+ /// object.
+ /// * The distance between `start` and `end` must not overflow `isize`.
+ /// * The distance being in bounds must not rely on "wrapping around" the
+ /// address space.
+ ///
+ /// Note that callers may pass a pair of pointers such that `start >= end`.
+ /// In that case, `None` will always be returned.
+ #[inline]
+ pub unsafe fn find_raw(
+ &self,
+ start: *const u8,
+ end: *const u8,
+ ) -> Option<*const u8> {
+ if start >= end {
+ return None;
+ }
+ let confirm = |b| self.confirm(b);
+ let len = end.distance(start);
+ if len < USIZE_BYTES {
+ return generic::fwd_byte_by_byte(start, end, confirm);
+ }
+
+ // The start of the search may not be aligned to `*const usize`,
+ // so we do an unaligned load here.
+ let chunk = start.cast::<usize>().read_unaligned();
+ if self.has_needle(chunk) {
+ return generic::fwd_byte_by_byte(start, end, confirm);
+ }
+
+ // And now we start our search at a guaranteed aligned position.
+        // The first iteration of the loop below will overlap with the
+        // unaligned chunk above in cases where the search starts at an
+        // unaligned offset, but that's okay since we only get here if the
+        // check above didn't find a match.
+ let mut cur =
+ start.add(USIZE_BYTES - (start.as_usize() & USIZE_ALIGN));
+ debug_assert!(cur > start);
+ debug_assert!(end.sub(USIZE_BYTES) >= start);
+ while cur <= end.sub(USIZE_BYTES) {
+ debug_assert_eq!(0, cur.as_usize() % USIZE_BYTES);
+
+ let chunk = cur.cast::<usize>().read();
+ if self.has_needle(chunk) {
+ break;
+ }
+ cur = cur.add(USIZE_BYTES);
+ }
+ generic::fwd_byte_by_byte(cur, end, confirm)
+ }
+
+ /// Like `rfind`, but accepts and returns raw pointers.
+ ///
+ /// When a match is found, the pointer returned is guaranteed to be
+ /// `>= start` and `< end`.
+ ///
+ /// This routine is useful if you're already using raw pointers and would
+ /// like to avoid converting back to a slice before executing a search.
+ ///
+ /// # Safety
+ ///
+ /// * Both `start` and `end` must be valid for reads.
+ /// * Both `start` and `end` must point to an initialized value.
+ /// * Both `start` and `end` must point to the same allocated object and
+ /// must either be in bounds or at most one byte past the end of the
+ /// allocated object.
+ /// * Both `start` and `end` must be _derived from_ a pointer to the same
+ /// object.
+ /// * The distance between `start` and `end` must not overflow `isize`.
+ /// * The distance being in bounds must not rely on "wrapping around" the
+ /// address space.
+ ///
+ /// Note that callers may pass a pair of pointers such that `start >= end`.
+ /// In that case, `None` will always be returned.
+ #[inline]
+ pub unsafe fn rfind_raw(
+ &self,
+ start: *const u8,
+ end: *const u8,
+ ) -> Option<*const u8> {
+ if start >= end {
+ return None;
+ }
+ let confirm = |b| self.confirm(b);
+ let len = end.distance(start);
+ if len < USIZE_BYTES {
+ return generic::rev_byte_by_byte(start, end, confirm);
+ }
+
+ let chunk = end.sub(USIZE_BYTES).cast::<usize>().read_unaligned();
+ if self.has_needle(chunk) {
+ return generic::rev_byte_by_byte(start, end, confirm);
+ }
+
+ let mut cur = end.sub(end.as_usize() & USIZE_ALIGN);
+ debug_assert!(start <= cur && cur <= end);
+ while cur >= start.add(USIZE_BYTES) {
+ debug_assert_eq!(0, cur.as_usize() % USIZE_BYTES);
+
+ let chunk = cur.sub(USIZE_BYTES).cast::<usize>().read();
+ if self.has_needle(chunk) {
+ break;
+ }
+ cur = cur.sub(USIZE_BYTES);
+ }
+ generic::rev_byte_by_byte(start, cur, confirm)
+ }
+
+ /// Returns an iterator over all occurrences of one of the needle bytes in
+ /// the given haystack.
+ ///
+ /// The iterator returned implements `DoubleEndedIterator`. This means it
+ /// can also be used to find occurrences in reverse order.
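+    ///
+    /// # Example
+    ///
+    /// A brief sketch (same path assumption as for [`Three::find`]):
+    ///
+    /// ```
+    /// use memchr::arch::all::memchr::Three;
+    ///
+    /// let three = Three::new(b'a', b'b', b'c');
+    /// let matches: Vec<usize> = three.iter(b"xacz").collect();
+    /// assert_eq!(vec![1, 2], matches);
+    /// ```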
+ pub fn iter<'a, 'h>(&'a self, haystack: &'h [u8]) -> ThreeIter<'a, 'h> {
+ ThreeIter { searcher: self, it: generic::Iter::new(haystack) }
+ }
+
+ #[inline(always)]
+ fn has_needle(&self, chunk: usize) -> bool {
+ has_zero_byte(self.v1 ^ chunk)
+ || has_zero_byte(self.v2 ^ chunk)
+ || has_zero_byte(self.v3 ^ chunk)
+ }
+
+ #[inline(always)]
+ fn confirm(&self, haystack_byte: u8) -> bool {
+ self.s1 == haystack_byte
+ || self.s2 == haystack_byte
+ || self.s3 == haystack_byte
+ }
+}
+
+/// An iterator over all occurrences of three possible bytes in a haystack.
+///
+/// This iterator implements `DoubleEndedIterator`, which means it can also be
+/// used to find occurrences in reverse order.
+///
+/// This iterator is created by the [`Three::iter`] method.
+///
+/// The lifetime parameters are as follows:
+///
+/// * `'a` refers to the lifetime of the underlying [`Three`] searcher.
+/// * `'h` refers to the lifetime of the haystack being searched.
+#[derive(Clone, Debug)]
+pub struct ThreeIter<'a, 'h> {
+ /// The underlying memchr searcher.
+ searcher: &'a Three,
+ /// Generic iterator implementation.
+ it: generic::Iter<'h>,
+}
+
+impl<'a, 'h> Iterator for ThreeIter<'a, 'h> {
+ type Item = usize;
+
+ #[inline]
+ fn next(&mut self) -> Option<usize> {
+ // SAFETY: We rely on the generic iterator to provide valid start
+ // and end pointers, but we guarantee that any pointer returned by
+ // 'find_raw' falls within the bounds of the start and end pointer.
+ unsafe { self.it.next(|s, e| self.searcher.find_raw(s, e)) }
+ }
+
+ #[inline]
+ fn size_hint(&self) -> (usize, Option<usize>) {
+ self.it.size_hint()
+ }
+}
+
+impl<'a, 'h> DoubleEndedIterator for ThreeIter<'a, 'h> {
+ #[inline]
+ fn next_back(&mut self) -> Option<usize> {
+ // SAFETY: We rely on the generic iterator to provide valid start
+ // and end pointers, but we guarantee that any pointer returned by
+ // 'rfind_raw' falls within the bounds of the start and end pointer.
+ unsafe { self.it.next_back(|s, e| self.searcher.rfind_raw(s, e)) }
+ }
+}
+
+/// Return `true` if `x` contains any zero byte.
+///
+/// That is, this routine treats `x` as a register of 8-bit lanes and returns
+/// true when any of those lanes is `0`.
+///
+/// From "Matters Computational" by J. Arndt.
+#[inline(always)]
+fn has_zero_byte(x: usize) -> bool {
+ // "The idea is to subtract one from each of the bytes and then look for
+ // bytes where the borrow propagated all the way to the most significant
+ // bit."
+ const LO: usize = splat(0x01);
+ const HI: usize = splat(0x80);
+
+ (x.wrapping_sub(LO) & !x & HI) != 0
+}
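+
+// A worked sketch of the trick above (illustrative, looking at one lane and
+// ignoring inter-lane borrows): for a lane holding 0x00, `x - 0x01` wraps to
+// 0xff and sets the lane's high bit, while `!x` also has the high bit set
+// because the lane had no bits set, so the masked result is non-zero. For a
+// lane holding, say, 0x01, `x - 0x01` is 0x00 and the test fails.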
+
+/// Count the number of 8-bit lanes in `chunk` for which the zero detection
+/// test used by `has_zero_byte` fires. (Callers XOR a needle splat into the
+/// chunk first, so a zero lane corresponds to a needle match.)
+#[inline(always)]
+fn count_bytes(chunk: usize) -> usize {
+ const LO: usize = splat(0x01);
+ const HI: usize = splat(0x80);
+
+ (chunk.wrapping_sub(LO) & !chunk & HI).count_ones() as usize
+}
+
+/// Repeat the given byte across every 8-bit lane of a word sized number.
+/// For example, if `b` is `\x4E` or
+/// `01001110` in binary, then the returned value on a 32-bit system would be:
+/// `01001110_01001110_01001110_01001110`.
+#[inline(always)]
+const fn splat(b: u8) -> usize {
+ // TODO: use `usize::from` once it can be used in const context.
+ (b as usize) * (usize::MAX / 255)
+}
+
+#[cfg(test)]
+mod tests {
+ use super::*;
+
+ define_memchr_quickcheck!(super, try_new);
+
+ #[test]
+ fn forward_one() {
+ crate::tests::memchr::Runner::new(1).forward_iter(
+ |haystack, needles| {
+ Some(One::new(needles[0]).iter(haystack).collect())
+ },
+ )
+ }
+
+ #[test]
+ fn reverse_one() {
+ crate::tests::memchr::Runner::new(1).reverse_iter(
+ |haystack, needles| {
+ Some(One::new(needles[0]).iter(haystack).rev().collect())
+ },
+ )
+ }
+
+ #[test]
+ fn count_one() {
+ crate::tests::memchr::Runner::new(1).count_iter(|haystack, needles| {
+ Some(One::new(needles[0]).iter(haystack).count())
+ })
+ }
+
+ #[test]
+ fn forward_two() {
+ crate::tests::memchr::Runner::new(2).forward_iter(
+ |haystack, needles| {
+ let n1 = needles.get(0).copied()?;
+ let n2 = needles.get(1).copied()?;
+ Some(Two::new(n1, n2).iter(haystack).collect())
+ },
+ )
+ }
+
+ #[test]
+ fn reverse_two() {
+ crate::tests::memchr::Runner::new(2).reverse_iter(
+ |haystack, needles| {
+ let n1 = needles.get(0).copied()?;
+ let n2 = needles.get(1).copied()?;
+ Some(Two::new(n1, n2).iter(haystack).rev().collect())
+ },
+ )
+ }
+
+ #[test]
+ fn forward_three() {
+ crate::tests::memchr::Runner::new(3).forward_iter(
+ |haystack, needles| {
+ let n1 = needles.get(0).copied()?;
+ let n2 = needles.get(1).copied()?;
+ let n3 = needles.get(2).copied()?;
+ Some(Three::new(n1, n2, n3).iter(haystack).collect())
+ },
+ )
+ }
+
+ #[test]
+ fn reverse_three() {
+ crate::tests::memchr::Runner::new(3).reverse_iter(
+ |haystack, needles| {
+ let n1 = needles.get(0).copied()?;
+ let n2 = needles.get(1).copied()?;
+ let n3 = needles.get(2).copied()?;
+ Some(Three::new(n1, n2, n3).iter(haystack).rev().collect())
+ },
+ )
+ }
+
+ // This was found by quickcheck in the course of refactoring this crate
+ // after memchr 2.5.0.
+ #[test]
+ fn regression_double_ended_iterator() {
+ let finder = One::new(b'a');
+ let haystack = "a";
+ let mut it = finder.iter(haystack.as_bytes());
+ assert_eq!(Some(0), it.next());
+ assert_eq!(None, it.next_back());
+ }
+}
diff --git a/vendor/memchr/src/arch/all/mod.rs b/vendor/memchr/src/arch/all/mod.rs
new file mode 100644
index 000000000..b3d0ca2e9
--- /dev/null
+++ b/vendor/memchr/src/arch/all/mod.rs
@@ -0,0 +1,236 @@
+/*!
+Contains architecture independent routines.
+
+These routines are often used as a "fallback" implementation when the more
+specialized architecture dependent routines are unavailable.
+*/
+
+pub mod memchr;
+pub mod packedpair;
+pub mod rabinkarp;
+#[cfg(feature = "alloc")]
+pub mod shiftor;
+pub mod twoway;
+
+/// Returns true if and only if `needle` is a prefix of `haystack`.
+///
+/// This uses a latency optimized variant of `memcmp` internally which *might*
+/// make this faster for very short strings.
+///
+/// # Inlining
+///
+/// This routine is marked `inline(always)`. If you want to call this function
+/// in a way that is not always inlined, you'll need to wrap a call to it in
+/// another function that is marked as `inline(never)` or just `inline`.
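+///
+/// # Example
+///
+/// A short sketch (the public path is assumed to be
+/// `memchr::arch::all::is_prefix`):
+///
+/// ```
+/// use memchr::arch::all::is_prefix;
+///
+/// assert!(is_prefix(b"foobar", b"foo"));
+/// assert!(!is_prefix(b"foobar", b"bar"));
+/// ```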
+#[inline(always)]
+pub fn is_prefix(haystack: &[u8], needle: &[u8]) -> bool {
+ needle.len() <= haystack.len()
+ && is_equal(&haystack[..needle.len()], needle)
+}
+
+/// Returns true if and only if `needle` is a suffix of `haystack`.
+///
+/// This uses a latency optimized variant of `memcmp` internally which *might*
+/// make this faster for very short strings.
+///
+/// # Inlining
+///
+/// This routine is marked `inline(always)`. If you want to call this function
+/// in a way that is not always inlined, you'll need to wrap a call to it in
+/// another function that is marked as `inline(never)` or just `inline`.
+#[inline(always)]
+pub fn is_suffix(haystack: &[u8], needle: &[u8]) -> bool {
+ needle.len() <= haystack.len()
+ && is_equal(&haystack[haystack.len() - needle.len()..], needle)
+}
+
+/// Compare corresponding bytes in `x` and `y` for equality.
+///
+/// That is, this returns true if and only if `x.len() == y.len()` and
+/// `x[i] == y[i]` for all `0 <= i < x.len()`.
+///
+/// # Inlining
+///
+/// This routine is marked `inline(always)`. If you want to call this function
+/// in a way that is not always inlined, you'll need to wrap a call to it in
+/// another function that is marked as `inline(never)` or just `inline`.
+///
+/// # Motivation
+///
+/// Why not use slice equality instead? Well, slice equality usually results in
+/// a call out to the current platform's `libc` which might not be inlineable
+/// or have other overhead. This routine isn't guaranteed to be a win, but it
+/// might be in some cases.
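+///
+/// # Example
+///
+/// A short sketch (the public path is assumed to be
+/// `memchr::arch::all::is_equal`):
+///
+/// ```
+/// use memchr::arch::all::is_equal;
+///
+/// assert!(is_equal(b"abc", b"abc"));
+/// assert!(!is_equal(b"abc", b"abz"));
+/// ```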
+#[inline(always)]
+pub fn is_equal(x: &[u8], y: &[u8]) -> bool {
+ if x.len() != y.len() {
+ return false;
+ }
+ // SAFETY: Our pointers are derived directly from borrowed slices which
+ // uphold all of our safety guarantees except for length. We account for
+ // length with the check above.
+ unsafe { is_equal_raw(x.as_ptr(), y.as_ptr(), x.len()) }
+}
+
+/// Compare `n` bytes at the given pointers for equality.
+///
+/// This returns true if and only if `*x.add(i) == *y.add(i)` for all
+/// `0 <= i < n`.
+///
+/// # Inlining
+///
+/// This routine is marked `inline(always)`. If you want to call this function
+/// in a way that is not always inlined, you'll need to wrap a call to it in
+/// another function that is marked as `inline(never)` or just `inline`.
+///
+/// # Motivation
+///
+/// Why not use slice equality instead? Well, slice equality usually results in
+/// a call out to the current platform's `libc` which might not be inlineable
+/// or have other overhead. This routine isn't guaranteed to be a win, but it
+/// might be in some cases.
+///
+/// # Safety
+///
+/// * Both `x` and `y` must be valid for reads of up to `n` bytes.
+/// * Both `x` and `y` must point to an initialized value.
+/// * Both `x` and `y` must each point to an allocated object and
+/// must either be in bounds or at most one byte past the end of the
+/// allocated object. `x` and `y` do not need to point to the same allocated
+/// object, but they may.
+/// * Both `x` and `y` must be _derived from_ a pointer to their respective
+/// allocated objects.
+/// * The distance between `x` and `x+n` must not overflow `isize`. Similarly
+/// for `y` and `y+n`.
+/// * The distance being in bounds must not rely on "wrapping around" the
+/// address space.
+#[inline(always)]
+pub unsafe fn is_equal_raw(
+ mut x: *const u8,
+ mut y: *const u8,
+ n: usize,
+) -> bool {
+ // If we don't have enough bytes to do 4-byte at a time loads, then
+ // handle each possible length specially. Note that I used to have a
+ // byte-at-a-time loop here and that turned out to be quite a bit slower
+ // for the memmem/pathological/defeat-simple-vector-alphabet benchmark.
+ if n < 4 {
+ return match n {
+ 0 => true,
+ 1 => x.read() == y.read(),
+ 2 => {
+ x.cast::<u16>().read_unaligned()
+ == y.cast::<u16>().read_unaligned()
+ }
+ // I also tried copy_nonoverlapping here and it looks like the
+ // codegen is the same.
+ 3 => x.cast::<[u8; 3]>().read() == y.cast::<[u8; 3]>().read(),
+ _ => unreachable!(),
+ };
+ }
+    // When we have 4 or more bytes to compare, we proceed in chunks of 4 at
+    // a time using unaligned loads.
+ //
+ // Also, why do 4 byte loads instead of, say, 8 byte loads? The reason is
+ // that this particular version of memcmp is likely to be called with tiny
+ // needles. That means that if we do 8 byte loads, then a higher proportion
+ // of memcmp calls will use the slower variant above. With that said, this
+ // is a hypothesis and is only loosely supported by benchmarks. There's
+ // likely some improvement that could be made here. The main thing here
+ // though is to optimize for latency, not throughput.
+
+ // SAFETY: The caller is responsible for ensuring the pointers we get are
+ // valid and readable for at least `n` bytes. We also do unaligned loads,
+ // so there's no need to ensure we're aligned. (This is justified by this
+ // routine being specifically for short strings.)
+ let xend = x.add(n.wrapping_sub(4));
+ let yend = y.add(n.wrapping_sub(4));
+ while x < xend {
+ let vx = x.cast::<u32>().read_unaligned();
+ let vy = y.cast::<u32>().read_unaligned();
+ if vx != vy {
+ return false;
+ }
+ x = x.add(4);
+ y = y.add(4);
+ }
+ let vx = xend.cast::<u32>().read_unaligned();
+ let vy = yend.cast::<u32>().read_unaligned();
+ vx == vy
+}
+
+#[cfg(test)]
+mod tests {
+ use super::*;
+
+ #[test]
+ fn equals_different_lengths() {
+ assert!(!is_equal(b"", b"a"));
+ assert!(!is_equal(b"a", b""));
+ assert!(!is_equal(b"ab", b"a"));
+ assert!(!is_equal(b"a", b"ab"));
+ }
+
+ #[test]
+ fn equals_mismatch() {
+ let one_mismatch = [
+ (&b"a"[..], &b"x"[..]),
+ (&b"ab"[..], &b"ax"[..]),
+ (&b"abc"[..], &b"abx"[..]),
+ (&b"abcd"[..], &b"abcx"[..]),
+ (&b"abcde"[..], &b"abcdx"[..]),
+ (&b"abcdef"[..], &b"abcdex"[..]),
+ (&b"abcdefg"[..], &b"abcdefx"[..]),
+ (&b"abcdefgh"[..], &b"abcdefgx"[..]),
+ (&b"abcdefghi"[..], &b"abcdefghx"[..]),
+ (&b"abcdefghij"[..], &b"abcdefghix"[..]),
+ (&b"abcdefghijk"[..], &b"abcdefghijx"[..]),
+ (&b"abcdefghijkl"[..], &b"abcdefghijkx"[..]),
+ (&b"abcdefghijklm"[..], &b"abcdefghijklx"[..]),
+ (&b"abcdefghijklmn"[..], &b"abcdefghijklmx"[..]),
+ ];
+ for (x, y) in one_mismatch {
+ assert_eq!(x.len(), y.len(), "lengths should match");
+ assert!(!is_equal(x, y));
+ assert!(!is_equal(y, x));
+ }
+ }
+
+ #[test]
+ fn equals_yes() {
+ assert!(is_equal(b"", b""));
+ assert!(is_equal(b"a", b"a"));
+ assert!(is_equal(b"ab", b"ab"));
+ assert!(is_equal(b"abc", b"abc"));
+ assert!(is_equal(b"abcd", b"abcd"));
+ assert!(is_equal(b"abcde", b"abcde"));
+ assert!(is_equal(b"abcdef", b"abcdef"));
+ assert!(is_equal(b"abcdefg", b"abcdefg"));
+ assert!(is_equal(b"abcdefgh", b"abcdefgh"));
+ assert!(is_equal(b"abcdefghi", b"abcdefghi"));
+ }
+
+ #[test]
+ fn prefix() {
+ assert!(is_prefix(b"", b""));
+ assert!(is_prefix(b"a", b""));
+ assert!(is_prefix(b"ab", b""));
+ assert!(is_prefix(b"foo", b"foo"));
+ assert!(is_prefix(b"foobar", b"foo"));
+
+ assert!(!is_prefix(b"foo", b"fob"));
+ assert!(!is_prefix(b"foobar", b"fob"));
+ }
+
+ #[test]
+ fn suffix() {
+ assert!(is_suffix(b"", b""));
+ assert!(is_suffix(b"a", b""));
+ assert!(is_suffix(b"ab", b""));
+ assert!(is_suffix(b"foo", b"foo"));
+ assert!(is_suffix(b"foobar", b"bar"));
+
+ assert!(!is_suffix(b"foo", b"goo"));
+ assert!(!is_suffix(b"foobar", b"gar"));
+ }
+}
diff --git a/vendor/memchr/src/memmem/byte_frequencies.rs b/vendor/memchr/src/arch/all/packedpair/default_rank.rs
index c313b629d..6aa3895e6 100644
--- a/vendor/memchr/src/memmem/byte_frequencies.rs
+++ b/vendor/memchr/src/arch/all/packedpair/default_rank.rs
@@ -1,4 +1,4 @@
-pub const BYTE_FREQUENCIES: [u8; 256] = [
+pub(crate) const RANK: [u8; 256] = [
55, // '\x00'
52, // '\x01'
51, // '\x02'
diff --git a/vendor/memchr/src/arch/all/packedpair/mod.rs b/vendor/memchr/src/arch/all/packedpair/mod.rs
new file mode 100644
index 000000000..148a98552
--- /dev/null
+++ b/vendor/memchr/src/arch/all/packedpair/mod.rs
@@ -0,0 +1,359 @@
+/*!
+Provides an architecture independent implementation of the "packed pair"
+algorithm.
+
+The "packed pair" algorithm is based on the [generic SIMD] algorithm. The main
+difference is that it (by default) uses a background distribution of byte
+frequencies to heuristically select the pair of bytes to search for. Note that
+this module provides an architecture independent version that doesn't do as
+good a job of keeping the search for candidates within a SIMD hot path. It
+can, however, be good enough in many circumstances.
+
+[generic SIMD]: http://0x80.pl/articles/simd-strfind.html#first-and-last
+*/
+
+use crate::memchr;
+
+mod default_rank;
+
+/// An architecture independent "packed pair" finder.
+///
+/// This finder picks two bytes that it believes have high predictive power for
+/// indicating an overall match of a needle. At search time, it reports offsets
+/// where the needle could match based on whether the pair of bytes it chose
+/// match.
+///
+/// This is architecture independent because it utilizes `memchr` to find the
+/// occurrence of one of the bytes in the pair, and then checks whether the
+/// second byte matches. If it does, in the case of [`Finder::find_prefilter`],
+/// the location at which the needle could match is returned.
+///
+/// It is generally preferred to use architecture specific routines for a
+/// "packed pair" prefilter, but this can be a useful fallback when the
+/// architecture independent routines are unavailable.
+#[derive(Clone, Copy, Debug)]
+pub struct Finder {
+ pair: Pair,
+ byte1: u8,
+ byte2: u8,
+}
+
+impl Finder {
+ /// Create a new prefilter that reports possible locations where the given
+ /// needle matches.
+ #[inline]
+ pub fn new(needle: &[u8]) -> Option<Finder> {
+ Finder::with_pair(needle, Pair::new(needle)?)
+ }
+
+ /// Create a new prefilter using the pair given.
+ ///
+ /// If the prefilter could not be constructed, then `None` is returned.
+ ///
+ /// This constructor permits callers to control precisely which pair of
+ /// bytes is used as a predicate.
+ #[inline]
+ pub fn with_pair(needle: &[u8], pair: Pair) -> Option<Finder> {
+ let byte1 = needle[usize::from(pair.index1())];
+ let byte2 = needle[usize::from(pair.index2())];
+ // Currently this can never fail so we could just return a Finder,
+ // but it's conceivable this could change.
+ Some(Finder { pair, byte1, byte2 })
+ }
+
+ /// Run this finder on the given haystack as a prefilter.
+ ///
+ /// If a candidate match is found, then an offset where the needle *could*
+ /// begin in the haystack is returned.
+ #[inline]
+ pub fn find_prefilter(&self, haystack: &[u8]) -> Option<usize> {
+ let mut i = 0;
+ let index1 = usize::from(self.pair.index1());
+ let index2 = usize::from(self.pair.index2());
+ loop {
+ // Use a fast vectorized implementation to skip to the next
+ // occurrence of the rarest byte (heuristically chosen) in the
+ // needle.
+ i += memchr(self.byte1, &haystack[i..])?;
+ let found = i;
+ i += 1;
+
+ // If we can't align our first byte match with the haystack, then a
+ // match is impossible.
+ let aligned1 = match found.checked_sub(index1) {
+ None => continue,
+ Some(aligned1) => aligned1,
+ };
+
+ // Now align the second byte match with the haystack. A mismatch
+ // means that a match is impossible.
+ let aligned2 = match aligned1.checked_add(index2) {
+ None => continue,
+ Some(aligned_index2) => aligned_index2,
+ };
+ if haystack.get(aligned2).map_or(true, |&b| b != self.byte2) {
+ continue;
+ }
+
+ // We've done what we can. There might be a match here.
+ return Some(aligned1);
+ }
+ }
+
+ /// Returns the pair of offsets (into the needle) used to check as a
+ /// predicate before confirming whether a needle exists at a particular
+ /// position.
+ #[inline]
+ pub fn pair(&self) -> &Pair {
+ &self.pair
+ }
+}
+
+/// A pair of byte offsets into a needle to use as a predicate.
+///
+/// This pair is used as a predicate to quickly filter out positions in a
+/// haystack in which a needle cannot match. In some cases, this pair can even
+/// be used in vector algorithms such that the vector algorithm only switches
+/// over to scalar code once this pair has been found.
+///
+/// A pair of offsets can be used in both substring search implementations and
+/// in prefilters. The former will report matches of a needle in a haystack
+/// whereas the latter will only report possible matches of a needle.
+///
+/// The offsets are each limited to a maximum of 255 to keep memory usage low.
+/// Moreover, it's rarely advantageous to create a predicate using offsets
+/// greater than 255 anyway.
+///
+/// The only guarantee enforced on the pair of offsets is that they are not
+/// equivalent. It is not necessarily the case that `index1 < index2` for
+/// example. By convention, `index1` corresponds to the byte in the needle
+/// that is believed to be the most predictive. Note also that because of the
+/// requirement that the indices be both valid for the needle used to build
+/// the pair and not equal, it follows that a pair can only be constructed for
+/// needles with length at least 2.
+#[derive(Clone, Copy, Debug)]
+pub struct Pair {
+ index1: u8,
+ index2: u8,
+}
+
+impl Pair {
+ /// Create a new pair of offsets from the given needle.
+ ///
+ /// If a pair could not be created (for example, if the needle is too
+ /// short), then `None` is returned.
+ ///
+ /// This chooses the pair in the needle that is believed to be as
+ /// predictive of an overall match of the needle as possible.
+ #[inline]
+ pub fn new(needle: &[u8]) -> Option<Pair> {
+ Pair::with_ranker(needle, DefaultFrequencyRank)
+ }
+
+ /// Create a new pair of offsets from the given needle and ranker.
+ ///
+ /// This permits the caller to choose a background frequency distribution
+ /// with which bytes are selected. The idea is to select a pair of bytes
+ /// that is believed to strongly predict a match in the haystack. This
+ /// usually means selecting bytes that occur rarely in a haystack.
+ ///
+ /// If a pair could not be created (for example, if the needle is too
+ /// short), then `None` is returned.
+ #[inline]
+ pub fn with_ranker<R: HeuristicFrequencyRank>(
+ needle: &[u8],
+ ranker: R,
+ ) -> Option<Pair> {
+ if needle.len() <= 1 {
+ return None;
+ }
+ // Find the rarest two bytes. We make them distinct indices by
+ // construction. (The actual byte value may be the same in degenerate
+ // cases, but that's OK.)
+ let (mut rare1, mut index1) = (needle[0], 0);
+ let (mut rare2, mut index2) = (needle[1], 1);
+ if ranker.rank(rare2) < ranker.rank(rare1) {
+ core::mem::swap(&mut rare1, &mut rare2);
+ core::mem::swap(&mut index1, &mut index2);
+ }
+ let max = usize::from(core::u8::MAX);
+ for (i, &b) in needle.iter().enumerate().take(max).skip(2) {
+ if ranker.rank(b) < ranker.rank(rare1) {
+ rare2 = rare1;
+ index2 = index1;
+ rare1 = b;
+ index1 = u8::try_from(i).unwrap();
+ } else if b != rare1 && ranker.rank(b) < ranker.rank(rare2) {
+ rare2 = b;
+ index2 = u8::try_from(i).unwrap();
+ }
+ }
+ // While not strictly required for how a Pair is normally used, we
+ // really don't want these to be equivalent. If they were, it would
+ // reduce the effectiveness of candidate searching using these rare
+ // bytes by increasing the rate of false positives.
+ assert_ne!(index1, index2);
+ Some(Pair { index1, index2 })
+ }
+
+ /// Create a new pair using the offsets given for the needle given.
+ ///
+ /// This bypasses any sort of heuristic process for choosing the offsets
+ /// and permits the caller to choose the offsets themselves.
+ ///
+ /// Indices are limited to valid `u8` values so that a `Pair` uses less
+ /// memory. It is not possible to create a `Pair` with offsets bigger than
+ /// `u8::MAX`. It's likely that such a thing is not needed, but if it is,
+ /// it's suggested to build your own bespoke algorithm because you're
+ /// likely working on a very niche case. (File an issue if this suggestion
+ /// does not make sense to you.)
+ ///
+ /// If a pair could not be created (for example, if the needle is too
+ /// short), then `None` is returned.
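+ ///
+ /// # Example
+ ///
+ /// A minimal sketch with hand-picked offsets:
+ ///
+ /// ```
+ /// use memchr::arch::all::packedpair::Pair;
+ ///
+ /// let pair = Pair::with_indices(b"foobar", 0, 3).unwrap();
+ /// assert_eq!((0, 3), (pair.index1(), pair.index2()));
+ /// ```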
+ #[inline]
+ pub fn with_indices(
+ needle: &[u8],
+ index1: u8,
+ index2: u8,
+ ) -> Option<Pair> {
+ // While not strictly required for how a Pair is normally used, we
+ // really don't want these to be equivalent. If they were, it would
+ // reduce the effectiveness of candidate searching using these rare
+ // bytes by increasing the rate of false positives.
+ if index1 == index2 {
+ return None;
+ }
+ // Similarly, invalid indices mean the Pair is invalid too.
+ if usize::from(index1) >= needle.len() {
+ return None;
+ }
+ if usize::from(index2) >= needle.len() {
+ return None;
+ }
+ Some(Pair { index1, index2 })
+ }
+
+ /// Returns the first offset of the pair.
+ #[inline]
+ pub fn index1(&self) -> u8 {
+ self.index1
+ }
+
+ /// Returns the second offset of the pair.
+ #[inline]
+ pub fn index2(&self) -> u8 {
+ self.index2
+ }
+}
+
+/// This trait allows the user to customize the heuristic used to determine the
+/// relative frequency of a given byte in the dataset being searched.
+///
+/// The use of this trait can have a dramatic impact on performance depending
+/// on the type of data being searched. The details of why are explained in the
+/// docs of [`crate::memmem::Prefilter`]. To summarize, the core algorithm uses
+/// a prefilter to quickly identify candidate matches that are later verified
+/// more slowly. This prefilter is implemented in terms of trying to find
+/// `rare` bytes at specific offsets that will occur less frequently in the
+/// dataset. While the concept of a `rare` byte is similar for most datasets,
+/// there are some specific datasets (like binary executables) that have
+/// dramatically different byte distributions. For these datasets customizing
+/// the byte frequency heuristic can have a massive impact on performance, and
+/// might even need to be done at runtime.
+///
+/// The default implementation of `HeuristicFrequencyRank` reads from the
+/// static frequency table defined in `src/arch/all/packedpair/default_rank.rs`. This
+/// is optimal for most inputs, so if you are unsure of the impact of using a
+/// custom `HeuristicFrequencyRank` you should probably just use the default.
+///
+/// # Example
+///
+/// ```
+/// use memchr::{
+/// arch::all::packedpair::HeuristicFrequencyRank,
+/// memmem::FinderBuilder,
+/// };
+///
+/// /// A byte-frequency table that is good for scanning binary executables.
+/// struct Binary;
+///
+/// impl HeuristicFrequencyRank for Binary {
+/// fn rank(&self, byte: u8) -> u8 {
+/// const TABLE: [u8; 256] = [
+/// 255, 128, 61, 43, 50, 41, 27, 28, 57, 15, 21, 13, 24, 17, 17,
+/// 89, 58, 16, 11, 7, 14, 23, 7, 6, 24, 9, 6, 5, 9, 4, 7, 16,
+/// 68, 11, 9, 6, 88, 7, 4, 4, 23, 9, 4, 8, 8, 5, 10, 4, 30, 11,
+/// 9, 24, 11, 5, 5, 5, 19, 11, 6, 17, 9, 9, 6, 8,
+/// 48, 58, 11, 14, 53, 40, 9, 9, 254, 35, 3, 6, 52, 23, 6, 6, 27,
+/// 4, 7, 11, 14, 13, 10, 11, 11, 5, 2, 10, 16, 12, 6, 19,
+/// 19, 20, 5, 14, 16, 31, 19, 7, 14, 20, 4, 4, 19, 8, 18, 20, 24,
+/// 1, 25, 19, 58, 29, 10, 5, 15, 20, 2, 2, 9, 4, 3, 5,
+/// 51, 11, 4, 53, 23, 39, 6, 4, 13, 81, 4, 186, 5, 67, 3, 2, 15,
+/// 0, 0, 1, 3, 2, 0, 0, 5, 0, 0, 0, 2, 0, 0, 0,
+/// 12, 2, 1, 1, 3, 1, 1, 1, 6, 1, 2, 1, 3, 1, 1, 2, 9, 1, 1, 0,
+/// 2, 2, 4, 4, 11, 6, 7, 3, 6, 9, 4, 5,
+/// 46, 18, 8, 18, 17, 3, 8, 20, 16, 10, 3, 7, 175, 4, 6, 7, 13,
+/// 3, 7, 3, 3, 1, 3, 3, 10, 3, 1, 5, 2, 0, 1, 2,
+/// 16, 3, 5, 1, 6, 1, 1, 2, 58, 20, 3, 14, 12, 2, 1, 3, 16, 3, 5,
+/// 8, 3, 1, 8, 6, 17, 6, 5, 3, 8, 6, 13, 175,
+/// ];
+/// TABLE[byte as usize]
+/// }
+/// }
+/// // Create a new finder with the custom heuristic.
+/// let finder = FinderBuilder::new()
+/// .build_forward_with_ranker(Binary, b"\x00\x00\xdd\xdd");
+/// // Find needle with custom heuristic.
+/// assert!(finder.find(b"\x00\x00\x00\xdd\xdd").is_some());
+/// ```
+pub trait HeuristicFrequencyRank {
+ /// Return the heuristic frequency rank of the given byte. A lower rank
+ /// means the byte is believed to occur less frequently in the haystack.
+ ///
+ /// Some uses of this heuristic may treat arbitrary absolute rank values as
+ /// significant. For example, an implementation detail in this crate may
+ /// determine that heuristic prefilters are inappropriate if every byte in
+ /// the needle has a "high" rank.
+ fn rank(&self, byte: u8) -> u8;
+}
+
+/// The default byte frequency heuristic that is good for most haystacks.
+pub(crate) struct DefaultFrequencyRank;
+
+impl HeuristicFrequencyRank for DefaultFrequencyRank {
+ fn rank(&self, byte: u8) -> u8 {
+ self::default_rank::RANK[usize::from(byte)]
+ }
+}
+
+/// This permits passing any implementation of `HeuristicFrequencyRank` as a
+/// borrowed version of itself.
+impl<'a, R> HeuristicFrequencyRank for &'a R
+where
+ R: HeuristicFrequencyRank,
+{
+ fn rank(&self, byte: u8) -> u8 {
+ (**self).rank(byte)
+ }
+}
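+
+// Note that the blanket impl above means a ranker may also be passed by
+// reference. For example (a sketch using the crate-internal default ranker):
+//
+//     let pair = Pair::with_ranker(b"foobar", &DefaultFrequencyRank);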
+
+#[cfg(test)]
+mod tests {
+ use super::*;
+
+ #[test]
+ fn forward_packedpair() {
+ fn find(
+ haystack: &[u8],
+ needle: &[u8],
+ _index1: u8,
+ _index2: u8,
+ ) -> Option<Option<usize>> {
+ // We ignore the index positions requested since it winds up making
+ // this test too slow overall.
+ let f = Finder::new(needle)?;
+ Some(f.find_prefilter(haystack))
+ }
+ crate::tests::packedpair::Runner::new().fwd(find).run()
+ }
+}
diff --git a/vendor/memchr/src/arch/all/rabinkarp.rs b/vendor/memchr/src/arch/all/rabinkarp.rs
new file mode 100644
index 000000000..e0bafbac9
--- /dev/null
+++ b/vendor/memchr/src/arch/all/rabinkarp.rs
@@ -0,0 +1,390 @@
+/*!
+An implementation of the [Rabin-Karp substring search algorithm][rabinkarp].
+
+Rabin-Karp works by creating a hash of the needle provided and then computing
+a rolling hash for each needle sized window in the haystack. When the rolling
+hash matches the hash of the needle, a byte-wise comparison is done to check
+if a match exists. The worst case time complexity of Rabin-Karp is `O(m *
+n)` where `m ~ len(needle)` and `n ~ len(haystack)`. Its worst case space
+complexity is constant.
+
+The main utility of Rabin-Karp is that the searcher can be constructed very
+quickly with very little memory. This makes it especially useful when searching
+for small needles in small haystacks, as it might finish its search before a
+beefier algorithm (like Two-Way) even starts.
+
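+# Example
+
+A minimal usage sketch. The same needle must be passed at construction and
+search time:
+
+```
+use memchr::arch::all::rabinkarp::Finder;
+
+let finder = Finder::new(b"bar");
+assert_eq!(Some(3), finder.find(b"foobar", b"bar"));
+```
+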
+[rabinkarp]: https://en.wikipedia.org/wiki/Rabin%E2%80%93Karp_algorithm
+*/
+
+/*
+(This was the comment I wrote for this module originally when it was not
+exposed. The comment still looks useful, but it's a bit in the weeds, so it's
+not public itself.)
+
+This module implements the classical Rabin-Karp substring search algorithm,
+with no extra frills. While its use would seem to break our time complexity
+guarantee of O(m+n) (RK's time complexity is O(mn)), we are careful to only
+ever use RK on a constant subset of haystacks. The main point here is that
+RK has good latency properties for small needles/haystacks. It's very quick
+to compute a needle hash and zip through the haystack when compared to
+initializing Two-Way, for example. And this is especially useful for cases
+where the haystack is just too short for vector instructions to do much good.
+
+The hashing function used here is the same one recommended by ESMAJ.
+
+Another choice instead of Rabin-Karp would be Shift-Or. But its latency
+isn't quite as good since its preprocessing time is a bit more expensive
+(both in practice and in theory). However, perhaps Shift-Or has a place
+somewhere else for short patterns. I think the main problem is that it
+requires space proportional to the alphabet and the needle. If we, for
+example, supported needles up to length 16, then the total table size would be
+len(alphabet)*size_of::<u16>()==512 bytes. Which isn't exactly small, and it's
+probably bad to put that on the stack. So ideally, we'd throw it on the heap,
+but we'd really like to write as much code without using alloc/std as possible.
+But maybe it's worth the special casing. It's a TODO to benchmark.
+
+Wikipedia has a decent explanation, if a bit heavy on the theory:
+https://en.wikipedia.org/wiki/Rabin%E2%80%93Karp_algorithm
+
+But ESMAJ provides something a bit more concrete:
+http://www-igm.univ-mlv.fr/~lecroq/string/node5.html
+
+Finally, aho-corasick uses Rabin-Karp for multiple pattern match in some cases:
+https://github.com/BurntSushi/aho-corasick/blob/3852632f10587db0ff72ef29e88d58bf305a0946/src/packed/rabinkarp.rs
+*/
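+
+// To make the hash arithmetic below concrete: with `n = needle.len()` and
+// all arithmetic wrapping in `u32`, the rolling update is
+//
+//   hash(s[i+1..i+1+n]) = (hash(s[i..i+n]) - s[i] * 2^(n-1)) * 2 + s[i+n]
+//
+// `Hash::del` performs the subtraction (with `hash_2pow == 2^(n-1)`) and
+// `Hash::add` performs the doubling and addition.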
+
+use crate::ext::Pointer;
+
+/// A forward substring searcher using the Rabin-Karp algorithm.
+///
+/// Note that, as a lower level API, a `Finder` does not have access to the
+/// needle it was constructed with. For this reason, executing a search
+/// with a `Finder` requires passing both the needle and the haystack,
+/// where the needle is exactly equivalent to the one given to the `Finder`
+/// at construction time. This design was chosen so that callers can have
+/// more precise control over where and how many times a needle is stored.
+/// For example, this matters in cases where Rabin-Karp is just one of
+/// several possible substring search algorithms.
+#[derive(Clone, Debug)]
+pub struct Finder {
+ /// The actual hash.
+ hash: Hash,
+ /// The factor needed to multiply a byte by in order to subtract it from
+ /// the hash. It is defined to be 2^(n-1) (using wrapping exponentiation),
+ /// where n is the length of the needle. This is how we "remove" a byte
+ /// from the hash once the hash window rolls past it.
+ hash_2pow: u32,
+}
+
+impl Finder {
+ /// Create a new Rabin-Karp forward searcher for the given `needle`.
+ ///
+ /// The needle may be empty. The empty needle matches at every byte offset.
+ ///
+ /// Note that callers must pass the same needle to all search calls using
+ /// this `Finder`.
+ #[inline]
+ pub fn new(needle: &[u8]) -> Finder {
+ let mut s = Finder { hash: Hash::new(), hash_2pow: 1 };
+ let first_byte = match needle.get(0) {
+ None => return s,
+ Some(&first_byte) => first_byte,
+ };
+ s.hash.add(first_byte);
+ for b in needle.iter().copied().skip(1) {
+ s.hash.add(b);
+ s.hash_2pow = s.hash_2pow.wrapping_shl(1);
+ }
+ s
+ }
+
+ /// Return the first occurrence of the `needle` in the `haystack`
+ /// given. If no such occurrence exists, then `None` is returned.
+ ///
+ /// The `needle` provided must match the needle given to this finder at
+ /// construction time.
+ ///
+ /// The maximum value this can return is `haystack.len()`, which can only
+ /// occur when the needle and haystack both have length zero. Otherwise,
+ /// for non-empty haystacks, the maximum value is `haystack.len() - 1`.
+ #[inline]
+ pub fn find(&self, haystack: &[u8], needle: &[u8]) -> Option<usize> {
+ unsafe {
+ let hstart = haystack.as_ptr();
+ let hend = hstart.add(haystack.len());
+ let nstart = needle.as_ptr();
+ let nend = nstart.add(needle.len());
+ let found = self.find_raw(hstart, hend, nstart, nend)?;
+ Some(found.distance(hstart))
+ }
+ }
+
+ /// Like `find`, but accepts and returns raw pointers.
+ ///
+ /// When a match is found, the pointer returned is guaranteed to be
+ /// `>= start` and `<= end`. The pointer returned is only ever equivalent
+ /// to `end` when both the needle and haystack are empty. (That is, the
+ /// empty string matches the empty string.)
+ ///
+ /// This routine is useful if you're already using raw pointers and would
+ /// like to avoid converting back to a slice before executing a search.
+ ///
+ /// # Safety
+ ///
+ /// Note that `start` and `end` below refer to both pairs of pointers given
+ /// to this routine. That is, the conditions apply to both `hstart`/`hend`
+ /// and `nstart`/`nend`.
+ ///
+ /// * Both `start` and `end` must be valid for reads.
+ /// * Both `start` and `end` must point to an initialized value.
+ /// * Both `start` and `end` must point to the same allocated object and
+ /// must either be in bounds or at most one byte past the end of the
+ /// allocated object.
+ /// * Both `start` and `end` must be _derived from_ a pointer to the same
+ /// object.
+ /// * The distance between `start` and `end` must not overflow `isize`.
+ /// * The distance being in bounds must not rely on "wrapping around" the
+ /// address space.
+ /// * It must be the case that `start <= end`.
+ #[inline]
+ pub unsafe fn find_raw(
+ &self,
+ hstart: *const u8,
+ hend: *const u8,
+ nstart: *const u8,
+ nend: *const u8,
+ ) -> Option<*const u8> {
+ let hlen = hend.distance(hstart);
+ let nlen = nend.distance(nstart);
+ if nlen > hlen {
+ return None;
+ }
+ let mut cur = hstart;
+ let end = hend.sub(nlen);
+ let mut hash = Hash::forward(cur, cur.add(nlen));
+ loop {
+ if self.hash == hash && is_equal_raw(cur, nstart, nlen) {
+ return Some(cur);
+ }
+ if cur >= end {
+ return None;
+ }
+ hash.roll(self, cur.read(), cur.add(nlen).read());
+ cur = cur.add(1);
+ }
+ }
+}
+
+/// A reverse substring searcher using the Rabin-Karp algorithm.
+#[derive(Clone, Debug)]
+pub struct FinderRev(Finder);
+
+impl FinderRev {
+ /// Create a new Rabin-Karp reverse searcher for the given `needle`.
+ #[inline]
+ pub fn new(needle: &[u8]) -> FinderRev {
+ let mut s = FinderRev(Finder { hash: Hash::new(), hash_2pow: 1 });
+ let last_byte = match needle.last() {
+ None => return s,
+ Some(&last_byte) => last_byte,
+ };
+ s.0.hash.add(last_byte);
+ for b in needle.iter().rev().copied().skip(1) {
+ s.0.hash.add(b);
+ s.0.hash_2pow = s.0.hash_2pow.wrapping_shl(1);
+ }
+ s
+ }
+
+ /// Return the last occurrence of the `needle` in the `haystack`
+ /// given. If no such occurrence exists, then `None` is returned.
+ ///
+ /// The `needle` provided must match the needle given to this finder at
+ /// construction time.
+ ///
+ /// The maximum value this can return is `haystack.len()`, which can only
+ /// occur when the needle and haystack both have length zero. Otherwise,
+ /// for non-empty haystacks, the maximum value is `haystack.len() - 1`.
+ #[inline]
+ pub fn rfind(&self, haystack: &[u8], needle: &[u8]) -> Option<usize> {
+ unsafe {
+ let hstart = haystack.as_ptr();
+ let hend = hstart.add(haystack.len());
+ let nstart = needle.as_ptr();
+ let nend = nstart.add(needle.len());
+ let found = self.rfind_raw(hstart, hend, nstart, nend)?;
+ Some(found.distance(hstart))
+ }
+ }
+
+ /// Like `rfind`, but accepts and returns raw pointers.
+ ///
+ /// When a match is found, the pointer returned is guaranteed to be
+ /// `>= start` and `<= end`. The pointer returned is only ever equivalent
+ /// to `end` when both the needle and haystack are empty. (That is, the
+ /// empty string matches the empty string.)
+ ///
+ /// This routine is useful if you're already using raw pointers and would
+ /// like to avoid converting back to a slice before executing a search.
+ ///
+ /// # Safety
+ ///
+ /// Note that `start` and `end` below refer to both pairs of pointers given
+ /// to this routine. That is, the conditions apply to both `hstart`/`hend`
+ /// and `nstart`/`nend`.
+ ///
+ /// * Both `start` and `end` must be valid for reads.
+ /// * Both `start` and `end` must point to an initialized value.
+ /// * Both `start` and `end` must point to the same allocated object and
+ /// must either be in bounds or at most one byte past the end of the
+ /// allocated object.
+ /// * Both `start` and `end` must be _derived from_ a pointer to the same
+ /// object.
+ /// * The distance between `start` and `end` must not overflow `isize`.
+ /// * The distance being in bounds must not rely on "wrapping around" the
+ /// address space.
+ /// * It must be the case that `start <= end`.
+ #[inline]
+ pub unsafe fn rfind_raw(
+ &self,
+ hstart: *const u8,
+ hend: *const u8,
+ nstart: *const u8,
+ nend: *const u8,
+ ) -> Option<*const u8> {
+ let hlen = hend.distance(hstart);
+ let nlen = nend.distance(nstart);
+ if nlen > hlen {
+ return None;
+ }
+ let mut cur = hend.sub(nlen);
+ let start = hstart;
+ let mut hash = Hash::reverse(cur, cur.add(nlen));
+ loop {
+ if self.0.hash == hash && is_equal_raw(cur, nstart, nlen) {
+ return Some(cur);
+ }
+ if cur <= start {
+ return None;
+ }
+ cur = cur.sub(1);
+ hash.roll(&self.0, cur.add(nlen).read(), cur.read());
+ }
+ }
+}
+
+/// Whether RK is believed to be very fast for the given needle/haystack.
+#[inline]
+pub(crate) fn is_fast(haystack: &[u8], _needle: &[u8]) -> bool {
+ haystack.len() < 16
+}
+
+/// A Rabin-Karp hash. This might represent the hash of a needle, or the hash
+/// of a rolling window in the haystack.
+#[derive(Clone, Copy, Debug, Default, Eq, PartialEq)]
+struct Hash(u32);
+
+impl Hash {
+ /// Create a new hash that represents the empty string.
+ #[inline(always)]
+ fn new() -> Hash {
+ Hash(0)
+ }
+
+ /// Create a new hash from the bytes given for use in forward searches.
+ ///
+ /// # Safety
+ ///
+ /// The given pointers must be valid to read from within their range.
+ #[inline(always)]
+ unsafe fn forward(mut start: *const u8, end: *const u8) -> Hash {
+ let mut hash = Hash::new();
+ while start < end {
+ hash.add(start.read());
+ start = start.add(1);
+ }
+ hash
+ }
+
+ /// Create a new hash from the bytes given for use in reverse searches.
+ ///
+ /// # Safety
+ ///
+ /// The given pointers must be valid to read from within their range.
+ #[inline(always)]
+ unsafe fn reverse(start: *const u8, mut end: *const u8) -> Hash {
+ let mut hash = Hash::new();
+ while start < end {
+ end = end.sub(1);
+ hash.add(end.read());
+ }
+ hash
+ }
+
+ /// Add `new` to and remove `old` from this hash. The given finder must
+ /// be the one built for the needle being searched for, since it supplies
+ /// the factor needed to remove `old`.
+ ///
+ /// This is meant to be used when the rolling window of the haystack is
+ /// advanced.
+ #[inline(always)]
+ fn roll(&mut self, finder: &Finder, old: u8, new: u8) {
+ self.del(finder, old);
+ self.add(new);
+ }
+
+ /// Add a byte to this hash.
+ #[inline(always)]
+ fn add(&mut self, byte: u8) {
+ self.0 = self.0.wrapping_shl(1).wrapping_add(u32::from(byte));
+ }
+
+ /// Remove a byte from this hash. The given finder must be the one built
+ /// for the needle being searched for, since it supplies the removal
+ /// factor.
+ #[inline(always)]
+ fn del(&mut self, finder: &Finder, byte: u8) {
+ let factor = finder.hash_2pow;
+ self.0 = self.0.wrapping_sub(u32::from(byte).wrapping_mul(factor));
+ }
+}
+
+/// Returns true when `x[i] == y[i]` for all `0 <= i < n`.
+///
+/// We forcefully don't inline this to hint to the compiler that it is unlikely
+/// to be called. This causes the inner rabinkarp loop above to be a bit
+/// tighter and leads to some performance improvement. See the
+/// memmem/krate/prebuilt/sliceslice-words/words benchmark.
+///
+/// # Safety
+///
+/// Same as `crate::arch::all::is_equal_raw`.
+#[cold]
+#[inline(never)]
+unsafe fn is_equal_raw(x: *const u8, y: *const u8, n: usize) -> bool {
+ crate::arch::all::is_equal_raw(x, y, n)
+}
+
+#[cfg(test)]
+mod tests {
+ use super::*;
+
+ define_substring_forward_quickcheck!(|h, n| Some(
+ Finder::new(n).find(h, n)
+ ));
+ define_substring_reverse_quickcheck!(|h, n| Some(
+ FinderRev::new(n).rfind(h, n)
+ ));
+
+ #[test]
+ fn forward() {
+ crate::tests::substring::Runner::new()
+ .fwd(|h, n| Some(Finder::new(n).find(h, n)))
+ .run();
+ }
+
+ #[test]
+ fn reverse() {
+ crate::tests::substring::Runner::new()
+ .rev(|h, n| Some(FinderRev::new(n).rfind(h, n)))
+ .run();
+ }
+}
diff --git a/vendor/memchr/src/arch/all/shiftor.rs b/vendor/memchr/src/arch/all/shiftor.rs
new file mode 100644
index 000000000..b690564a6
--- /dev/null
+++ b/vendor/memchr/src/arch/all/shiftor.rs
@@ -0,0 +1,89 @@
+/*!
+An implementation of the [Shift-Or substring search algorithm][shiftor].
+
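+# Example
+
+A minimal usage sketch. `Finder::new` returns `None` for needles longer than
+15 bytes, the limit imposed by the `u16` mask type used in this module:
+
+```
+use memchr::arch::all::shiftor::Finder;
+
+let finder = Finder::new(b"bar").unwrap();
+assert_eq!(Some(3), finder.find(b"foobar"));
+```
+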
+[shiftor]: https://en.wikipedia.org/wiki/Bitap_algorithm
+*/
+
+use alloc::boxed::Box;
+
+/// The type of our mask.
+///
+/// While we don't expose any way to configure this in the public API, if one
+/// really needs less memory usage or support for longer needles, then it is
+/// suggested to copy the code from this module and modify it to fit those
+/// needs. The code below is written to be correct regardless of whether Mask
+/// is a u8, u16, u32, u64 or u128.
+type Mask = u16;
+
+/// A forward substring searcher using the Shift-Or algorithm.
+#[derive(Debug)]
+pub struct Finder {
+ masks: Box<[Mask; 256]>,
+ needle_len: usize,
+}
+
+impl Finder {
+ const MAX_NEEDLE_LEN: usize = (Mask::BITS - 1) as usize;
+
+ /// Create a new Shift-Or forward searcher for the given `needle`.
+ ///
+ /// The needle may be empty. The empty needle matches at every byte offset.
+ #[inline]
+ pub fn new(needle: &[u8]) -> Option<Finder> {
+ let needle_len = needle.len();
+ if needle_len > Finder::MAX_NEEDLE_LEN {
+ // A match is found when the bit at position `needle_len` is set in
+ // `result` in the search routine below, so the needle can't be longer
+ // than `Mask::BITS - 1` bytes (15 for the `u16` mask used here). A
+ // wider mask type would permit longer needles.
+ return None;
+ }
+ let mut searcher = Finder { masks: Box::from([!0; 256]), needle_len };
+ for (i, &byte) in needle.iter().enumerate() {
+ searcher.masks[usize::from(byte)] &= !(1 << i);
+ }
+ Some(searcher)
+ }
+
+ /// Return the first occurrence of the needle given to `Finder::new` in
+ /// the `haystack` given. If no such occurrence exists, then `None` is
+ /// returned.
+ ///
+ /// Unlike most other substring search implementations in this crate, this
+ /// finder does not require passing the needle at search time. A match can
+ /// be determined without the needle at all since the required information
+ /// is already encoded into this finder at construction time.
+ ///
+ /// The maximum value this can return is `haystack.len()`, which can only
+ /// occur when the needle and haystack both have length zero. Otherwise,
+ /// for non-empty haystacks, the maximum value is `haystack.len() - 1`.
+ #[inline]
+ pub fn find(&self, haystack: &[u8]) -> Option<usize> {
+ if self.needle_len == 0 {
+ return Some(0);
+ }
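+ // Invariant: just after the shift below, bit k of `result` (for
+ // k >= 1) is 0 if and only if the k bytes of haystack ending at the
+ // current position equal needle[..k]. Bit 0 is always 0 (seeded by
+ // `!1` and refilled by each shift), so a match may begin anywhere.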
+ let mut result = !1;
+ for (i, &byte) in haystack.iter().enumerate() {
+ result |= self.masks[usize::from(byte)];
+ result <<= 1;
+ if result & (1 << self.needle_len) == 0 {
+ return Some(i + 1 - self.needle_len);
+ }
+ }
+ None
+ }
+}
+
+#[cfg(test)]
+mod tests {
+ use super::*;
+
+ define_substring_forward_quickcheck!(|h, n| Some(Finder::new(n)?.find(h)));
+
+ #[test]
+ fn forward() {
+ crate::tests::substring::Runner::new()
+ .fwd(|h, n| Some(Finder::new(n)?.find(h)))
+ .run();
+ }
+}
diff --git a/vendor/memchr/src/memmem/twoway.rs b/vendor/memchr/src/arch/all/twoway.rs
index 7f82ed15d..0df3b4a86 100644
--- a/vendor/memchr/src/memmem/twoway.rs
+++ b/vendor/memchr/src/arch/all/twoway.rs
@@ -1,31 +1,62 @@
+/*!
+An implementation of the [Two-Way substring search algorithm][two-way].
+
+[`Finder`] can be built for forward searches, while [`FinderRev`] can be built
+for reverse searches.
+
+Two-Way makes for a nice general purpose substring search algorithm because of
+its time and space complexity properties. It also performs well in practice.
+Namely, with `m = len(needle)` and `n = len(haystack)`, Two-Way takes `O(m)`
+time to create a finder, `O(1)` space and `O(n)` search time. In other words,
+the preprocessing step is quick, doesn't require any heap memory and the worst
+case search time is guaranteed to be linear in the haystack regardless of the
+size of the needle.
+
+While vector algorithms will usually beat Two-Way handily, they also usually
+have pathological or edge cases that are better handled by Two-Way.
+Moreover, not all targets support vector algorithms or implementations for them
+simply may not exist yet.
+
+Two-Way can be found in the `memmem` implementations in at least [GNU libc] and
+[musl].
+
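+# Example
+
+A minimal usage sketch. As with the Rabin-Karp finder, the same needle must
+be passed at construction and search time:
+
+```
+use memchr::arch::all::twoway::Finder;
+
+let finder = Finder::new(b"needle");
+assert_eq!(Some(4), finder.find(b"my, needle", b"needle"));
+```
+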
+[two-way]: https://en.wikipedia.org/wiki/Two-way_string-matching_algorithm
+[GNU libc]: https://www.gnu.org/software/libc/
+[musl]: https://www.musl-libc.org/
+*/
+
use core::cmp;
-use crate::memmem::{prefilter::Pre, util};
+use crate::{
+ arch::all::{is_prefix, is_suffix},
+ memmem::Pre,
+};
-/// Two-Way search in the forward direction.
+/// A forward substring searcher that uses the Two-Way algorithm.
#[derive(Clone, Copy, Debug)]
-pub(crate) struct Forward(TwoWay);
+pub struct Finder(TwoWay);
-/// Two-Way search in the reverse direction.
+/// A reverse substring searcher that uses the Two-Way algorithm.
#[derive(Clone, Copy, Debug)]
-pub(crate) struct Reverse(TwoWay);
+pub struct FinderRev(TwoWay);
-/// An implementation of the TwoWay substring search algorithm, with heuristics
-/// for accelerating search based on frequency analysis.
+/// An implementation of the TwoWay substring search algorithm.
///
/// This searcher supports forward and reverse search, although not
-/// simultaneously. It runs in O(n + m) time and O(1) space, where
+/// simultaneously. It runs in `O(n + m)` time and `O(1)` space, where
/// `n ~ len(needle)` and `m ~ len(haystack)`.
///
/// The implementation here roughly matches that which was developed by
/// Crochemore and Perrin in their 1991 paper "Two-way string-matching." The
/// changes in this implementation are 1) the use of zero-based indices, 2) a
/// heuristic skip table based on the last byte (borrowed from Rust's standard
-/// library) and 3) the addition of heuristics for a fast skip loop. That is,
-/// (3) this will detect bytes that are believed to be rare in the needle and
-/// use fast vectorized instructions to find their occurrences quickly. The
-/// Two-Way algorithm is then used to confirm whether a match at that location
-/// occurred.
+/// library) and 3) the addition of heuristics for a fast skip loop. For (3),
+/// callers can pass any kind of prefilter they want, but usually it's one
+/// based on a heuristic that uses an approximate background frequency of bytes
+/// to choose rare bytes to quickly look for candidate match positions. Note
+/// though that currently, this prefilter functionality is not exposed directly
+/// in the public API. (File an issue if you want it and provide a use case
+/// please.)
///
/// The heuristic for fast skipping is automatically shut off if it's
/// detected to be ineffective at search time. Generally, this only occurs in
@@ -36,20 +67,20 @@ pub(crate) struct Reverse(TwoWay);
/// likely necessary to read the Two-Way paper cited above in order to fully
/// grok this code. The essence of it is:
///
-/// 1) Do something to detect a "critical" position in the needle.
-/// 2) For the current position in the haystack, look if needle[critical..]
-/// matches at that position.
-/// 3) If so, look if needle[..critical] matches.
-/// 4) If a mismatch occurs, shift the search by some amount based on the
-/// critical position and a pre-computed shift.
+/// 1. Do something to detect a "critical" position in the needle.
+/// 2. For the current position in the haystack, look if `needle[critical..]`
+/// matches at that position.
+/// 3. If so, look if `needle[..critical]` matches.
+/// 4. If a mismatch occurs, shift the search by some amount based on the
+/// critical position and a pre-computed shift.
///
-/// This type is wrapped in Forward and Reverse types that expose consistent
-/// forward or reverse APIs.
+/// This type is wrapped in the forward and reverse finders that expose
+/// consistent forward or reverse APIs.
#[derive(Clone, Copy, Debug)]
struct TwoWay {
- /// A small bitset used as a quick prefilter (in addition to the faster
- /// SIMD based prefilter). Namely, a bit 'i' is set if and only if b%64==i
- /// for any b in the needle.
+ /// A small bitset used as a quick prefilter (in addition to any prefilter
+ /// given by the caller). Namely, a bit `i` is set if and only if
+ /// `b % 64 == i` for some byte `b` in the needle.
///
/// When used as a prefilter, if the last byte at the current candidate
/// position is NOT in this set, then we can skip that entire candidate
@@ -74,14 +105,13 @@ struct TwoWay {
shift: Shift,
}
-impl Forward {
- /// Create a searcher that uses the Two-Way algorithm by searching forwards
- /// through any haystack.
- pub(crate) fn new(needle: &[u8]) -> Forward {
- if needle.is_empty() {
- return Forward(TwoWay::empty());
- }
-
+impl Finder {
+ /// Create a searcher that finds occurrences of the given `needle`.
+ ///
+ /// An empty `needle` results in a match at every position in a haystack,
+ /// including at `haystack.len()`.
+ #[inline]
+ pub fn new(needle: &[u8]) -> Finder {
let byteset = ApproximateByteSet::new(needle);
let min_suffix = Suffix::forward(needle, SuffixKind::Minimal);
let max_suffix = Suffix::forward(needle, SuffixKind::Maximal);
@@ -92,27 +122,38 @@ impl Forward {
(max_suffix.period, max_suffix.pos)
};
let shift = Shift::forward(needle, period_lower_bound, critical_pos);
- Forward(TwoWay { byteset, critical_pos, shift })
+ Finder(TwoWay { byteset, critical_pos, shift })
}
- /// Find the position of the first occurrence of this searcher's needle in
- /// the given haystack. If one does not exist, then return None.
+ /// Returns the first occurrence of `needle` in the given `haystack`, or
+ /// `None` if no such occurrence could be found.
+ ///
+ /// The `needle` given must be the same as the `needle` provided to
+ /// [`Finder::new`].
///
- /// This accepts prefilter state that is useful when using the same
- /// searcher multiple times, such as in an iterator.
+ /// An empty `needle` results in a match at every position in a haystack,
+ /// including at `haystack.len()`.
+ #[inline]
+ pub fn find(&self, haystack: &[u8], needle: &[u8]) -> Option<usize> {
+ self.find_with_prefilter(None, haystack, needle)
+ }
+
+ /// This is like [`Finder::find`], but it accepts a prefilter for
+ /// accelerating searches.
///
- /// Callers must guarantee that the needle is non-empty and its length is
- /// <= the haystack's length.
+ /// Currently this is not exposed in the public API because, at the time
+ /// of writing, I didn't want to spend time thinking about how to expose
+ /// the prefilter infrastructure (if at all). If you have a compelling use
+ /// case for exposing this routine, please create an issue. Do *not* open
+ /// a PR that just exposes `Pre` and friends. Exporting this routine will
+ /// require API design.
#[inline(always)]
- pub(crate) fn find(
+ pub(crate) fn find_with_prefilter(
&self,
- pre: Option<&mut Pre<'_>>,
+ pre: Option<Pre<'_>>,
haystack: &[u8],
needle: &[u8],
) -> Option<usize> {
- debug_assert!(!needle.is_empty(), "needle should not be empty");
- debug_assert!(needle.len() <= haystack.len(), "haystack too short");
-
match self.0.shift {
Shift::Small { period } => {
self.find_small_imp(pre, haystack, needle, period)
@@ -123,25 +164,6 @@ impl Forward {
}
}
- /// Like find, but handles the degenerate substring test cases. This is
- /// only useful for conveniently testing this substring implementation in
- /// isolation.
- #[cfg(test)]
- fn find_general(
- &self,
- pre: Option<&mut Pre<'_>>,
- haystack: &[u8],
- needle: &[u8],
- ) -> Option<usize> {
- if needle.is_empty() {
- Some(0)
- } else if haystack.len() < needle.len() {
- None
- } else {
- self.find(pre, haystack, needle)
- }
- }
-
// Each of the two search implementations below can be accelerated by a
// prefilter, but it is not always enabled. To avoid its overhead when
// it's disabled, we explicitly inline each search implementation based on
@@ -151,19 +173,22 @@ impl Forward {
#[inline(always)]
fn find_small_imp(
&self,
- mut pre: Option<&mut Pre<'_>>,
+ mut pre: Option<Pre<'_>>,
haystack: &[u8],
needle: &[u8],
period: usize,
) -> Option<usize> {
- let last_byte = needle.len() - 1;
let mut pos = 0;
let mut shift = 0;
+ let last_byte_pos = match needle.len().checked_sub(1) {
+ None => return Some(pos),
+ Some(last_byte_pos) => last_byte_pos,
+ };
while pos + needle.len() <= haystack.len() {
let mut i = cmp::max(self.0.critical_pos, shift);
if let Some(pre) = pre.as_mut() {
- if pre.should_call() {
- pos += pre.call(&haystack[pos..], needle)?;
+ if pre.is_effective() {
+ pos += pre.find(&haystack[pos..])?;
shift = 0;
i = self.0.critical_pos;
if pos + needle.len() > haystack.len() {
@@ -171,7 +196,7 @@ impl Forward {
}
}
}
- if !self.0.byteset.contains(haystack[pos + last_byte]) {
+ if !self.0.byteset.contains(haystack[pos + last_byte_pos]) {
pos += needle.len();
shift = 0;
continue;
@@ -200,24 +225,27 @@ impl Forward {
#[inline(always)]
fn find_large_imp(
&self,
- mut pre: Option<&mut Pre<'_>>,
+ mut pre: Option<Pre<'_>>,
haystack: &[u8],
needle: &[u8],
shift: usize,
) -> Option<usize> {
- let last_byte = needle.len() - 1;
let mut pos = 0;
+ let last_byte_pos = match needle.len().checked_sub(1) {
+ None => return Some(pos),
+ Some(last_byte_pos) => last_byte_pos,
+ };
'outer: while pos + needle.len() <= haystack.len() {
if let Some(pre) = pre.as_mut() {
- if pre.should_call() {
- pos += pre.call(&haystack[pos..], needle)?;
+ if pre.is_effective() {
+ pos += pre.find(&haystack[pos..])?;
if pos + needle.len() > haystack.len() {
return None;
}
}
}
- if !self.0.byteset.contains(haystack[pos + last_byte]) {
+ if !self.0.byteset.contains(haystack[pos + last_byte_pos]) {
pos += needle.len();
continue;
}
@@ -241,14 +269,13 @@ impl Forward {
}
}
-impl Reverse {
- /// Create a searcher that uses the Two-Way algorithm by searching in
- /// reverse through any haystack.
- pub(crate) fn new(needle: &[u8]) -> Reverse {
- if needle.is_empty() {
- return Reverse(TwoWay::empty());
- }
-
+impl FinderRev {
+ /// Create a searcher that finds occurrences of the given `needle`.
+ ///
+ /// An empty `needle` results in a match at every position in a haystack,
+ /// including at `haystack.len()`.
+ #[inline]
+ pub fn new(needle: &[u8]) -> FinderRev {
let byteset = ApproximateByteSet::new(needle);
let min_suffix = Suffix::reverse(needle, SuffixKind::Minimal);
let max_suffix = Suffix::reverse(needle, SuffixKind::Maximal);
@@ -258,27 +285,20 @@ impl Reverse {
} else {
(max_suffix.period, max_suffix.pos)
};
- // let critical_pos = needle.len() - critical_pos;
let shift = Shift::reverse(needle, period_lower_bound, critical_pos);
- Reverse(TwoWay { byteset, critical_pos, shift })
+ FinderRev(TwoWay { byteset, critical_pos, shift })
}
- /// Find the position of the last occurrence of this searcher's needle
- /// in the given haystack. If one does not exist, then return None.
+ /// Returns the last occurrence of `needle` in the given `haystack`, or
+ /// `None` if no such occurrence could be found.
///
- /// This will automatically initialize prefilter state. This should only
- /// be used for one-off searches.
+ /// The `needle` given must be the same as the `needle` provided to
+ /// [`FinderRev::new`].
///
- /// Callers must guarantee that the needle is non-empty and its length is
- /// <= the haystack's length.
- #[inline(always)]
- pub(crate) fn rfind(
- &self,
- haystack: &[u8],
- needle: &[u8],
- ) -> Option<usize> {
- debug_assert!(!needle.is_empty(), "needle should not be empty");
- debug_assert!(needle.len() <= haystack.len(), "haystack too short");
+ /// An empty `needle` results in a match at every position in a haystack,
+ /// including at `haystack.len()`.
+ #[inline]
+ pub fn rfind(&self, haystack: &[u8], needle: &[u8]) -> Option<usize> {
// For the reverse case, we don't use a prefilter. It's plausible that
// perhaps we should, but it's a lot of additional code to do it, and
// it's not clear that it's actually worth it. If you have a really
@@ -293,20 +313,6 @@ impl Reverse {
}
}
- /// Like rfind, but handles the degenerate substring test cases. This is
- /// only useful for conveniently testing this substring implementation in
- /// isolation.
- #[cfg(test)]
- fn rfind_general(&self, haystack: &[u8], needle: &[u8]) -> Option<usize> {
- if needle.is_empty() {
- Some(haystack.len())
- } else if haystack.len() < needle.len() {
- None
- } else {
- self.rfind(haystack, needle)
- }
- }
-
#[inline(always)]
fn rfind_small_imp(
&self,
@@ -317,6 +323,10 @@ impl Reverse {
let nlen = needle.len();
let mut pos = haystack.len();
let mut shift = nlen;
+ let first_byte = match needle.get(0) {
+ None => return Some(pos),
+ Some(&first_byte) => first_byte,
+ };
while pos >= nlen {
if !self.0.byteset.contains(haystack[pos - nlen]) {
pos -= nlen;
@@ -327,7 +337,7 @@ impl Reverse {
while i > 0 && needle[i - 1] == haystack[pos - nlen + i - 1] {
i -= 1;
}
- if i > 0 || needle[0] != haystack[pos - nlen] {
+ if i > 0 || first_byte != haystack[pos - nlen] {
pos -= self.0.critical_pos - i + 1;
shift = nlen;
} else {
@@ -354,6 +364,10 @@ impl Reverse {
) -> Option<usize> {
let nlen = needle.len();
let mut pos = haystack.len();
+ let first_byte = match needle.get(0) {
+ None => return Some(pos),
+ Some(&first_byte) => first_byte,
+ };
while pos >= nlen {
if !self.0.byteset.contains(haystack[pos - nlen]) {
pos -= nlen;
@@ -363,7 +377,7 @@ impl Reverse {
while i > 0 && needle[i - 1] == haystack[pos - nlen + i - 1] {
i -= 1;
}
- if i > 0 || needle[0] != haystack[pos - nlen] {
+ if i > 0 || first_byte != haystack[pos - nlen] {
pos -= self.0.critical_pos - i + 1;
} else {
let mut j = self.0.critical_pos;
@@ -380,16 +394,6 @@ impl Reverse {
}
}
-impl TwoWay {
- fn empty() -> TwoWay {
- TwoWay {
- byteset: ApproximateByteSet::new(b""),
- critical_pos: 0,
- shift: Shift::Large { shift: 0 },
- }
- }
-}
-
/// A representation of the amount we're allowed to shift by during Two-Way
/// search.
///
@@ -444,7 +448,7 @@ impl Shift {
}
let (u, v) = needle.split_at(critical_pos);
- if !util::is_suffix(&v[..period_lower_bound], u) {
+ if !is_suffix(&v[..period_lower_bound], u) {
return Shift::Large { shift: large };
}
Shift::Small { period: period_lower_bound }
@@ -467,7 +471,7 @@ impl Shift {
}
let (v, u) = needle.split_at(critical_pos);
- if !util::is_prefix(&v[v.len() - period_lower_bound..], u) {
+ if !is_prefix(&v[v.len() - period_lower_bound..], u) {
return Shift::Large { shift: large };
}
Shift::Small { period: period_lower_bound }
@@ -494,8 +498,6 @@ struct Suffix {
impl Suffix {
fn forward(needle: &[u8], kind: SuffixKind) -> Suffix {
- debug_assert!(!needle.is_empty());
-
// suffix represents our maximal (or minimal) suffix, along with
// its period.
let mut suffix = Suffix { pos: 0, period: 1 };
@@ -544,14 +546,15 @@ impl Suffix {
}
fn reverse(needle: &[u8], kind: SuffixKind) -> Suffix {
- debug_assert!(!needle.is_empty());
-
// See the comments in `forward` for how this works.
let mut suffix = Suffix { pos: needle.len(), period: 1 };
if needle.len() == 1 {
return suffix;
}
- let mut candidate_start = needle.len() - 1;
+ let mut candidate_start = match needle.len().checked_sub(1) {
+ None => return suffix,
+ Some(candidate_start) => candidate_start,
+ };
let mut offset = 0;
while offset < candidate_start {
@@ -665,17 +668,12 @@ impl ApproximateByteSet {
}
}
-#[cfg(all(test, feature = "std", not(miri)))]
+#[cfg(test)]
mod tests {
- use quickcheck::quickcheck;
+ use alloc::vec::Vec;
use super::*;
- define_memmem_quickcheck_tests!(
- super::simpletests::twoway_find,
- super::simpletests::twoway_rfind
- );
-
/// Convenience wrapper for computing the suffix as a byte string.
fn get_suffix_forward(needle: &[u8], kind: SuffixKind) -> (&[u8], usize) {
let s = Suffix::forward(needle, kind);
@@ -710,13 +708,34 @@ mod tests {
got
}
+ define_substring_forward_quickcheck!(|h, n| Some(
+ Finder::new(n).find(h, n)
+ ));
+ define_substring_reverse_quickcheck!(|h, n| Some(
+ FinderRev::new(n).rfind(h, n)
+ ));
+
+ #[test]
+ fn forward() {
+ crate::tests::substring::Runner::new()
+ .fwd(|h, n| Some(Finder::new(n).find(h, n)))
+ .run();
+ }
+
+ #[test]
+ fn reverse() {
+ crate::tests::substring::Runner::new()
+ .rev(|h, n| Some(FinderRev::new(n).rfind(h, n)))
+ .run();
+ }
+
#[test]
fn suffix_forward() {
macro_rules! assert_suffix_min {
($given:expr, $expected:expr, $period:expr) => {
let (got_suffix, got_period) =
get_suffix_forward($given.as_bytes(), SuffixKind::Minimal);
- let got_suffix = std::str::from_utf8(got_suffix).unwrap();
+ let got_suffix = core::str::from_utf8(got_suffix).unwrap();
assert_eq!(($expected, $period), (got_suffix, got_period));
};
}
@@ -725,7 +744,7 @@ mod tests {
($given:expr, $expected:expr, $period:expr) => {
let (got_suffix, got_period) =
get_suffix_forward($given.as_bytes(), SuffixKind::Maximal);
- let got_suffix = std::str::from_utf8(got_suffix).unwrap();
+ let got_suffix = core::str::from_utf8(got_suffix).unwrap();
assert_eq!(($expected, $period), (got_suffix, got_period));
};
}
@@ -773,7 +792,7 @@ mod tests {
($given:expr, $expected:expr, $period:expr) => {
let (got_suffix, got_period) =
get_suffix_reverse($given.as_bytes(), SuffixKind::Minimal);
- let got_suffix = std::str::from_utf8(got_suffix).unwrap();
+ let got_suffix = core::str::from_utf8(got_suffix).unwrap();
assert_eq!(($expected, $period), (got_suffix, got_period));
};
}
@@ -782,7 +801,7 @@ mod tests {
($given:expr, $expected:expr, $period:expr) => {
let (got_suffix, got_period) =
get_suffix_reverse($given.as_bytes(), SuffixKind::Maximal);
- let got_suffix = std::str::from_utf8(got_suffix).unwrap();
+ let got_suffix = core::str::from_utf8(got_suffix).unwrap();
assert_eq!(($expected, $period), (got_suffix, got_period));
};
}
@@ -821,7 +840,8 @@ mod tests {
assert_suffix_max!("aaa", "aaa", 1);
}
- quickcheck! {
+ #[cfg(not(miri))]
+ quickcheck::quickcheck! {
fn qc_suffix_forward_maximal(bytes: Vec<u8>) -> bool {
if bytes.is_empty() {
return true;
@@ -842,27 +862,6 @@ mod tests {
expected == got
}
}
-}
-
-#[cfg(test)]
-mod simpletests {
- use super::*;
-
- pub(crate) fn twoway_find(
- haystack: &[u8],
- needle: &[u8],
- ) -> Option<usize> {
- Forward::new(needle).find_general(None, haystack, needle)
- }
-
- pub(crate) fn twoway_rfind(
- haystack: &[u8],
- needle: &[u8],
- ) -> Option<usize> {
- Reverse::new(needle).rfind_general(haystack, needle)
- }
-
- define_memmem_simple_tests!(twoway_find, twoway_rfind);
// This is a regression test caught by quickcheck that exercised a bug in
// the reverse small period handling. The bug was that we were using 'if j
@@ -870,7 +869,7 @@ mod simpletests {
// j >= shift', which matches the corresponding guard in the forward impl.
#[test]
fn regression_rev_small_period() {
- let rfind = super::simpletests::twoway_rfind;
+ let rfind = |h, n| FinderRev::new(n).rfind(h, n);
let haystack = "ababaz";
let needle = "abab";
assert_eq!(Some(0), rfind(haystack.as_bytes(), needle.as_bytes()));
diff --git a/vendor/memchr/src/arch/generic/memchr.rs b/vendor/memchr/src/arch/generic/memchr.rs
new file mode 100644
index 000000000..580b3cc1a
--- /dev/null
+++ b/vendor/memchr/src/arch/generic/memchr.rs
@@ -0,0 +1,1214 @@
+/*!
+Generic crate-internal routines for the `memchr` family of functions.
+*/
+
+// What follows is a vector algorithm generic over the specific vector
+// type to detect the position of one, two or three needles in a haystack.
+// From what I know, this is a "classic" algorithm, although I don't
+// believe it has been published in any peer reviewed journal. I believe
+// it can be found in places like glibc and Go's standard library. It
+// appears to be well known and is elaborated on in more detail here:
+// https://gms.tf/stdfind-and-memchr-optimizations.html
+//
+// While the routine below is fairly long and perhaps intimidating, the basic
+// idea is actually very simple and can be expressed straightforwardly in
+// pseudo code. The pseudo code below is written for 128 bit vectors, but the
+// actual code below works for anything that implements the Vector trait.
+//
+// needle = (n1 << 15) | (n1 << 14) | ... | (n1 << 1) | n1
+// // Note: shift amount is in bytes
+//
+// while i <= haystack.len() - 16:
+// // A 16 byte vector. Each byte in chunk corresponds to a byte in
+// // the haystack.
+// chunk = haystack[i:i+16]
+// // Compare bytes in needle with bytes in chunk. The result is a 16
+// // byte chunk where each byte is 0xFF if the corresponding bytes
+// // in needle and chunk were equal, or 0x00 otherwise.
+// eqs = cmpeq(needle, chunk)
+// // Return a 32 bit integer where the most significant 16 bits
+// // are always 0 and the lower 16 bits correspond to whether the
+// // most significant bit in the corresponding byte in `eqs` is set.
+// // In other words, `mask as u16` has bit i set if and only if
+// // needle[i] == chunk[i].
+// mask = movemask(eqs)
+//
+// // Mask is 0 if there is no match, and non-zero otherwise.
+// if mask != 0:
+// // trailing_zeros tells us the position of the least significant
+// // bit that is set.
+// return i + trailing_zeros(mask)
+//
+// // haystack length may not be a multiple of 16, so search the rest.
+// while i < haystack.len():
+// if haystack[i] == n1:
+// return i
+//
+// // No match found.
+// return NULL
+//
+// In fact, we could loosely translate the above code to Rust line-for-line
+// and it would be a pretty fast algorithm. But, we pull out all the stops
+// to go as fast as possible:
+//
+// 1. We use aligned loads. That is, we do some finagling to make sure our
+// primary loop not only proceeds in increments of 16 bytes, but that
+// the address of haystack's pointer that we dereference is aligned to
+// 16 bytes. 16 is a magic number here because it is the size of SSE2
+// 128-bit vector. (For the AVX2 algorithm, 32 is the magic number.)
+// Therefore, to get aligned loads, our pointer's address must be evenly
+// divisible by 16.
+// 2. Our primary loop proceeds 64 bytes at a time instead of 16. It's
+// kind of like loop unrolling, but we combine the equality comparisons
+// using a vector OR such that we only need to extract a single mask to
+// determine whether a match exists or not. If so, then we do some
+// book-keeping to determine the precise location but otherwise mush on.
+// 3. We use our "chunk" comparison routine in as many places as possible,
+// even if it means using unaligned loads. In particular, if haystack
+// starts with an unaligned address, then we do an unaligned load to
+// search the first 16 bytes. We then start our primary loop at the
+// smallest subsequent aligned address, which will actually overlap with
+// previously searched bytes. But we're OK with that. We do a similar
+// dance at the end of our primary loop. Finally, to avoid a
+// byte-at-a-time loop at the end, we do a final 16 byte unaligned load
+// that may overlap with a previous load. This is OK because it converts
+// a loop into a small number of very fast vector instructions. The overlap
+// is OK because we know the place where the overlap occurs does not
+// contain a match.
+//
+// And that's pretty much all there is to it. Note that since the below is
+// generic and since it's meant to be inlined into routines with a
+// `#[target_feature(enable = "...")]` annotation, we must mark all routines as
+// both unsafe and `#[inline(always)]`.
+//
+// The fact that the code below is generic does somewhat inhibit us. For
+// example, I've noticed that introducing an uninlineable `#[cold]` function to
+// handle the match case in the loop generates tighter assembly, but there is
+// no way to do this in the generic code below because the generic code doesn't
+// know what `target_feature` annotation to apply to the uninlineable function.
+// We could make such functions part of the `Vector` trait, but we instead live
+// with the slightly sub-optimal codegen for now since it doesn't seem to have
+// a noticeable perf difference.
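+//
+// To make the mask bookkeeping concrete: if `movemask` on a 16-byte chunk
+// yields `0b0000_0000_0000_0100`, then the only matching byte in that chunk
+// is at offset `trailing_zeros(mask) == 2`, so the overall match offset is
+// `i + 2`.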
+
+use crate::{
+ ext::Pointer,
+ vector::{MoveMask, Vector},
+};
+
+/// Finds all occurrences of a single byte in a haystack.
+#[derive(Clone, Copy, Debug)]
+pub(crate) struct One<V> {
+ s1: u8,
+ v1: V,
+}
+
+impl<V: Vector> One<V> {
+ /// The number of bytes we examine per iteration of our search loop.
+ const LOOP_SIZE: usize = 4 * V::BYTES;
+
+ /// Create a new searcher that finds occurrences of the byte given.
+ #[inline(always)]
+ pub(crate) unsafe fn new(needle: u8) -> One<V> {
+ One { s1: needle, v1: V::splat(needle) }
+ }
+
+ /// Returns the needle given to `One::new`.
+ #[inline(always)]
+ pub(crate) fn needle1(&self) -> u8 {
+ self.s1
+ }
+
+ /// Return a pointer to the first occurrence of the needle in the given
+ /// haystack. If no such occurrence exists, then `None` is returned.
+ ///
+ /// When a match is found, the pointer returned is guaranteed to be
+ /// `>= start` and `< end`.
+ ///
+ /// # Safety
+ ///
+ /// * It must be the case that `start < end` and that the distance between
+ /// them is at least equal to `V::BYTES`. That is, it must always be valid
+ /// to do at least an unaligned load of `V` at `start`.
+ /// * Both `start` and `end` must be valid for reads.
+ /// * Both `start` and `end` must point to an initialized value.
+ /// * Both `start` and `end` must point to the same allocated object and
+ /// must either be in bounds or at most one byte past the end of the
+ /// allocated object.
+ /// * Both `start` and `end` must be _derived from_ a pointer to the same
+ /// object.
+ /// * The distance between `start` and `end` must not overflow `isize`.
+ /// * The distance being in bounds must not rely on "wrapping around" the
+ /// address space.
+ #[inline(always)]
+ pub(crate) unsafe fn find_raw(
+ &self,
+ start: *const u8,
+ end: *const u8,
+ ) -> Option<*const u8> {
+ // If we want to support vectors bigger than 256 bits, we probably
+ // need to move up to using a u64 for the masks used below. Currently
+ // they are 32 bits, which means we're SOL for vectors that need masks
+ // bigger than 32 bits. Overall unclear until there's a use case.
+ debug_assert!(V::BYTES <= 32, "vector cannot be bigger than 32 bytes");
+
+ let topos = V::Mask::first_offset;
+ let len = end.distance(start);
+ debug_assert!(
+ len >= V::BYTES,
+ "haystack has length {}, but must be at least {}",
+ len,
+ V::BYTES
+ );
+
+ // Search a possibly unaligned chunk at `start`. This covers any part
+ // of the haystack prior to where aligned loads can start.
+ if let Some(cur) = self.search_chunk(start, topos) {
+ return Some(cur);
+ }
+ // Set `cur` to the first V-aligned pointer greater than `start`.
+ let mut cur = start.add(V::BYTES - (start.as_usize() & V::ALIGN));
+ debug_assert!(cur > start && end.sub(V::BYTES) >= start);
+ if len >= Self::LOOP_SIZE {
+ while cur <= end.sub(Self::LOOP_SIZE) {
+ debug_assert_eq!(0, cur.as_usize() % V::BYTES);
+
+ let a = V::load_aligned(cur);
+ let b = V::load_aligned(cur.add(1 * V::BYTES));
+ let c = V::load_aligned(cur.add(2 * V::BYTES));
+ let d = V::load_aligned(cur.add(3 * V::BYTES));
+ let eqa = self.v1.cmpeq(a);
+ let eqb = self.v1.cmpeq(b);
+ let eqc = self.v1.cmpeq(c);
+ let eqd = self.v1.cmpeq(d);
+ let or1 = eqa.or(eqb);
+ let or2 = eqc.or(eqd);
+ let or3 = or1.or(or2);
+ if or3.movemask_will_have_non_zero() {
+ let mask = eqa.movemask();
+ if mask.has_non_zero() {
+ return Some(cur.add(topos(mask)));
+ }
+
+ let mask = eqb.movemask();
+ if mask.has_non_zero() {
+ return Some(cur.add(1 * V::BYTES).add(topos(mask)));
+ }
+
+ let mask = eqc.movemask();
+ if mask.has_non_zero() {
+ return Some(cur.add(2 * V::BYTES).add(topos(mask)));
+ }
+
+ let mask = eqd.movemask();
+ debug_assert!(mask.has_non_zero());
+ return Some(cur.add(3 * V::BYTES).add(topos(mask)));
+ }
+ cur = cur.add(Self::LOOP_SIZE);
+ }
+ }
+ // Handle any leftovers after the aligned loop above. We use unaligned
+ // loads here, but I believe we are guaranteed that they are aligned
+ // since `cur` is aligned.
+ while cur <= end.sub(V::BYTES) {
+ debug_assert!(end.distance(cur) >= V::BYTES);
+ if let Some(cur) = self.search_chunk(cur, topos) {
+ return Some(cur);
+ }
+ cur = cur.add(V::BYTES);
+ }
+ // Finally handle any remaining bytes less than the size of V. In this
+ // case, our pointer may indeed be unaligned and the load may overlap
+ // with the previous one. But that's okay since we know the previous
+ // load didn't lead to a match (otherwise we wouldn't be here).
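+ // For example (hypothetical numbers): with a 16-byte vector and 5 bytes
+ // remaining, `cur` is rewound by 11 bytes so the final unaligned load
+ // covers exactly the last 16 bytes, re-checking 11 bytes already known
+ // not to match.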
+ if cur < end {
+ debug_assert!(end.distance(cur) < V::BYTES);
+ cur = cur.sub(V::BYTES - end.distance(cur));
+ debug_assert_eq!(end.distance(cur), V::BYTES);
+ return self.search_chunk(cur, topos);
+ }
+ None
+ }
+
+ /// Return a pointer to the last occurrence of the needle in the given
+ /// haystack. If no such occurrence exists, then `None` is returned.
+ ///
+ /// When a match is found, the pointer returned is guaranteed to be
+ /// `>= start` and `< end`.
+ ///
+ /// # Safety
+ ///
+ /// * It must be the case that `start < end` and that the distance between
+ /// them is at least equal to `V::BYTES`. That is, it must always be valid
+ /// to do at least an unaligned load of `V` at `start`.
+ /// * Both `start` and `end` must be valid for reads.
+ /// * Both `start` and `end` must point to an initialized value.
+ /// * Both `start` and `end` must point to the same allocated object and
+ /// must either be in bounds or at most one byte past the end of the
+ /// allocated object.
+ /// * Both `start` and `end` must be _derived from_ a pointer to the same
+ /// object.
+ /// * The distance between `start` and `end` must not overflow `isize`.
+ /// * The distance being in bounds must not rely on "wrapping around" the
+ /// address space.
+ #[inline(always)]
+ pub(crate) unsafe fn rfind_raw(
+ &self,
+ start: *const u8,
+ end: *const u8,
+ ) -> Option<*const u8> {
+ // If we want to support vectors bigger than 256 bits, we probably
+ // need to move up to using a u64 for the masks used below. Currently
+ // they are 32 bits, which means we're SOL for vectors that need masks
+ // bigger than 32 bits. Overall unclear until there's a use case.
+ debug_assert!(V::BYTES <= 32, "vector cannot be bigger than 32 bytes");
+
+ let topos = V::Mask::last_offset;
+ let len = end.distance(start);
+ debug_assert!(
+ len >= V::BYTES,
+ "haystack has length {}, but must be at least {}",
+ len,
+ V::BYTES
+ );
+
+ if let Some(cur) = self.search_chunk(end.sub(V::BYTES), topos) {
+ return Some(cur);
+ }
+ let mut cur = end.sub(end.as_usize() & V::ALIGN);
+ debug_assert!(start <= cur && cur <= end);
+ if len >= Self::LOOP_SIZE {
+ while cur >= start.add(Self::LOOP_SIZE) {
+ debug_assert_eq!(0, cur.as_usize() % V::BYTES);
+
+ cur = cur.sub(Self::LOOP_SIZE);
+ let a = V::load_aligned(cur);
+ let b = V::load_aligned(cur.add(1 * V::BYTES));
+ let c = V::load_aligned(cur.add(2 * V::BYTES));
+ let d = V::load_aligned(cur.add(3 * V::BYTES));
+ let eqa = self.v1.cmpeq(a);
+ let eqb = self.v1.cmpeq(b);
+ let eqc = self.v1.cmpeq(c);
+ let eqd = self.v1.cmpeq(d);
+ let or1 = eqa.or(eqb);
+ let or2 = eqc.or(eqd);
+ let or3 = or1.or(or2);
+ if or3.movemask_will_have_non_zero() {
+ let mask = eqd.movemask();
+ if mask.has_non_zero() {
+ return Some(cur.add(3 * V::BYTES).add(topos(mask)));
+ }
+
+ let mask = eqc.movemask();
+ if mask.has_non_zero() {
+ return Some(cur.add(2 * V::BYTES).add(topos(mask)));
+ }
+
+ let mask = eqb.movemask();
+ if mask.has_non_zero() {
+ return Some(cur.add(1 * V::BYTES).add(topos(mask)));
+ }
+
+ let mask = eqa.movemask();
+ debug_assert!(mask.has_non_zero());
+ return Some(cur.add(topos(mask)));
+ }
+ }
+ }
+ while cur >= start.add(V::BYTES) {
+ debug_assert!(cur.distance(start) >= V::BYTES);
+ cur = cur.sub(V::BYTES);
+ if let Some(cur) = self.search_chunk(cur, topos) {
+ return Some(cur);
+ }
+ }
+ if cur > start {
+ debug_assert!(cur.distance(start) < V::BYTES);
+ return self.search_chunk(start, topos);
+ }
+ None
+ }
+
+ /// Return a count of all matching bytes in the given haystack.
+ ///
+ /// # Safety
+ ///
+ /// * It must be the case that `start < end` and that the distance between
+ /// them is at least equal to `V::BYTES`. That is, it must always be valid
+ /// to do at least an unaligned load of `V` at `start`.
+ /// * Both `start` and `end` must be valid for reads.
+ /// * Both `start` and `end` must point to an initialized value.
+ /// * Both `start` and `end` must point to the same allocated object and
+ /// must either be in bounds or at most one byte past the end of the
+ /// allocated object.
+ /// * Both `start` and `end` must be _derived from_ a pointer to the same
+ /// object.
+ /// * The distance between `start` and `end` must not overflow `isize`.
+ /// * The distance being in bounds must not rely on "wrapping around" the
+ /// address space.
+ #[inline(always)]
+ pub(crate) unsafe fn count_raw(
+ &self,
+ start: *const u8,
+ end: *const u8,
+ ) -> usize {
+ debug_assert!(V::BYTES <= 32, "vector cannot be bigger than 32 bytes");
+
+ let confirm = |b| b == self.needle1();
+ let len = end.distance(start);
+ debug_assert!(
+ len >= V::BYTES,
+ "haystack has length {}, but must be at least {}",
+ len,
+ V::BYTES
+ );
+
+ // Set `cur` to the first V-aligned pointer greater than `start`.
+ let mut cur = start.add(V::BYTES - (start.as_usize() & V::ALIGN));
+ // Count any matching bytes before we start our aligned loop.
+ let mut count = count_byte_by_byte(start, cur, confirm);
+ debug_assert!(cur > start && end.sub(V::BYTES) >= start);
+ if len >= Self::LOOP_SIZE {
+ while cur <= end.sub(Self::LOOP_SIZE) {
+ debug_assert_eq!(0, cur.as_usize() % V::BYTES);
+
+ let a = V::load_aligned(cur);
+ let b = V::load_aligned(cur.add(1 * V::BYTES));
+ let c = V::load_aligned(cur.add(2 * V::BYTES));
+ let d = V::load_aligned(cur.add(3 * V::BYTES));
+ let eqa = self.v1.cmpeq(a);
+ let eqb = self.v1.cmpeq(b);
+ let eqc = self.v1.cmpeq(c);
+ let eqd = self.v1.cmpeq(d);
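+ // Each movemask has one bit set per matching byte, so summing the
+ // popcounts of all four masks counts every match in this chunk.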
+ count += eqa.movemask().count_ones();
+ count += eqb.movemask().count_ones();
+ count += eqc.movemask().count_ones();
+ count += eqd.movemask().count_ones();
+ cur = cur.add(Self::LOOP_SIZE);
+ }
+ }
+ // Handle any leftovers after the aligned loop above. We use unaligned
+ // loads here, but I believe we are guaranteed that they are aligned
+ // since `cur` is aligned.
+ while cur <= end.sub(V::BYTES) {
+ debug_assert!(end.distance(cur) >= V::BYTES);
+ let chunk = V::load_unaligned(cur);
+ count += self.v1.cmpeq(chunk).movemask().count_ones();
+ cur = cur.add(V::BYTES);
+ }
+ // And finally count any leftovers that weren't caught above.
+ count += count_byte_by_byte(cur, end, confirm);
+ count
+ }
+
+ /// Search the `V::BYTES` bytes starting at `cur` via an unaligned load.
+ ///
+ /// `mask_to_offset` should be a function that converts a `movemask` to
+ /// an offset such that `cur.add(offset)` corresponds to a pointer to the
+ /// match location if one is found. Generally it is expected to use either
+ /// `V::Mask::first_offset` or `V::Mask::last_offset`, depending on whether
+ /// one is implementing a forward or reverse search, respectively.
+ ///
+ /// # Safety
+ ///
+ /// `cur` must be a valid pointer and it must be valid to do an unaligned
+ /// load of size `V::BYTES` at `cur`.
+ #[inline(always)]
+ unsafe fn search_chunk(
+ &self,
+ cur: *const u8,
+ mask_to_offset: impl Fn(V::Mask) -> usize,
+ ) -> Option<*const u8> {
+ let chunk = V::load_unaligned(cur);
+ let mask = self.v1.cmpeq(chunk).movemask();
+ if mask.has_non_zero() {
+ Some(cur.add(mask_to_offset(mask)))
+ } else {
+ None
+ }
+ }
+}
+
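+// A usage sketch (illustrative only, not part of this diff): given a
+// concrete `Vector` type `V` whose target feature has been verified, and a
+// haystack of at least `V::BYTES` bytes, a crate-internal caller might wire
+// this searcher up as follows:
+//
+//     // SAFETY: vector support was verified, and `find_raw` upholds
+//     // `start <= found < end` on a match.
+//     let one = unsafe { One::<V>::new(b'\n') };
+//     let offset = unsafe {
+//         search_slice_with_raw(haystack, |s, e| one.find_raw(s, e))
+//     };
+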
+/// Finds all occurrences of two bytes in a haystack.
+///
+/// That is, this reports matches of one of two possible bytes. For example,
+/// searching for `a` or `b` in `afoobar` would report matches at offsets `0`,
+/// `4` and `5`.
+#[derive(Clone, Copy, Debug)]
+pub(crate) struct Two<V> {
+ s1: u8,
+ s2: u8,
+ v1: V,
+ v2: V,
+}
+
+impl<V: Vector> Two<V> {
+ /// The number of bytes we examine in each iteration of our search loop.
+ const LOOP_SIZE: usize = 2 * V::BYTES;
+
+ /// Create a new searcher that finds occurrences of the two needle bytes
+ /// given.
+ #[inline(always)]
+ pub(crate) unsafe fn new(needle1: u8, needle2: u8) -> Two<V> {
+ Two {
+ s1: needle1,
+ s2: needle2,
+ v1: V::splat(needle1),
+ v2: V::splat(needle2),
+ }
+ }
+
+ /// Returns the first needle given to `Two::new`.
+ #[inline(always)]
+ pub(crate) fn needle1(&self) -> u8 {
+ self.s1
+ }
+
+ /// Returns the second needle given to `Two::new`.
+ #[inline(always)]
+ pub(crate) fn needle2(&self) -> u8 {
+ self.s2
+ }
+
+ /// Return a pointer to the first occurrence of one of the needles in the
+ /// given haystack. If no such occurrence exists, then `None` is returned.
+ ///
+ /// When a match is found, the pointer returned is guaranteed to be
+ /// `>= start` and `< end`.
+ ///
+ /// # Safety
+ ///
+ /// * It must be the case that `start < end` and that the distance between
+ /// them is at least equal to `V::BYTES`. That is, it must always be valid
+ /// to do at least an unaligned load of `V` at `start`.
+ /// * Both `start` and `end` must be valid for reads.
+ /// * Both `start` and `end` must point to an initialized value.
+ /// * Both `start` and `end` must point to the same allocated object and
+ /// must either be in bounds or at most one byte past the end of the
+ /// allocated object.
+ /// * Both `start` and `end` must be _derived from_ a pointer to the same
+ /// object.
+ /// * The distance between `start` and `end` must not overflow `isize`.
+ /// * The distance being in bounds must not rely on "wrapping around" the
+ /// address space.
+ #[inline(always)]
+ pub(crate) unsafe fn find_raw(
+ &self,
+ start: *const u8,
+ end: *const u8,
+ ) -> Option<*const u8> {
+ // If we want to support vectors bigger than 256 bits, we probably
+ // need to move up to using a u64 for the masks used below. Currently
+ // they are 32 bits, which means we're SOL for vectors that need masks
+ // bigger than 32 bits. Overall unclear until there's a use case.
+ debug_assert!(V::BYTES <= 32, "vector cannot be bigger than 32 bytes");
+
+ let topos = V::Mask::first_offset;
+ let len = end.distance(start);
+ debug_assert!(
+ len >= V::BYTES,
+ "haystack has length {}, but must be at least {}",
+ len,
+ V::BYTES
+ );
+
+ // Search a possibly unaligned chunk at `start`. This covers any part
+ // of the haystack prior to where aligned loads can start.
+ if let Some(cur) = self.search_chunk(start, topos) {
+ return Some(cur);
+ }
+ // Set `cur` to the first V-aligned pointer greater than `start`.
+ let mut cur = start.add(V::BYTES - (start.as_usize() & V::ALIGN));
+ debug_assert!(cur > start && end.sub(V::BYTES) >= start);
+ if len >= Self::LOOP_SIZE {
+ while cur <= end.sub(Self::LOOP_SIZE) {
+ debug_assert_eq!(0, cur.as_usize() % V::BYTES);
+
+ let a = V::load_aligned(cur);
+ let b = V::load_aligned(cur.add(V::BYTES));
+ let eqa1 = self.v1.cmpeq(a);
+ let eqb1 = self.v1.cmpeq(b);
+ let eqa2 = self.v2.cmpeq(a);
+ let eqb2 = self.v2.cmpeq(b);
+ let or1 = eqa1.or(eqb1);
+ let or2 = eqa2.or(eqb2);
+ let or3 = or1.or(or2);
+ if or3.movemask_will_have_non_zero() {
+ let mask = eqa1.movemask().or(eqa2.movemask());
+ if mask.has_non_zero() {
+ return Some(cur.add(topos(mask)));
+ }
+
+ let mask = eqb1.movemask().or(eqb2.movemask());
+ debug_assert!(mask.has_non_zero());
+ return Some(cur.add(V::BYTES).add(topos(mask)));
+ }
+ cur = cur.add(Self::LOOP_SIZE);
+ }
+ }
+ // Handle any leftovers after the aligned loop above. We use unaligned
+ // loads here, but I believe we are guaranteed that they are aligned
+ // since `cur` is aligned.
+ while cur <= end.sub(V::BYTES) {
+ debug_assert!(end.distance(cur) >= V::BYTES);
+ if let Some(cur) = self.search_chunk(cur, topos) {
+ return Some(cur);
+ }
+ cur = cur.add(V::BYTES);
+ }
+ // Finally handle any remaining bytes less than the size of V. In this
+ // case, our pointer may indeed be unaligned and the load may overlap
+ // with the previous one. But that's okay since we know the previous
+ // load didn't lead to a match (otherwise we wouldn't be here).
+ if cur < end {
+ debug_assert!(end.distance(cur) < V::BYTES);
+ cur = cur.sub(V::BYTES - end.distance(cur));
+ debug_assert_eq!(end.distance(cur), V::BYTES);
+ return self.search_chunk(cur, topos);
+ }
+ None
+ }
+
+ /// Return a pointer to the last occurrence of one of the needles in the
+ /// given haystack. If no such occurrence exists, then `None` is returned.
+ ///
+ /// When a match is found, the pointer returned is guaranteed to be
+ /// `>= start` and `< end`.
+ ///
+ /// # Safety
+ ///
+ /// * It must be the case that `start < end` and that the distance between
+ /// them is at least equal to `V::BYTES`. That is, it must always be valid
+ /// to do at least an unaligned load of `V` at `start`.
+ /// * Both `start` and `end` must be valid for reads.
+ /// * Both `start` and `end` must point to an initialized value.
+ /// * Both `start` and `end` must point to the same allocated object and
+ /// must either be in bounds or at most one byte past the end of the
+ /// allocated object.
+ /// * Both `start` and `end` must be _derived from_ a pointer to the same
+ /// object.
+ /// * The distance between `start` and `end` must not overflow `isize`.
+ /// * The distance being in bounds must not rely on "wrapping around" the
+ /// address space.
+ #[inline(always)]
+ pub(crate) unsafe fn rfind_raw(
+ &self,
+ start: *const u8,
+ end: *const u8,
+ ) -> Option<*const u8> {
+ // If we want to support vectors bigger than 256 bits, we probably
+ // need to move up to using a u64 for the masks used below. Currently
+ // they are 32 bits, which means we're SOL for vectors that need masks
+ // bigger than 32 bits. Overall unclear until there's a use case.
+ debug_assert!(V::BYTES <= 32, "vector cannot be bigger than 32 bytes");
+
+ let topos = V::Mask::last_offset;
+ let len = end.distance(start);
+ debug_assert!(
+ len >= V::BYTES,
+ "haystack has length {}, but must be at least {}",
+ len,
+ V::BYTES
+ );
+
+ if let Some(cur) = self.search_chunk(end.sub(V::BYTES), topos) {
+ return Some(cur);
+ }
+ let mut cur = end.sub(end.as_usize() & V::ALIGN);
+ debug_assert!(start <= cur && cur <= end);
+ if len >= Self::LOOP_SIZE {
+ while cur >= start.add(Self::LOOP_SIZE) {
+ debug_assert_eq!(0, cur.as_usize() % V::BYTES);
+
+ cur = cur.sub(Self::LOOP_SIZE);
+ let a = V::load_aligned(cur);
+ let b = V::load_aligned(cur.add(V::BYTES));
+ let eqa1 = self.v1.cmpeq(a);
+ let eqb1 = self.v1.cmpeq(b);
+ let eqa2 = self.v2.cmpeq(a);
+ let eqb2 = self.v2.cmpeq(b);
+ let or1 = eqa1.or(eqb1);
+ let or2 = eqa2.or(eqb2);
+ let or3 = or1.or(or2);
+ if or3.movemask_will_have_non_zero() {
+ let mask = eqb1.movemask().or(eqb2.movemask());
+ if mask.has_non_zero() {
+ return Some(cur.add(V::BYTES).add(topos(mask)));
+ }
+
+ let mask = eqa1.movemask().or(eqa2.movemask());
+ debug_assert!(mask.has_non_zero());
+ return Some(cur.add(topos(mask)));
+ }
+ }
+ }
+ while cur >= start.add(V::BYTES) {
+ debug_assert!(cur.distance(start) >= V::BYTES);
+ cur = cur.sub(V::BYTES);
+ if let Some(cur) = self.search_chunk(cur, topos) {
+ return Some(cur);
+ }
+ }
+ if cur > start {
+ debug_assert!(cur.distance(start) < V::BYTES);
+ return self.search_chunk(start, topos);
+ }
+ None
+ }
+
+ /// Search the `V::BYTES` bytes starting at `cur` via an unaligned load.
+ ///
+ /// `mask_to_offset` should be a function that converts a `movemask` to
+ /// an offset such that `cur.add(offset)` corresponds to a pointer to the
+ /// match location if one is found. Generally it is expected to use either
+ /// `V::Mask::first_offset` or `V::Mask::last_offset`, depending on whether
+ /// one is implementing a forward or reverse search, respectively.
+ ///
+ /// # Safety
+ ///
+ /// `cur` must be a valid pointer and it must be valid to do an unaligned
+ /// load of size `V::BYTES` at `cur`.
+ #[inline(always)]
+ unsafe fn search_chunk(
+ &self,
+ cur: *const u8,
+ mask_to_offset: impl Fn(V::Mask) -> usize,
+ ) -> Option<*const u8> {
+ let chunk = V::load_unaligned(cur);
+ let eq1 = self.v1.cmpeq(chunk);
+ let eq2 = self.v2.cmpeq(chunk);
+ let mask = eq1.or(eq2).movemask();
+ if mask.has_non_zero() {
+ let mask1 = eq1.movemask();
+ let mask2 = eq2.movemask();
+ Some(cur.add(mask_to_offset(mask1.or(mask2))))
+ } else {
+ None
+ }
+ }
+}
+
+/// Finds all occurrences of three bytes in a haystack.
+///
+/// That is, this reports matches of one of three possible bytes. For example,
+/// searching for `a`, `b` or `o` in `afoobar` would report matches at offsets
+/// `0`, `2`, `3`, `4` and `5`.
+#[derive(Clone, Copy, Debug)]
+pub(crate) struct Three<V> {
+ s1: u8,
+ s2: u8,
+ s3: u8,
+ v1: V,
+ v2: V,
+ v3: V,
+}
+
+impl<V: Vector> Three<V> {
+ /// The number of bytes we examine in each iteration of our search loop.
+ const LOOP_SIZE: usize = 2 * V::BYTES;
+
+ /// Create a new searcher that finds occurrences of the three needle bytes
+ /// given.
+ #[inline(always)]
+ pub(crate) unsafe fn new(
+ needle1: u8,
+ needle2: u8,
+ needle3: u8,
+ ) -> Three<V> {
+ Three {
+ s1: needle1,
+ s2: needle2,
+ s3: needle3,
+ v1: V::splat(needle1),
+ v2: V::splat(needle2),
+ v3: V::splat(needle3),
+ }
+ }
+
+ /// Returns the first needle given to `Three::new`.
+ #[inline(always)]
+ pub(crate) fn needle1(&self) -> u8 {
+ self.s1
+ }
+
+ /// Returns the second needle given to `Three::new`.
+ #[inline(always)]
+ pub(crate) fn needle2(&self) -> u8 {
+ self.s2
+ }
+
+ /// Returns the third needle given to `Three::new`.
+ #[inline(always)]
+ pub(crate) fn needle3(&self) -> u8 {
+ self.s3
+ }
+
+ /// Return a pointer to the first occurrence of one of the needles in the
+ /// given haystack. If no such occurrence exists, then `None` is returned.
+ ///
+ /// When a match is found, the pointer returned is guaranteed to be
+ /// `>= start` and `< end`.
+ ///
+ /// # Safety
+ ///
+ /// * It must be the case that `start < end` and that the distance between
+ /// them is at least equal to `V::BYTES`. That is, it must always be valid
+ /// to do at least an unaligned load of `V` at `start`.
+ /// * Both `start` and `end` must be valid for reads.
+ /// * Both `start` and `end` must point to an initialized value.
+ /// * Both `start` and `end` must point to the same allocated object and
+ /// must either be in bounds or at most one byte past the end of the
+ /// allocated object.
+ /// * Both `start` and `end` must be _derived from_ a pointer to the same
+ /// object.
+ /// * The distance between `start` and `end` must not overflow `isize`.
+ /// * The distance being in bounds must not rely on "wrapping around" the
+ /// address space.
+ #[inline(always)]
+ pub(crate) unsafe fn find_raw(
+ &self,
+ start: *const u8,
+ end: *const u8,
+ ) -> Option<*const u8> {
+ // If we want to support vectors bigger than 256 bits, we probably
+ // need to move up to using a u64 for the masks used below. Currently
+ // they are 32 bits, which means we're SOL for vectors that need masks
+ // bigger than 32 bits. Overall unclear until there's a use case.
+ debug_assert!(V::BYTES <= 32, "vector cannot be bigger than 32 bytes");
+
+ let topos = V::Mask::first_offset;
+ let len = end.distance(start);
+ debug_assert!(
+ len >= V::BYTES,
+ "haystack has length {}, but must be at least {}",
+ len,
+ V::BYTES
+ );
+
+ // Search a possibly unaligned chunk at `start`. This covers any part
+ // of the haystack prior to where aligned loads can start.
+ if let Some(cur) = self.search_chunk(start, topos) {
+ return Some(cur);
+ }
+ // Set `cur` to the first V-aligned pointer greater than `start`.
+ let mut cur = start.add(V::BYTES - (start.as_usize() & V::ALIGN));
+ debug_assert!(cur > start && end.sub(V::BYTES) >= start);
+ if len >= Self::LOOP_SIZE {
+ while cur <= end.sub(Self::LOOP_SIZE) {
+ debug_assert_eq!(0, cur.as_usize() % V::BYTES);
+
+ let a = V::load_aligned(cur);
+ let b = V::load_aligned(cur.add(V::BYTES));
+ let eqa1 = self.v1.cmpeq(a);
+ let eqb1 = self.v1.cmpeq(b);
+ let eqa2 = self.v2.cmpeq(a);
+ let eqb2 = self.v2.cmpeq(b);
+ let eqa3 = self.v3.cmpeq(a);
+ let eqb3 = self.v3.cmpeq(b);
+ let or1 = eqa1.or(eqb1);
+ let or2 = eqa2.or(eqb2);
+ let or3 = eqa3.or(eqb3);
+ let or4 = or1.or(or2);
+ let or5 = or3.or(or4);
+ if or5.movemask_will_have_non_zero() {
+ let mask = eqa1
+ .movemask()
+ .or(eqa2.movemask())
+ .or(eqa3.movemask());
+ if mask.has_non_zero() {
+ return Some(cur.add(topos(mask)));
+ }
+
+ let mask = eqb1
+ .movemask()
+ .or(eqb2.movemask())
+ .or(eqb3.movemask());
+ debug_assert!(mask.has_non_zero());
+ return Some(cur.add(V::BYTES).add(topos(mask)));
+ }
+ cur = cur.add(Self::LOOP_SIZE);
+ }
+ }
+ // Handle any leftovers after the aligned loop above. We use unaligned
+ // loads here, but I believe we are guaranteed that they are aligned
+ // since `cur` is aligned.
+ while cur <= end.sub(V::BYTES) {
+ debug_assert!(end.distance(cur) >= V::BYTES);
+ if let Some(cur) = self.search_chunk(cur, topos) {
+ return Some(cur);
+ }
+ cur = cur.add(V::BYTES);
+ }
+ // Finally handle any remaining bytes less than the size of V. In this
+ // case, our pointer may indeed be unaligned and the load may overlap
+ // with the previous one. But that's okay since we know the previous
+ // load didn't lead to a match (otherwise we wouldn't be here).
+ if cur < end {
+ debug_assert!(end.distance(cur) < V::BYTES);
+ cur = cur.sub(V::BYTES - end.distance(cur));
+ debug_assert_eq!(end.distance(cur), V::BYTES);
+ return self.search_chunk(cur, topos);
+ }
+ None
+ }
+
+ /// Return a pointer to the last occurrence of one of the needles in the
+ /// given haystack. If no such occurrence exists, then `None` is returned.
+ ///
+ /// When a match is found, the pointer returned is guaranteed to be
+ /// `>= start` and `< end`.
+ ///
+ /// # Safety
+ ///
+ /// * It must be the case that `start < end` and that the distance between
+ /// them is at least equal to `V::BYTES`. That is, it must always be valid
+ /// to do at least an unaligned load of `V` at `start`.
+ /// * Both `start` and `end` must be valid for reads.
+ /// * Both `start` and `end` must point to an initialized value.
+ /// * Both `start` and `end` must point to the same allocated object and
+ /// must either be in bounds or at most one byte past the end of the
+ /// allocated object.
+ /// * Both `start` and `end` must be _derived from_ a pointer to the same
+ /// object.
+ /// * The distance between `start` and `end` must not overflow `isize`.
+ /// * The distance being in bounds must not rely on "wrapping around" the
+ /// address space.
+ #[inline(always)]
+ pub(crate) unsafe fn rfind_raw(
+ &self,
+ start: *const u8,
+ end: *const u8,
+ ) -> Option<*const u8> {
+ // If we want to support vectors bigger than 256 bits, we probably
+ // need to move up to using a u64 for the masks used below. Currently
+ // they are 32 bits, which means we're SOL for vectors that need masks
+ // bigger than 32 bits. Overall unclear until there's a use case.
+ debug_assert!(V::BYTES <= 32, "vector cannot be bigger than 32 bytes");
+
+ let topos = V::Mask::last_offset;
+ let len = end.distance(start);
+ debug_assert!(
+ len >= V::BYTES,
+ "haystack has length {}, but must be at least {}",
+ len,
+ V::BYTES
+ );
+
+ if let Some(cur) = self.search_chunk(end.sub(V::BYTES), topos) {
+ return Some(cur);
+ }
+ let mut cur = end.sub(end.as_usize() & V::ALIGN);
+ debug_assert!(start <= cur && cur <= end);
+ if len >= Self::LOOP_SIZE {
+ while cur >= start.add(Self::LOOP_SIZE) {
+ debug_assert_eq!(0, cur.as_usize() % V::BYTES);
+
+ cur = cur.sub(Self::LOOP_SIZE);
+ let a = V::load_aligned(cur);
+ let b = V::load_aligned(cur.add(V::BYTES));
+ let eqa1 = self.v1.cmpeq(a);
+ let eqb1 = self.v1.cmpeq(b);
+ let eqa2 = self.v2.cmpeq(a);
+ let eqb2 = self.v2.cmpeq(b);
+ let eqa3 = self.v3.cmpeq(a);
+ let eqb3 = self.v3.cmpeq(b);
+ let or1 = eqa1.or(eqb1);
+ let or2 = eqa2.or(eqb2);
+ let or3 = eqa3.or(eqb3);
+ let or4 = or1.or(or2);
+ let or5 = or3.or(or4);
+ if or5.movemask_will_have_non_zero() {
+ let mask = eqb1
+ .movemask()
+ .or(eqb2.movemask())
+ .or(eqb3.movemask());
+ if mask.has_non_zero() {
+ return Some(cur.add(V::BYTES).add(topos(mask)));
+ }
+
+ let mask = eqa1
+ .movemask()
+ .or(eqa2.movemask())
+ .or(eqa3.movemask());
+ debug_assert!(mask.has_non_zero());
+ return Some(cur.add(topos(mask)));
+ }
+ }
+ }
+ while cur >= start.add(V::BYTES) {
+ debug_assert!(cur.distance(start) >= V::BYTES);
+ cur = cur.sub(V::BYTES);
+ if let Some(cur) = self.search_chunk(cur, topos) {
+ return Some(cur);
+ }
+ }
+ if cur > start {
+ debug_assert!(cur.distance(start) < V::BYTES);
+ return self.search_chunk(start, topos);
+ }
+ None
+ }
+
+ /// Search the `V::BYTES` bytes starting at `cur` via an unaligned load.
+ ///
+ /// `mask_to_offset` should be a function that converts a `movemask` to
+ /// an offset such that `cur.add(offset)` corresponds to a pointer to the
+ /// match location if one is found. Generally it is expected to use either
+ /// `V::Mask::first_offset` or `V::Mask::last_offset`, depending on whether
+ /// one is implementing a forward or reverse search, respectively.
+ ///
+ /// # Safety
+ ///
+ /// `cur` must be a valid pointer and it must be valid to do an unaligned
+ /// load of size `V::BYTES` at `cur`.
+ #[inline(always)]
+ unsafe fn search_chunk(
+ &self,
+ cur: *const u8,
+ mask_to_offset: impl Fn(V::Mask) -> usize,
+ ) -> Option<*const u8> {
+ let chunk = V::load_unaligned(cur);
+ let eq1 = self.v1.cmpeq(chunk);
+ let eq2 = self.v2.cmpeq(chunk);
+ let eq3 = self.v3.cmpeq(chunk);
+ let mask = eq1.or(eq2).or(eq3).movemask();
+ if mask.has_non_zero() {
+ let mask1 = eq1.movemask();
+ let mask2 = eq2.movemask();
+ let mask3 = eq3.movemask();
+ Some(cur.add(mask_to_offset(mask1.or(mask2).or(mask3))))
+ } else {
+ None
+ }
+ }
+}
+
+/// An iterator over all occurrences of a set of bytes in a haystack.
+///
+/// This iterator implements the routines necessary to provide a
+/// `DoubleEndedIterator` impl, which means it can also be used to find
+/// occurrences in reverse order.
+///
+/// The lifetime parameters are as follows:
+///
+/// * `'h` refers to the lifetime of the haystack being searched.
+///
+/// This type is intended to be used to implement all iterators for the
+/// `memchr` family of functions. It handles a tiny bit of marginally tricky
+/// raw pointer math, but otherwise expects the caller to provide `find_raw`
+/// and `rfind_raw` routines for each call of `next` and `next_back`,
+/// respectively.
+#[derive(Clone, Debug)]
+pub(crate) struct Iter<'h> {
+ /// The original starting point into the haystack. We use this to convert
+ /// pointers to offsets.
+ original_start: *const u8,
+ /// The current starting point into the haystack. That is, where the next
+ /// search will begin.
+ start: *const u8,
+ /// The current ending point into the haystack. That is, where the next
+ /// reverse search will begin.
+ end: *const u8,
+ /// A marker for tracking the lifetime of the `original_start`, `start`
+ /// and `end` pointers above, which all point into the haystack.
+ haystack: core::marker::PhantomData<&'h [u8]>,
+}
+
+// SAFETY: Iter contains no shared references to anything that performs any
+// interior mutations. Also, the lifetime guarantees that Iter will not outlive
+// the haystack.
+unsafe impl<'h> Send for Iter<'h> {}
+
+// SAFETY: Iter performs no interior mutations, therefore no explicit
+// synchronization is necessary. Also, the lifetime guarantees that Iter will
+// not outlive the haystack.
+unsafe impl<'h> Sync for Iter<'h> {}
+
+impl<'h> Iter<'h> {
+ /// Create a new generic memchr iterator.
+ #[inline(always)]
+ pub(crate) fn new(haystack: &'h [u8]) -> Iter<'h> {
+ Iter {
+ original_start: haystack.as_ptr(),
+ start: haystack.as_ptr(),
+ end: haystack.as_ptr().wrapping_add(haystack.len()),
+ haystack: core::marker::PhantomData,
+ }
+ }
+
+ /// Returns the next occurrence in the forward direction.
+ ///
+ /// # Safety
+ ///
+ /// Callers must ensure that if a pointer is returned from the closure
+ /// provided, then it must be greater than or equal to the start pointer
+ /// and less than the end pointer.
+ #[inline(always)]
+ pub(crate) unsafe fn next(
+ &mut self,
+ mut find_raw: impl FnMut(*const u8, *const u8) -> Option<*const u8>,
+ ) -> Option<usize> {
+ // SAFETY: Pointers are derived directly from the same &[u8] haystack.
+ // We only ever modify start/end corresponding to a matching offset
+ // found between start and end. Thus all changes to start/end maintain
+ // our safety requirements.
+ //
+ // The only other assumption we rely on is that the pointer returned
+ // by `find_raw` satisfies `self.start <= found < self.end`, and that
+ // safety contract is forwarded to the caller.
+ let found = find_raw(self.start, self.end)?;
+ let result = found.distance(self.original_start);
+ self.start = found.add(1);
+ Some(result)
+ }
+
+ /// Returns the number of remaining matches in this iterator, consuming it.
+ #[inline(always)]
+ pub(crate) fn count(
+ self,
+ mut count_raw: impl FnMut(*const u8, *const u8) -> usize,
+ ) -> usize {
+ // SAFETY: Pointers are derived directly from the same &[u8] haystack.
+ // We only ever modify start/end corresponding to a matching offset
+ // found between start and end. Thus all changes to start/end maintain
+ // our safety requirements.
+ count_raw(self.start, self.end)
+ }
+
+ /// Returns the next occurrence in reverse.
+ ///
+ /// # Safety
+ ///
+ /// Callers must ensure that if a pointer is returned from the closure
+ /// provided, then it must be greater than or equal to the start pointer
+ /// and less than the end pointer.
+ #[inline(always)]
+ pub(crate) unsafe fn next_back(
+ &mut self,
+ mut rfind_raw: impl FnMut(*const u8, *const u8) -> Option<*const u8>,
+ ) -> Option<usize> {
+ // SAFETY: Pointers are derived directly from the same &[u8] haystack.
+ // We only ever modify start/end corresponding to a matching offset
+ // found between start and end. Thus all changes to start/end maintain
+ // our safety requirements.
+ //
+ // The only other assumption we rely on is that the pointer returned
+ // by `rfind_raw` satisfies `self.start <= found < self.end`, and that
+ // safety contract is forwarded to the caller.
+ let found = rfind_raw(self.start, self.end)?;
+ let result = found.distance(self.original_start);
+ self.end = found;
+ Some(result)
+ }
+
+ /// Provides an implementation of `Iterator::size_hint`.
+ #[inline(always)]
+ pub(crate) fn size_hint(&self) -> (usize, Option<usize>) {
+ (0, Some(self.end.as_usize().saturating_sub(self.start.as_usize())))
+ }
+}
+
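+// A usage sketch (illustrative only, not part of this diff): given some
+// searcher `one` (hypothetical) whose `find_raw` upholds the contract
+// documented on `next`, iterating over matching offsets looks like:
+//
+//     let mut it = Iter::new(haystack);
+//     // SAFETY: `find_raw` returns pointers in `[start, end)`.
+//     while let Some(offset) = unsafe { it.next(|s, e| one.find_raw(s, e)) } {
+//         // `offset` is a valid index into `haystack`.
+//     }
+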
+/// Search a slice using a function that operates on raw pointers.
+///
+/// Given a function to search a contiguous sequence of memory for the location
+/// of a non-empty set of bytes, this will execute that search on a slice of
+/// bytes. The pointer returned by the given function will be converted to an
+/// offset relative to the starting point of the given slice. That is, if a
+/// match is found, the offset returned by this routine is guaranteed to be a
+/// valid index into `haystack`.
+///
+/// Callers may use this for a forward or reverse search.
+///
+/// # Safety
+///
+/// Callers must ensure that if a pointer is returned by `find_raw`, then the
+/// pointer must be greater than or equal to the starting pointer and less than
+/// the end pointer.
+#[inline(always)]
+pub(crate) unsafe fn search_slice_with_raw(
+ haystack: &[u8],
+ mut find_raw: impl FnMut(*const u8, *const u8) -> Option<*const u8>,
+) -> Option<usize> {
+ // SAFETY: We rely on `find_raw` to return a correct and valid pointer, but
+ // otherwise, `start` and `end` are valid due to the guarantees provided by
+ // a &[u8].
+ let start = haystack.as_ptr();
+ let end = start.add(haystack.len());
+ let found = find_raw(start, end)?;
+ Some(found.distance(start))
+}
+
+/// Performs a forward byte-at-a-time loop until either `ptr >= end` or
+/// until `confirm(*ptr)` returns `true`. If the former occurs, then `None` is
+/// returned. If the latter occurs, then the pointer at which `confirm` returns
+/// `true` is returned.
+///
+/// # Safety
+///
+/// Callers must provide valid `start` and `end` pointers satisfying
+/// `start <= end`.
+#[inline(always)]
+pub(crate) unsafe fn fwd_byte_by_byte<F: Fn(u8) -> bool>(
+ start: *const u8,
+ end: *const u8,
+ confirm: F,
+) -> Option<*const u8> {
+ debug_assert!(start <= end);
+ let mut ptr = start;
+ while ptr < end {
+ if confirm(*ptr) {
+ return Some(ptr);
+ }
+ ptr = ptr.offset(1);
+ }
+ None
+}
+
+/// Performs a reverse byte-at-a-time loop until either `ptr < start` or
+/// until `confirm(*ptr)` returns `true`. If the former occurs, then `None` is
+/// returned. If the latter occurs, then the pointer at which `confirm` returns
+/// `true` is returned.
+///
+/// # Safety
+///
+/// Callers must provide valid `start` and `end` pointers satisfying
+/// `start <= end`.
+#[inline(always)]
+pub(crate) unsafe fn rev_byte_by_byte<F: Fn(u8) -> bool>(
+ start: *const u8,
+ end: *const u8,
+ confirm: F,
+) -> Option<*const u8> {
+ debug_assert!(start <= end);
+
+ let mut ptr = end;
+ while ptr > start {
+ ptr = ptr.offset(-1);
+ if confirm(*ptr) {
+ return Some(ptr);
+ }
+ }
+ None
+}
+
+/// Performs a forward byte-at-a-time loop until `ptr >= end` and returns
+/// the number of times `confirm(*ptr)` returns `true`.
+///
+/// # Safety
+///
+/// Callers must provide valid `start` and `end` pointers satisfying
+/// `start <= end`.
+#[inline(always)]
+pub(crate) unsafe fn count_byte_by_byte<F: Fn(u8) -> bool>(
+ start: *const u8,
+ end: *const u8,
+ confirm: F,
+) -> usize {
+ debug_assert!(start <= end);
+ let mut ptr = start;
+ let mut count = 0;
+ while ptr < end {
+ if confirm(*ptr) {
+ count += 1;
+ }
+ ptr = ptr.offset(1);
+ }
+ count
+}
diff --git a/vendor/memchr/src/arch/generic/mod.rs b/vendor/memchr/src/arch/generic/mod.rs
new file mode 100644
index 000000000..63ee3f0b3
--- /dev/null
+++ b/vendor/memchr/src/arch/generic/mod.rs
@@ -0,0 +1,14 @@
+/*!
+This module defines "generic" routines that can be specialized to specific
+architectures.
+
+We don't expose this module primarily because it would require exposing all
+of the internal infrastructure required to write these generic routines.
+That infrastructure should be treated as an implementation detail so that
+it is allowed to evolve. Instead, what we expose are architecture specific
+instantiations of these generic implementations. The generic code just lets us
+write the code once (usually).
+*/
+
+pub(crate) mod memchr;
+pub(crate) mod packedpair;
diff --git a/vendor/memchr/src/arch/generic/packedpair.rs b/vendor/memchr/src/arch/generic/packedpair.rs
new file mode 100644
index 000000000..8d97cf28f
--- /dev/null
+++ b/vendor/memchr/src/arch/generic/packedpair.rs
@@ -0,0 +1,317 @@
+/*!
+Generic crate-internal routines for the "packed pair" SIMD algorithm.
+
+The "packed pair" algorithm is based on the [generic SIMD] algorithm. The main
+difference is that it (by default) uses a background distribution of byte
+frequencies to heuristically select the pair of bytes to search for.
+
+[generic SIMD]: http://0x80.pl/articles/simd-strfind.html#first-and-last
+*/
+
+use crate::{
+ arch::all::{is_equal_raw, packedpair::Pair},
+ ext::Pointer,
+ vector::{MoveMask, Vector},
+};
+
+/// A generic architecture dependent "packed pair" finder.
+///
+/// This finder picks two bytes that it believes have high predictive power
+/// for indicating an overall match of a needle. Depending on whether
+/// `Finder::find` or `Finder::find_prefilter` is used, it reports offsets
+/// where the needle matches or could match. In the prefilter case, candidates
+/// are reported whenever the [`Pair`] of bytes given matches.
+///
+/// This is architecture dependent because it uses specific vector operations
+/// to look for occurrences of the pair of bytes.
+///
+/// This type is not meant to be exported and is instead meant to be used as
+/// the implementation for architecture specific facades. Why? Because it's a
+/// bit of a quirky API that requires `inline(always)` annotations. And pretty
+/// much everything has safety obligations due (at least) to the caller needing
+/// to inline calls into routines marked with
+/// `#[target_feature(enable = "...")]`.
+#[derive(Clone, Copy, Debug)]
+pub(crate) struct Finder<V> {
+ pair: Pair,
+ v1: V,
+ v2: V,
+ min_haystack_len: usize,
+}
+
+impl<V: Vector> Finder<V> {
+ /// Create a new pair searcher. The searcher returned can either report
+ /// exact matches of `needle` or act as a prefilter and report candidate
+ /// positions of `needle`.
+ ///
+ /// # Safety
+ ///
+ /// Callers must ensure that whatever vector type this routine is called
+ /// with is supported by the current environment.
+ ///
+ /// Callers must also ensure that `needle.len() >= 2`.
+ #[inline(always)]
+ pub(crate) unsafe fn new(needle: &[u8], pair: Pair) -> Finder<V> {
+ let max_index = pair.index1().max(pair.index2());
+ let min_haystack_len =
+ core::cmp::max(needle.len(), usize::from(max_index) + V::BYTES);
+ let v1 = V::splat(needle[usize::from(pair.index1())]);
+ let v2 = V::splat(needle[usize::from(pair.index2())]);
+ Finder { pair, v1, v2, min_haystack_len }
+ }
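+
+ // For example (hypothetical numbers): for `needle = b"foobar"` with pair
+ // indices `(0, 3)` and a 16-byte vector, `min_haystack_len` is
+ // `max(6, 3 + 16) = 19`: the vector path in `find` below needs at least
+ // that many bytes to safely load a vector at each pair offset.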
+
+ /// Searches the given haystack for the given needle. The needle given
+ /// should be the same as the needle that this finder was initialized
+ /// with.
+ ///
+ /// # Panics
+ ///
+ /// When `haystack.len()` is less than [`Finder::min_haystack_len`].
+ ///
+ /// # Safety
+ ///
+ /// Since this is meant to be used with vector functions, callers need to
+ /// specialize this inside of a function with a `target_feature` attribute.
+ /// Therefore, callers must ensure that whatever target feature is being
+ /// used supports the vector functions that this function is specialized
+ /// for. (For the specific vector functions used, see the Vector trait
+ /// implementations.)
+ #[inline(always)]
+ pub(crate) unsafe fn find(
+ &self,
+ haystack: &[u8],
+ needle: &[u8],
+ ) -> Option<usize> {
+ assert!(
+ haystack.len() >= self.min_haystack_len,
+ "haystack too small, should be at least {} but got {}",
+ self.min_haystack_len,
+ haystack.len(),
+ );
+
+ let all = V::Mask::all_zeros_except_least_significant(0);
+ let start = haystack.as_ptr();
+ let end = start.add(haystack.len());
+ let max = end.sub(self.min_haystack_len);
+ let mut cur = start;
+
+ // N.B. I did experiment with unrolling the loop to deal with size(V)
+ // bytes at a time and 2*size(V) bytes at a time. The double unroll
+ // was marginally faster while the quadruple unroll was unambiguously
+ // slower. In the end, I decided the complexity from unrolling wasn't
+ // worth it. I used the memmem/krate/prebuilt/huge-en/ benchmarks to
+ // compare.
+ while cur <= max {
+ if let Some(chunki) = self.find_in_chunk(needle, cur, end, all) {
+ return Some(matched(start, cur, chunki));
+ }
+ cur = cur.add(V::BYTES);
+ }
+ if cur < end {
+ let remaining = end.distance(cur);
+ debug_assert!(
+ remaining < self.min_haystack_len,
+ "remaining bytes should be smaller than the minimum haystack \
+ length of {}, but there are {} bytes remaining",
+ self.min_haystack_len,
+ remaining,
+ );
+ if remaining < needle.len() {
+ return None;
+ }
+ debug_assert!(
+ max < cur,
+ "after main loop, cur should have exceeded max",
+ );
+ let overlap = cur.distance(max);
+ debug_assert!(
+ overlap > 0,
+ "overlap ({}) must always be non-zero",
+ overlap,
+ );
+ debug_assert!(
+ overlap < V::BYTES,
+ "overlap ({}) cannot possibly be >= than a vector ({})",
+ overlap,
+ V::BYTES,
+ );
+ // The mask has all of its bits set except for the first N least
+ // significant bits, where N=overlap. This way, any matches that
+ // occur in find_in_chunk within the overlap are automatically
+ // ignored.
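+ //
+ // For example (hypothetical numbers): `overlap = 3` produces the mask
+ // `0b...11111000`, so the three low lanes, which the previous load
+ // already searched, can never report a match here.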
+ let mask = V::Mask::all_zeros_except_least_significant(overlap);
+ cur = max;
+ let m = self.find_in_chunk(needle, cur, end, mask);
+ if let Some(chunki) = m {
+ return Some(matched(start, cur, chunki));
+ }
+ }
+ None
+ }
+
+ /// Searches the given haystack for offsets that represent candidate
+ /// matches of the `needle` given to this finder's constructor. The offsets
+ /// returned, if they are a match, correspond to the starting offset of
+ /// `needle` in the given `haystack`.
+ ///
+ /// # Panics
+ ///
+ /// When `haystack.len()` is less than [`Finder::min_haystack_len`].
+ ///
+ /// # Safety
+ ///
+ /// Since this is meant to be used with vector functions, callers need to
+ /// specialize this inside of a function with a `target_feature` attribute.
+ /// Therefore, callers must ensure that whatever target feature is being
+ /// used supports the vector functions that this function is specialized
+ /// for. (For the specific vector functions used, see the Vector trait
+ /// implementations.)
+ #[inline(always)]
+ pub(crate) unsafe fn find_prefilter(
+ &self,
+ haystack: &[u8],
+ ) -> Option<usize> {
+ assert!(
+ haystack.len() >= self.min_haystack_len,
+ "haystack too small, should be at least {} but got {}",
+ self.min_haystack_len,
+ haystack.len(),
+ );
+
+ let start = haystack.as_ptr();
+ let end = start.add(haystack.len());
+ let max = end.sub(self.min_haystack_len);
+ let mut cur = start;
+
+ // N.B. I did experiment with unrolling the loop to deal with size(V)
+ // bytes at a time and 2*size(V) bytes at a time. The double unroll
+ // was marginally faster while the quadruple unroll was unambiguously
+ // slower. In the end, I decided the complexity from unrolling wasn't
+ // worth it. I used the memmem/krate/prebuilt/huge-en/ benchmarks to
+ // compare.
+ while cur <= max {
+ if let Some(chunki) = self.find_prefilter_in_chunk(cur) {
+ return Some(matched(start, cur, chunki));
+ }
+ cur = cur.add(V::BYTES);
+ }
+ if cur < end {
+ // This routine immediately quits if a candidate match is found.
+ // That means that if we're here, no candidate matches have been
+ // found at or before 'ptr'. Thus, we don't need to mask anything
+ // out even though we might technically search part of the haystack
+ // that we've already searched (because we know it can't match).
+ cur = max;
+ if let Some(chunki) = self.find_prefilter_in_chunk(cur) {
+ return Some(matched(start, cur, chunki));
+ }
+ }
+ None
+ }
+
+ /// Search for an occurrence of our byte pair from the needle in the chunk
+ /// pointed to by cur, with the end of the haystack pointed to by end.
+ /// When an occurrence is found, memcmp is run to check if a match occurs
+ /// at the corresponding position.
+ ///
+ /// `mask` should have bits set corresponding the positions in the chunk
+ /// in which matches are considered. This is only used for the last vector
+ /// load where the beginning of the vector might have overlapped with the
+ /// last load in the main loop. The mask lets us avoid visiting positions
+ /// that have already been discarded as matches.
+ ///
+ /// # Safety
+ ///
+ /// It must be safe to do an unaligned read of size(V) bytes starting at
+ /// both `cur + self.pair.index1()` and `cur + self.pair.index2()`. It must
+ /// also be safe to do unaligned loads on `cur` up to `end - needle.len()`.
+ #[inline(always)]
+ unsafe fn find_in_chunk(
+ &self,
+ needle: &[u8],
+ cur: *const u8,
+ end: *const u8,
+ mask: V::Mask,
+ ) -> Option<usize> {
+ let index1 = usize::from(self.pair.index1());
+ let index2 = usize::from(self.pair.index2());
+ let chunk1 = V::load_unaligned(cur.add(index1));
+ let chunk2 = V::load_unaligned(cur.add(index2));
+ let eq1 = chunk1.cmpeq(self.v1);
+ let eq2 = chunk2.cmpeq(self.v2);
+
+ let mut offsets = eq1.and(eq2).movemask().and(mask);
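+ // Walk candidate positions from least to most significant set bit.
+ // For example (hypothetical mask): `offsets = 0b0100_1000` yields
+ // candidate offsets 3 and then 6, clearing each bit after it is
+ // checked.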
+ while offsets.has_non_zero() {
+ let offset = offsets.first_offset();
+ let cur = cur.add(offset);
+ if end.sub(needle.len()) < cur {
+ return None;
+ }
+ if is_equal_raw(needle.as_ptr(), cur, needle.len()) {
+ return Some(offset);
+ }
+ offsets = offsets.clear_least_significant_bit();
+ }
+ None
+ }
+
+ /// Search for an occurrence of our byte pair from the needle in the chunk
+ /// pointed to by cur, with the end of the haystack pointed to by end.
+ /// When an occurrence is found, memcmp is run to check if a match occurs
+ /// at the corresponding position.
+ ///
+ /// # Safety
+ ///
+ /// It must be safe to do an unaligned read of size(V) bytes starting at
+ /// both `cur + self.pair.index1()` and `cur + self.pair.index2()`. It must
+ /// also be safe to do unaligned reads on `cur` up to `end - needle.len()`.
+ #[inline(always)]
+ unsafe fn find_prefilter_in_chunk(&self, cur: *const u8) -> Option<usize> {
+ let index1 = usize::from(self.pair.index1());
+ let index2 = usize::from(self.pair.index2());
+ let chunk1 = V::load_unaligned(cur.add(index1));
+ let chunk2 = V::load_unaligned(cur.add(index2));
+ let eq1 = chunk1.cmpeq(self.v1);
+ let eq2 = chunk2.cmpeq(self.v2);
+
+ let offsets = eq1.and(eq2).movemask();
+ if !offsets.has_non_zero() {
+ return None;
+ }
+ Some(offsets.first_offset())
+ }
+
+ /// Returns the pair of offsets (into the needle) used to check as a
+ /// predicate before confirming whether a needle exists at a particular
+ /// position.
+ #[inline]
+ pub(crate) fn pair(&self) -> &Pair {
+ &self.pair
+ }
+
+ /// Returns the minimum haystack length that this `Finder` can search.
+ ///
+ /// Providing a haystack to this `Finder` shorter than this length is
+ /// guaranteed to result in a panic.
+ #[inline(always)]
+ pub(crate) fn min_haystack_len(&self) -> usize {
+ self.min_haystack_len
+ }
+}
+
+/// Accepts a chunk-relative offset and returns a haystack-relative offset.
+///
+/// This used to be marked `#[cold]` and `#[inline(never)]`, but I couldn't
+/// observe a consistent measurable difference between that and just inlining
+/// it. So we go with inlining it.
+///
+/// # Safety
+///
+/// Same as `ptr::offset_from`, in addition to `cur >= start`.
+#[inline(always)]
+unsafe fn matched(start: *const u8, cur: *const u8, chunki: usize) -> usize {
+ cur.distance(start) + chunki
+}
+
+// If you're looking for tests, those are run for each instantiation of the
+// above code. So for example, see arch::x86_64::sse2::packedpair.
diff --git a/vendor/memchr/src/arch/mod.rs b/vendor/memchr/src/arch/mod.rs
new file mode 100644
index 000000000..2f63a1a21
--- /dev/null
+++ b/vendor/memchr/src/arch/mod.rs
@@ -0,0 +1,16 @@
+/*!
+A module with low-level architecture dependent routines.
+
+These routines are useful as primitives for tasks not covered by the higher
+level crate API.
+*/
+
+pub mod all;
+pub(crate) mod generic;
+
+#[cfg(target_arch = "aarch64")]
+pub mod aarch64;
+#[cfg(target_arch = "wasm32")]
+pub mod wasm32;
+#[cfg(target_arch = "x86_64")]
+pub mod x86_64;
diff --git a/vendor/memchr/src/arch/wasm32/memchr.rs b/vendor/memchr/src/arch/wasm32/memchr.rs
new file mode 100644
index 000000000..b0bbd1c6a
--- /dev/null
+++ b/vendor/memchr/src/arch/wasm32/memchr.rs
@@ -0,0 +1,137 @@
+/*!
+Wrapper routines for `memchr` and friends.
+
+These routines choose the best implementation at compile time. (This is
+different from `x86_64` because it is expected that `simd128` is almost always
+available for `wasm32` targets.)
+*/
+
+macro_rules! defraw {
+ ($ty:ident, $find:ident, $start:ident, $end:ident, $($needles:ident),+) => {{
+ #[cfg(target_feature = "simd128")]
+ {
+ use crate::arch::wasm32::simd128::memchr::$ty;
+
+ debug!("chose simd128 for {}", stringify!($ty));
+ debug_assert!($ty::is_available());
+ // SAFETY: We know that wasm memchr is always available whenever
+ // code is compiled for `wasm32` with the `simd128` target feature
+ // enabled.
+ $ty::new_unchecked($($needles),+).$find($start, $end)
+ }
+ #[cfg(not(target_feature = "simd128"))]
+ {
+ use crate::arch::all::memchr::$ty;
+
+ debug!(
+ "no simd128 feature available, using fallback for {}",
+ stringify!($ty),
+ );
+ $ty::new($($needles),+).$find($start, $end)
+ }
+ }}
+}
+
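+// For example, `defraw!(One, find_raw, start, end, n1)` expands (modulo the
+// debug logging and debug assert) to `One::new_unchecked(n1).find_raw(start,
+// end)` when `simd128` is enabled, and to the scalar fallback
+// `One::new(n1).find_raw(start, end)` otherwise.
+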
+/// memchr, but using raw pointers to represent the haystack.
+///
+/// # Safety
+///
+/// Pointers must be valid. See `One::find_raw`.
+#[inline(always)]
+pub(crate) unsafe fn memchr_raw(
+ n1: u8,
+ start: *const u8,
+ end: *const u8,
+) -> Option<*const u8> {
+ defraw!(One, find_raw, start, end, n1)
+}
+
+/// memrchr, but using raw pointers to represent the haystack.
+///
+/// # Safety
+///
+/// Pointers must be valid. See `One::rfind_raw`.
+#[inline(always)]
+pub(crate) unsafe fn memrchr_raw(
+ n1: u8,
+ start: *const u8,
+ end: *const u8,
+) -> Option<*const u8> {
+ defraw!(One, rfind_raw, start, end, n1)
+}
+
+/// memchr2, but using raw pointers to represent the haystack.
+///
+/// # Safety
+///
+/// Pointers must be valid. See `Two::find_raw`.
+#[inline(always)]
+pub(crate) unsafe fn memchr2_raw(
+ n1: u8,
+ n2: u8,
+ start: *const u8,
+ end: *const u8,
+) -> Option<*const u8> {
+ defraw!(Two, find_raw, start, end, n1, n2)
+}
+
+/// memrchr2, but using raw pointers to represent the haystack.
+///
+/// # Safety
+///
+/// Pointers must be valid. See `Two::rfind_raw`.
+#[inline(always)]
+pub(crate) unsafe fn memrchr2_raw(
+ n1: u8,
+ n2: u8,
+ start: *const u8,
+ end: *const u8,
+) -> Option<*const u8> {
+ defraw!(Two, rfind_raw, start, end, n1, n2)
+}
+
+/// memchr3, but using raw pointers to represent the haystack.
+///
+/// # Safety
+///
+/// Pointers must be valid. See `Three::find_raw`.
+#[inline(always)]
+pub(crate) unsafe fn memchr3_raw(
+ n1: u8,
+ n2: u8,
+ n3: u8,
+ start: *const u8,
+ end: *const u8,
+) -> Option<*const u8> {
+ defraw!(Three, find_raw, start, end, n1, n2, n3)
+}
+
+/// memrchr3, but using raw pointers to represent the haystack.
+///
+/// # Safety
+///
+/// Pointers must be valid. See `Three::rfind_raw`.
+#[inline(always)]
+pub(crate) unsafe fn memrchr3_raw(
+ n1: u8,
+ n2: u8,
+ n3: u8,
+ start: *const u8,
+ end: *const u8,
+) -> Option<*const u8> {
+ defraw!(Three, rfind_raw, start, end, n1, n2, n3)
+}
+
+/// Count all matching bytes, but using raw pointers to represent the haystack.
+///
+/// # Safety
+///
+/// Pointers must be valid. See `One::count_raw`.
+#[inline(always)]
+pub(crate) unsafe fn count_raw(
+ n1: u8,
+ start: *const u8,
+ end: *const u8,
+) -> usize {
+ defraw!(One, count_raw, start, end, n1)
+}
diff --git a/vendor/memchr/src/arch/wasm32/mod.rs b/vendor/memchr/src/arch/wasm32/mod.rs
new file mode 100644
index 000000000..209f876cb
--- /dev/null
+++ b/vendor/memchr/src/arch/wasm32/mod.rs
@@ -0,0 +1,7 @@
+/*!
+Vector algorithms for the `wasm32` target.
+*/
+
+pub mod simd128;
+
+pub(crate) mod memchr;
diff --git a/vendor/memchr/src/arch/wasm32/simd128/memchr.rs b/vendor/memchr/src/arch/wasm32/simd128/memchr.rs
new file mode 100644
index 000000000..fa314c9d1
--- /dev/null
+++ b/vendor/memchr/src/arch/wasm32/simd128/memchr.rs
@@ -0,0 +1,1020 @@
+/*!
+This module defines 128-bit vector implementations of `memchr` and friends.
+
+The main types in this module are [`One`], [`Two`] and [`Three`]. They are for
+searching for one, two or three distinct bytes, respectively, in a haystack.
+Each type also has corresponding double ended iterators. These searchers are
+typically much faster than scalar routines accomplishing the same task.
+
+The `One` searcher also provides a [`One::count`] routine for efficiently
+counting the number of times a single byte occurs in a haystack. This is
+useful, for example, for counting the number of lines in a haystack. This
+routine exists because it is usually faster, especially with a high match
+count, than using [`One::find`] repeatedly. ([`OneIter`] specializes its
+`Iterator::count` implementation to use this routine.)
+
+Only one, two and three bytes are supported because three bytes is about
+the point where one sees diminishing returns. Beyond that point, it's
+probably (but not necessarily) better to just use a simple `[bool; 256]` array
+or similar. However, it depends mightily on the specific workload and the
+expected match frequency.
+*/
+
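+// A usage sketch (illustrative, not part of this diff): when simd128 is
+// available, `One::new` returns `Some` and `find` reports the first
+// matching offset:
+//
+//     if let Some(finder) = One::new(b'\0') {
+//         assert_eq!(Some(3), finder.find(b"abc\0xyz"));
+//     }
+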
+use core::arch::wasm32::v128;
+
+use crate::{arch::generic::memchr as generic, ext::Pointer, vector::Vector};
+
+/// Finds all occurrences of a single byte in a haystack.
+#[derive(Clone, Copy, Debug)]
+pub struct One(generic::One<v128>);
+
+impl One {
+ /// Create a new searcher that finds occurrences of the needle byte given.
+ ///
+ /// This particular searcher is specialized to use simd128 vector
+ /// instructions that typically make it quite fast.
+ ///
+ /// If simd128 is unavailable in the current environment, then `None` is
+ /// returned.
+ #[inline]
+ pub fn new(needle: u8) -> Option<One> {
+ if One::is_available() {
+ // SAFETY: we check that simd128 is available above.
+ unsafe { Some(One::new_unchecked(needle)) }
+ } else {
+ None
+ }
+ }
+
+ /// Create a new finder specific to simd128 vectors and routines without
+ /// checking that simd128 is available.
+ ///
+ /// # Safety
+ ///
+ /// Callers must guarantee that it is safe to execute `simd128`
+ /// instructions in the current environment.
+ #[target_feature(enable = "simd128")]
+ #[inline]
+ pub unsafe fn new_unchecked(needle: u8) -> One {
+ One(generic::One::new(needle))
+ }
+
+ /// Returns true when this implementation is available in the current
+ /// environment.
+ ///
+ /// When this is true, it is guaranteed that [`One::new`] will return
+ /// a `Some` value. Similarly, when it is false, it is guaranteed that
+ /// `One::new` will return a `None` value.
+ ///
+ /// Note also that for the lifetime of a single program, if this returns
+ /// true then it will always return true.
+ #[inline]
+ pub fn is_available() -> bool {
+ #[cfg(target_feature = "simd128")]
+ {
+ true
+ }
+ #[cfg(not(target_feature = "simd128"))]
+ {
+ false
+ }
+ }
+
+ /// Return the first occurrence of one of the needle bytes in the given
+ /// haystack. If no such occurrence exists, then `None` is returned.
+ ///
+ /// The occurrence is reported as an offset into `haystack`. Its maximum
+ /// value is `haystack.len() - 1`.
+ #[inline]
+ pub fn find(&self, haystack: &[u8]) -> Option<usize> {
+ // SAFETY: `find_raw` guarantees that if a pointer is returned, it
+ // falls within the bounds of the start and end pointers.
+ unsafe {
+ generic::search_slice_with_raw(haystack, |s, e| {
+ self.find_raw(s, e)
+ })
+ }
+ }
+
+ /// Return the last occurrence of one of the needle bytes in the given
+ /// haystack. If no such occurrence exists, then `None` is returned.
+ ///
+ /// The occurrence is reported as an offset into `haystack`. Its maximum
+ /// value is `haystack.len() - 1`.
+ #[inline]
+ pub fn rfind(&self, haystack: &[u8]) -> Option<usize> {
+ // SAFETY: `rfind_raw` guarantees that if a pointer is returned, it
+ // falls within the bounds of the start and end pointers.
+ unsafe {
+ generic::search_slice_with_raw(haystack, |s, e| {
+ self.rfind_raw(s, e)
+ })
+ }
+ }
+
+ /// Counts all occurrences of this byte in the given haystack.
+ #[inline]
+ pub fn count(&self, haystack: &[u8]) -> usize {
+ // SAFETY: All of our pointers are derived directly from a borrowed
+ // slice, which is guaranteed to be valid.
+ unsafe {
+ let start = haystack.as_ptr();
+ let end = start.add(haystack.len());
+ self.count_raw(start, end)
+ }
+ }
+
+ /// Like `find`, but accepts and returns raw pointers.
+ ///
+ /// When a match is found, the pointer returned is guaranteed to be
+ /// `>= start` and `< end`.
+ ///
+ /// This routine is useful if you're already using raw pointers and would
+ /// like to avoid converting back to a slice before executing a search.
+ ///
+ /// # Safety
+ ///
+ /// * Both `start` and `end` must be valid for reads.
+ /// * Both `start` and `end` must point to an initialized value.
+ /// * Both `start` and `end` must point to the same allocated object and
+ /// must either be in bounds or at most one byte past the end of the
+ /// allocated object.
+ /// * Both `start` and `end` must be _derived from_ a pointer to the same
+ /// object.
+ /// * The distance between `start` and `end` must not overflow `isize`.
+ /// * The distance being in bounds must not rely on "wrapping around" the
+ /// address space.
+ ///
+ /// Note that callers may pass a pair of pointers such that `start >= end`.
+ /// In that case, `None` will always be returned.
+ #[inline]
+ pub unsafe fn find_raw(
+ &self,
+ start: *const u8,
+ end: *const u8,
+ ) -> Option<*const u8> {
+ if start >= end {
+ return None;
+ }
+ if end.distance(start) < v128::BYTES {
+ // SAFETY: We require the caller to pass valid start/end pointers.
+ return generic::fwd_byte_by_byte(start, end, |b| {
+ b == self.0.needle1()
+ });
+ }
+ // SAFETY: Building a `One` means it's safe to call 'simd128' routines.
+ // Also, we've checked that our haystack is big enough to run on the
+ // vector routine. Pointer validity is caller's responsibility.
+ self.find_raw_impl(start, end)
+ }
+
+ /// Like `rfind`, but accepts and returns raw pointers.
+ ///
+ /// When a match is found, the pointer returned is guaranteed to be
+ /// `>= start` and `< end`.
+ ///
+ /// This routine is useful if you're already using raw pointers and would
+ /// like to avoid converting back to a slice before executing a search.
+ ///
+ /// # Safety
+ ///
+ /// * Both `start` and `end` must be valid for reads.
+ /// * Both `start` and `end` must point to an initialized value.
+ /// * Both `start` and `end` must point to the same allocated object and
+ /// must either be in bounds or at most one byte past the end of the
+ /// allocated object.
+ /// * Both `start` and `end` must be _derived from_ a pointer to the same
+ /// object.
+ /// * The distance between `start` and `end` must not overflow `isize`.
+ /// * The distance being in bounds must not rely on "wrapping around" the
+ /// address space.
+ ///
+ /// Note that callers may pass a pair of pointers such that `start >= end`.
+ /// In that case, `None` will always be returned.
+ #[inline]
+ pub unsafe fn rfind_raw(
+ &self,
+ start: *const u8,
+ end: *const u8,
+ ) -> Option<*const u8> {
+ if start >= end {
+ return None;
+ }
+ if end.distance(start) < v128::BYTES {
+ // SAFETY: We require the caller to pass valid start/end pointers.
+ return generic::rev_byte_by_byte(start, end, |b| {
+ b == self.0.needle1()
+ });
+ }
+ // SAFETY: Building a `One` means it's safe to call 'simd128' routines.
+ // Also, we've checked that our haystack is big enough to run on the
+ // vector routine. Pointer validity is caller's responsibility.
+ self.rfind_raw_impl(start, end)
+ }
+
+ /// Counts all occurrences of this byte in the given haystack represented
+ /// by raw pointers.
+ ///
+ /// This routine is useful if you're already using raw pointers and would
+ /// like to avoid converting back to a slice before executing a search.
+ ///
+ /// # Safety
+ ///
+ /// * Both `start` and `end` must be valid for reads.
+ /// * Both `start` and `end` must point to an initialized value.
+ /// * Both `start` and `end` must point to the same allocated object and
+ /// must either be in bounds or at most one byte past the end of the
+ /// allocated object.
+ /// * Both `start` and `end` must be _derived from_ a pointer to the same
+ /// object.
+ /// * The distance between `start` and `end` must not overflow `isize`.
+ /// * The distance being in bounds must not rely on "wrapping around" the
+ /// address space.
+ ///
+ /// Note that callers may pass a pair of pointers such that `start >= end`.
+ /// In that case, `0` will always be returned.
+ #[inline]
+ pub unsafe fn count_raw(&self, start: *const u8, end: *const u8) -> usize {
+ if start >= end {
+ return 0;
+ }
+ if end.distance(start) < v128::BYTES {
+ // SAFETY: We require the caller to pass valid start/end pointers.
+ return generic::count_byte_by_byte(start, end, |b| {
+ b == self.0.needle1()
+ });
+ }
+ // SAFETY: Building a `One` means it's safe to call 'simd128' routines.
+ // Also, we've checked that our haystack is big enough to run on the
+ // vector routine. Pointer validity is caller's responsibility.
+ self.count_raw_impl(start, end)
+ }
+
+ /// Execute a search using simd128 vectors and routines.
+ ///
+ /// # Safety
+ ///
+ /// Same as [`One::find_raw`], except the distance between `start` and
+ /// `end` must be at least the size of a simd128 vector (in bytes).
+ ///
+ /// (The target feature safety obligation is automatically fulfilled by
+ /// virtue of being a method on `One`, which can only be constructed
+ /// when it is safe to call `simd128` routines.)
+ #[target_feature(enable = "simd128")]
+ #[inline]
+ unsafe fn find_raw_impl(
+ &self,
+ start: *const u8,
+ end: *const u8,
+ ) -> Option<*const u8> {
+ self.0.find_raw(start, end)
+ }
+
+ /// Execute a search using simd128 vectors and routines.
+ ///
+ /// # Safety
+ ///
+ /// Same as [`One::rfind_raw`], except the distance between `start` and
+ /// `end` must be at least the size of a simd128 vector (in bytes).
+ ///
+ /// (The target feature safety obligation is automatically fulfilled by
+ /// virtue of being a method on `One`, which can only be constructed
+ /// when it is safe to call `simd128` routines.)
+ #[target_feature(enable = "simd128")]
+ #[inline]
+ unsafe fn rfind_raw_impl(
+ &self,
+ start: *const u8,
+ end: *const u8,
+ ) -> Option<*const u8> {
+ self.0.rfind_raw(start, end)
+ }
+
+ /// Execute a count using simd128 vectors and routines.
+ ///
+ /// # Safety
+ ///
+ /// Same as [`One::count_raw`], except the distance between `start` and
+ /// `end` must be at least the size of a simd128 vector (in bytes).
+ ///
+ /// (The target feature safety obligation is automatically fulfilled by
+ /// virtue of being a method on `One`, which can only be constructed
+ /// when it is safe to call `simd128` routines.)
+ #[target_feature(enable = "simd128")]
+ #[inline]
+ unsafe fn count_raw_impl(
+ &self,
+ start: *const u8,
+ end: *const u8,
+ ) -> usize {
+ self.0.count_raw(start, end)
+ }
+
+ /// Returns an iterator over all occurrences of the needle byte in the
+ /// given haystack.
+ ///
+ /// The iterator returned implements `DoubleEndedIterator`. This means it
+ /// can also be used to find occurrences in reverse order.
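+ ///
+ /// # Example
+ ///
+ /// A sketch of forward and reverse iteration (illustrative inputs;
+ /// assumes `simd128` is enabled):
+ ///
+ /// ```
+ /// use memchr::arch::wasm32::simd128::memchr::One;
+ ///
+ /// if let Some(searcher) = One::new(b'a') {
+ ///     let fwd: Vec<usize> = searcher.iter(b"banana").collect();
+ ///     let rev: Vec<usize> = searcher.iter(b"banana").rev().collect();
+ ///     assert_eq!(vec![1, 3, 5], fwd);
+ ///     assert_eq!(vec![5, 3, 1], rev);
+ /// }
+ /// ```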
+ #[inline]
+ pub fn iter<'a, 'h>(&'a self, haystack: &'h [u8]) -> OneIter<'a, 'h> {
+ OneIter { searcher: self, it: generic::Iter::new(haystack) }
+ }
+}
+
+/// An iterator over all occurrences of a single byte in a haystack.
+///
+/// This iterator implements `DoubleEndedIterator`, which means it can also be
+/// used to find occurrences in reverse order.
+///
+/// This iterator is created by the [`One::iter`] method.
+///
+/// The lifetime parameters are as follows:
+///
+/// * `'a` refers to the lifetime of the underlying [`One`] searcher.
+/// * `'h` refers to the lifetime of the haystack being searched.
+#[derive(Clone, Debug)]
+pub struct OneIter<'a, 'h> {
+ searcher: &'a One,
+ it: generic::Iter<'h>,
+}
+
+impl<'a, 'h> Iterator for OneIter<'a, 'h> {
+ type Item = usize;
+
+ #[inline]
+ fn next(&mut self) -> Option<usize> {
+ // SAFETY: We rely on the generic iterator to provide valid start
+ // and end pointers, but we guarantee that any pointer returned by
+ // 'find_raw' falls within the bounds of the start and end pointer.
+ unsafe { self.it.next(|s, e| self.searcher.find_raw(s, e)) }
+ }
+
+ #[inline]
+ fn count(self) -> usize {
+ self.it.count(|s, e| {
+ // SAFETY: We rely on our generic iterator to return valid start
+ // and end pointers.
+ unsafe { self.searcher.count_raw(s, e) }
+ })
+ }
+
+ #[inline]
+ fn size_hint(&self) -> (usize, Option<usize>) {
+ self.it.size_hint()
+ }
+}
+
+impl<'a, 'h> DoubleEndedIterator for OneIter<'a, 'h> {
+ #[inline]
+ fn next_back(&mut self) -> Option<usize> {
+ // SAFETY: We rely on the generic iterator to provide valid start
+ // and end pointers, but we guarantee that any pointer returned by
+ // 'rfind_raw' falls within the bounds of the start and end pointer.
+ unsafe { self.it.next_back(|s, e| self.searcher.rfind_raw(s, e)) }
+ }
+}
+
+impl<'a, 'h> core::iter::FusedIterator for OneIter<'a, 'h> {}
+
+/// Finds all occurrences of two bytes in a haystack.
+///
+/// That is, this reports matches of one of two possible bytes. For example,
+/// searching for `a` or `b` in `afoobar` would report matches at offsets `0`,
+/// `4` and `5`.
+#[derive(Clone, Copy, Debug)]
+pub struct Two(generic::Two<v128>);
+
+impl Two {
+ /// Create a new searcher that finds occurrences of the needle bytes given.
+ ///
+ /// This particular searcher is specialized to use simd128 vector
+ /// instructions that typically make it quite fast.
+ ///
+ /// If simd128 is unavailable in the current environment, then `None` is
+ /// returned.
+ #[inline]
+ pub fn new(needle1: u8, needle2: u8) -> Option<Two> {
+ if Two::is_available() {
+ // SAFETY: we check that simd128 is available above.
+ unsafe { Some(Two::new_unchecked(needle1, needle2)) }
+ } else {
+ None
+ }
+ }
+
+ /// Create a new finder specific to simd128 vectors and routines without
+ /// checking that simd128 is available.
+ ///
+ /// # Safety
+ ///
+ /// Callers must guarantee that it is safe to execute `simd128`
+ /// instructions in the current environment.
+ #[target_feature(enable = "simd128")]
+ #[inline]
+ pub unsafe fn new_unchecked(needle1: u8, needle2: u8) -> Two {
+ Two(generic::Two::new(needle1, needle2))
+ }
+
+ /// Returns true when this implementation is available in the current
+ /// environment.
+ ///
+ /// When this is true, it is guaranteed that [`Two::new`] will return
+ /// a `Some` value. Similarly, when it is false, it is guaranteed that
+ /// `Two::new` will return a `None` value.
+ ///
+ /// Note also that for the lifetime of a single program, if this returns
+ /// true then it will always return true.
+ #[inline]
+ pub fn is_available() -> bool {
+ #[cfg(target_feature = "simd128")]
+ {
+ true
+ }
+ #[cfg(not(target_feature = "simd128"))]
+ {
+ false
+ }
+ }
+
+ /// Return the first occurrence of one of the needle bytes in the given
+ /// haystack. If no such occurrence exists, then `None` is returned.
+ ///
+ /// The occurrence is reported as an offset into `haystack`. Its maximum
+ /// value is `haystack.len() - 1`.
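+ ///
+ /// # Example
+ ///
+ /// A minimal sketch (illustrative inputs; assumes `simd128` is enabled):
+ ///
+ /// ```
+ /// use memchr::arch::wasm32::simd128::memchr::Two;
+ ///
+ /// if let Some(searcher) = Two::new(b'a', b'b') {
+ ///     assert_eq!(Some(4), searcher.find(b"xyz bar"));
+ /// }
+ /// ```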
+ #[inline]
+ pub fn find(&self, haystack: &[u8]) -> Option<usize> {
+ // SAFETY: `find_raw` guarantees that if a pointer is returned, it
+ // falls within the bounds of the start and end pointers.
+ unsafe {
+ generic::search_slice_with_raw(haystack, |s, e| {
+ self.find_raw(s, e)
+ })
+ }
+ }
+
+ /// Return the last occurrence of one of the needle bytes in the given
+ /// haystack. If no such occurrence exists, then `None` is returned.
+ ///
+ /// The occurrence is reported as an offset into `haystack`. Its maximum
+ /// value is `haystack.len() - 1`.
+ #[inline]
+ pub fn rfind(&self, haystack: &[u8]) -> Option<usize> {
+ // SAFETY: `rfind_raw` guarantees that if a pointer is returned, it
+ // falls within the bounds of the start and end pointers.
+ unsafe {
+ generic::search_slice_with_raw(haystack, |s, e| {
+ self.rfind_raw(s, e)
+ })
+ }
+ }
+
+ /// Like `find`, but accepts and returns raw pointers.
+ ///
+ /// When a match is found, the pointer returned is guaranteed to be
+ /// `>= start` and `< end`.
+ ///
+ /// This routine is useful if you're already using raw pointers and would
+ /// like to avoid converting back to a slice before executing a search.
+ ///
+ /// # Safety
+ ///
+ /// * Both `start` and `end` must be valid for reads.
+ /// * Both `start` and `end` must point to an initialized value.
+ /// * Both `start` and `end` must point to the same allocated object and
+ /// must either be in bounds or at most one byte past the end of the
+ /// allocated object.
+ /// * Both `start` and `end` must be _derived from_ a pointer to the same
+ /// object.
+ /// * The distance between `start` and `end` must not overflow `isize`.
+ /// * The distance being in bounds must not rely on "wrapping around" the
+ /// address space.
+ ///
+ /// Note that callers may pass a pair of pointers such that `start >= end`.
+ /// In that case, `None` will always be returned.
+ #[inline]
+ pub unsafe fn find_raw(
+ &self,
+ start: *const u8,
+ end: *const u8,
+ ) -> Option<*const u8> {
+ if start >= end {
+ return None;
+ }
+ if end.distance(start) < v128::BYTES {
+ // SAFETY: We require the caller to pass valid start/end pointers.
+ return generic::fwd_byte_by_byte(start, end, |b| {
+ b == self.0.needle1() || b == self.0.needle2()
+ });
+ }
+ // SAFETY: Building a `Two` means it's safe to call 'simd128' routines.
+ // Also, we've checked that our haystack is big enough to run on the
+ // vector routine. Pointer validity is caller's responsibility.
+ self.find_raw_impl(start, end)
+ }
+
+ /// Like `rfind`, but accepts and returns raw pointers.
+ ///
+ /// When a match is found, the pointer returned is guaranteed to be
+ /// `>= start` and `< end`.
+ ///
+ /// This routine is useful if you're already using raw pointers and would
+ /// like to avoid converting back to a slice before executing a search.
+ ///
+ /// # Safety
+ ///
+ /// * Both `start` and `end` must be valid for reads.
+ /// * Both `start` and `end` must point to an initialized value.
+ /// * Both `start` and `end` must point to the same allocated object and
+ /// must either be in bounds or at most one byte past the end of the
+ /// allocated object.
+ /// * Both `start` and `end` must be _derived from_ a pointer to the same
+ /// object.
+ /// * The distance between `start` and `end` must not overflow `isize`.
+ /// * The distance being in bounds must not rely on "wrapping around" the
+ /// address space.
+ ///
+ /// Note that callers may pass a pair of pointers such that `start >= end`.
+ /// In that case, `None` will always be returned.
+ #[inline]
+ pub unsafe fn rfind_raw(
+ &self,
+ start: *const u8,
+ end: *const u8,
+ ) -> Option<*const u8> {
+ if start >= end {
+ return None;
+ }
+ if end.distance(start) < v128::BYTES {
+ // SAFETY: We require the caller to pass valid start/end pointers.
+ return generic::rev_byte_by_byte(start, end, |b| {
+ b == self.0.needle1() || b == self.0.needle2()
+ });
+ }
+ // SAFETY: Building a `Two` means it's safe to call 'simd128' routines.
+ // Also, we've checked that our haystack is big enough to run on the
+ // vector routine. Pointer validity is caller's responsibility.
+ self.rfind_raw_impl(start, end)
+ }
+
+ /// Execute a search using simd128 vectors and routines.
+ ///
+ /// # Safety
+ ///
+ /// Same as [`Two::find_raw`], except the distance between `start` and
+ /// `end` must be at least the size of a simd128 vector (in bytes).
+ ///
+ /// (The target feature safety obligation is automatically fulfilled by
+ /// virtue of being a method on `Two`, which can only be constructed
+ /// when it is safe to call `simd128` routines.)
+ #[target_feature(enable = "simd128")]
+ #[inline]
+ unsafe fn find_raw_impl(
+ &self,
+ start: *const u8,
+ end: *const u8,
+ ) -> Option<*const u8> {
+ self.0.find_raw(start, end)
+ }
+
+ /// Execute a search using simd128 vectors and routines.
+ ///
+ /// # Safety
+ ///
+ /// Same as [`Two::rfind_raw`], except the distance between `start` and
+ /// `end` must be at least the size of a simd128 vector (in bytes).
+ ///
+ /// (The target feature safety obligation is automatically fulfilled by
+ /// virtue of being a method on `Two`, which can only be constructed
+ /// when it is safe to call `simd128` routines.)
+ #[target_feature(enable = "simd128")]
+ #[inline]
+ unsafe fn rfind_raw_impl(
+ &self,
+ start: *const u8,
+ end: *const u8,
+ ) -> Option<*const u8> {
+ self.0.rfind_raw(start, end)
+ }
+
+ /// Returns an iterator over all occurrences of the needle bytes in the
+ /// given haystack.
+ ///
+ /// The iterator returned implements `DoubleEndedIterator`. This means it
+ /// can also be used to find occurrences in reverse order.
+ #[inline]
+ pub fn iter<'a, 'h>(&'a self, haystack: &'h [u8]) -> TwoIter<'a, 'h> {
+ TwoIter { searcher: self, it: generic::Iter::new(haystack) }
+ }
+}
+
+/// An iterator over all occurrences of two possible bytes in a haystack.
+///
+/// This iterator implements `DoubleEndedIterator`, which means it can also be
+/// used to find occurrences in reverse order.
+///
+/// This iterator is created by the [`Two::iter`] method.
+///
+/// The lifetime parameters are as follows:
+///
+/// * `'a` refers to the lifetime of the underlying [`Two`] searcher.
+/// * `'h` refers to the lifetime of the haystack being searched.
+#[derive(Clone, Debug)]
+pub struct TwoIter<'a, 'h> {
+ searcher: &'a Two,
+ it: generic::Iter<'h>,
+}
+
+impl<'a, 'h> Iterator for TwoIter<'a, 'h> {
+ type Item = usize;
+
+ #[inline]
+ fn next(&mut self) -> Option<usize> {
+ // SAFETY: We rely on the generic iterator to provide valid start
+ // and end pointers, but we guarantee that any pointer returned by
+ // 'find_raw' falls within the bounds of the start and end pointer.
+ unsafe { self.it.next(|s, e| self.searcher.find_raw(s, e)) }
+ }
+
+ #[inline]
+ fn size_hint(&self) -> (usize, Option<usize>) {
+ self.it.size_hint()
+ }
+}
+
+impl<'a, 'h> DoubleEndedIterator for TwoIter<'a, 'h> {
+ #[inline]
+ fn next_back(&mut self) -> Option<usize> {
+ // SAFETY: We rely on the generic iterator to provide valid start
+ // and end pointers, but we guarantee that any pointer returned by
+ // 'rfind_raw' falls within the bounds of the start and end pointer.
+ unsafe { self.it.next_back(|s, e| self.searcher.rfind_raw(s, e)) }
+ }
+}
+
+impl<'a, 'h> core::iter::FusedIterator for TwoIter<'a, 'h> {}
+
+/// Finds all occurrences of three bytes in a haystack.
+///
+/// That is, this reports matches of one of three possible bytes. For example,
+/// searching for `a`, `b` or `o` in `afoobar` would report matches at offsets
+/// `0`, `2`, `3`, `4` and `5`.
+#[derive(Clone, Copy, Debug)]
+pub struct Three(generic::Three<v128>);
+
+impl Three {
+ /// Create a new searcher that finds occurrences of the needle bytes given.
+ ///
+ /// This particular searcher is specialized to use simd128 vector
+ /// instructions that typically make it quite fast.
+ ///
+ /// If simd128 is unavailable in the current environment, then `None` is
+ /// returned.
+ #[inline]
+ pub fn new(needle1: u8, needle2: u8, needle3: u8) -> Option<Three> {
+ if Three::is_available() {
+ // SAFETY: we check that simd128 is available above.
+ unsafe { Some(Three::new_unchecked(needle1, needle2, needle3)) }
+ } else {
+ None
+ }
+ }
+
+ /// Create a new finder specific to simd128 vectors and routines without
+ /// checking that simd128 is available.
+ ///
+ /// # Safety
+ ///
+ /// Callers must guarantee that it is safe to execute `simd128`
+ /// instructions in the current environment.
+ #[target_feature(enable = "simd128")]
+ #[inline]
+ pub unsafe fn new_unchecked(
+ needle1: u8,
+ needle2: u8,
+ needle3: u8,
+ ) -> Three {
+ Three(generic::Three::new(needle1, needle2, needle3))
+ }
+
+ /// Returns true when this implementation is available in the current
+ /// environment.
+ ///
+ /// When this is true, it is guaranteed that [`Three::new`] will return
+ /// a `Some` value. Similarly, when it is false, it is guaranteed that
+ /// `Three::new` will return a `None` value.
+ ///
+ /// Note also that for the lifetime of a single program, if this returns
+ /// true then it will always return true.
+ #[inline]
+ pub fn is_available() -> bool {
+ #[cfg(target_feature = "simd128")]
+ {
+ true
+ }
+ #[cfg(not(target_feature = "simd128"))]
+ {
+ false
+ }
+ }
+
+ /// Return the first occurrence of one of the needle bytes in the given
+ /// haystack. If no such occurrence exists, then `None` is returned.
+ ///
+ /// The occurrence is reported as an offset into `haystack`. Its maximum
+ /// value is `haystack.len() - 1`.
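+ ///
+ /// # Example
+ ///
+ /// A minimal sketch (illustrative inputs; assumes `simd128` is enabled):
+ ///
+ /// ```
+ /// use memchr::arch::wasm32::simd128::memchr::Three;
+ ///
+ /// if let Some(searcher) = Three::new(b'a', b'b', b'o') {
+ ///     assert_eq!(Some(1), searcher.find(b"foobar"));
+ /// }
+ /// ```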
+ #[inline]
+ pub fn find(&self, haystack: &[u8]) -> Option<usize> {
+ // SAFETY: `find_raw` guarantees that if a pointer is returned, it
+ // falls within the bounds of the start and end pointers.
+ unsafe {
+ generic::search_slice_with_raw(haystack, |s, e| {
+ self.find_raw(s, e)
+ })
+ }
+ }
+
+ /// Return the last occurrence of one of the needle bytes in the given
+ /// haystack. If no such occurrence exists, then `None` is returned.
+ ///
+ /// The occurrence is reported as an offset into `haystack`. Its maximum
+ /// value is `haystack.len() - 1`.
+ #[inline]
+ pub fn rfind(&self, haystack: &[u8]) -> Option<usize> {
+ // SAFETY: `rfind_raw` guarantees that if a pointer is returned, it
+ // falls within the bounds of the start and end pointers.
+ unsafe {
+ generic::search_slice_with_raw(haystack, |s, e| {
+ self.rfind_raw(s, e)
+ })
+ }
+ }
+
+ /// Like `find`, but accepts and returns raw pointers.
+ ///
+ /// When a match is found, the pointer returned is guaranteed to be
+ /// `>= start` and `< end`.
+ ///
+ /// This routine is useful if you're already using raw pointers and would
+ /// like to avoid converting back to a slice before executing a search.
+ ///
+ /// # Safety
+ ///
+ /// * Both `start` and `end` must be valid for reads.
+ /// * Both `start` and `end` must point to an initialized value.
+ /// * Both `start` and `end` must point to the same allocated object and
+ /// must either be in bounds or at most one byte past the end of the
+ /// allocated object.
+ /// * Both `start` and `end` must be _derived from_ a pointer to the same
+ /// object.
+ /// * The distance between `start` and `end` must not overflow `isize`.
+ /// * The distance being in bounds must not rely on "wrapping around" the
+ /// address space.
+ ///
+ /// Note that callers may pass a pair of pointers such that `start >= end`.
+ /// In that case, `None` will always be returned.
+ #[inline]
+ pub unsafe fn find_raw(
+ &self,
+ start: *const u8,
+ end: *const u8,
+ ) -> Option<*const u8> {
+ if start >= end {
+ return None;
+ }
+ if end.distance(start) < v128::BYTES {
+ // SAFETY: We require the caller to pass valid start/end pointers.
+ return generic::fwd_byte_by_byte(start, end, |b| {
+ b == self.0.needle1()
+ || b == self.0.needle2()
+ || b == self.0.needle3()
+ });
+ }
+ // SAFETY: Building a `Three` means it's safe to call 'simd128'
+ // routines. Also, we've checked that our haystack is big enough to run
+ // on the vector routine. Pointer validity is caller's responsibility.
+ self.find_raw_impl(start, end)
+ }
+
+ /// Like `rfind`, but accepts and returns raw pointers.
+ ///
+ /// When a match is found, the pointer returned is guaranteed to be
+ /// `>= start` and `< end`.
+ ///
+ /// This routine is useful if you're already using raw pointers and would
+ /// like to avoid converting back to a slice before executing a search.
+ ///
+ /// # Safety
+ ///
+ /// * Both `start` and `end` must be valid for reads.
+ /// * Both `start` and `end` must point to an initialized value.
+ /// * Both `start` and `end` must point to the same allocated object and
+ /// must either be in bounds or at most one byte past the end of the
+ /// allocated object.
+ /// * Both `start` and `end` must be _derived from_ a pointer to the same
+ /// object.
+ /// * The distance between `start` and `end` must not overflow `isize`.
+ /// * The distance being in bounds must not rely on "wrapping around" the
+ /// address space.
+ ///
+ /// Note that callers may pass a pair of pointers such that `start >= end`.
+ /// In that case, `None` will always be returned.
+ #[inline]
+ pub unsafe fn rfind_raw(
+ &self,
+ start: *const u8,
+ end: *const u8,
+ ) -> Option<*const u8> {
+ if start >= end {
+ return None;
+ }
+ if end.distance(start) < v128::BYTES {
+ // SAFETY: We require the caller to pass valid start/end pointers.
+ return generic::rev_byte_by_byte(start, end, |b| {
+ b == self.0.needle1()
+ || b == self.0.needle2()
+ || b == self.0.needle3()
+ });
+ }
+ // SAFETY: Building a `Three` means it's safe to call 'simd128'
+ // routines. Also, we've checked that our haystack is big enough to run
+ // on the vector routine. Pointer validity is caller's responsibility.
+ self.rfind_raw_impl(start, end)
+ }
+
+ /// Execute a search using simd128 vectors and routines.
+ ///
+ /// # Safety
+ ///
+ /// Same as [`Three::find_raw`], except the distance between `start` and
+ /// `end` must be at least the size of a simd128 vector (in bytes).
+ ///
+ /// (The target feature safety obligation is automatically fulfilled by
+ /// virtue of being a method on `Three`, which can only be constructed
+ /// when it is safe to call `simd128` routines.)
+ #[target_feature(enable = "simd128")]
+ #[inline]
+ unsafe fn find_raw_impl(
+ &self,
+ start: *const u8,
+ end: *const u8,
+ ) -> Option<*const u8> {
+ self.0.find_raw(start, end)
+ }
+
+ /// Execute a search using simd128 vectors and routines.
+ ///
+ /// # Safety
+ ///
+ /// Same as [`Three::rfind_raw`], except the distance between `start` and
+ /// `end` must be at least the size of a simd128 vector (in bytes).
+ ///
+ /// (The target feature safety obligation is automatically fulfilled by
+ /// virtue of being a method on `Three`, which can only be constructed
+ /// when it is safe to call `simd128` routines.)
+ #[target_feature(enable = "simd128")]
+ #[inline]
+ unsafe fn rfind_raw_impl(
+ &self,
+ start: *const u8,
+ end: *const u8,
+ ) -> Option<*const u8> {
+ self.0.rfind_raw(start, end)
+ }
+
+ /// Returns an iterator over all occurrences of the needle bytes in the
+ /// given haystack.
+ ///
+ /// The iterator returned implements `DoubleEndedIterator`. This means it
+ /// can also be used to find occurrences in reverse order.
+ #[inline]
+ pub fn iter<'a, 'h>(&'a self, haystack: &'h [u8]) -> ThreeIter<'a, 'h> {
+ ThreeIter { searcher: self, it: generic::Iter::new(haystack) }
+ }
+}
+
+/// An iterator over all occurrences of three possible bytes in a haystack.
+///
+/// This iterator implements `DoubleEndedIterator`, which means it can also be
+/// used to find occurrences in reverse order.
+///
+/// This iterator is created by the [`Three::iter`] method.
+///
+/// The lifetime parameters are as follows:
+///
+/// * `'a` refers to the lifetime of the underlying [`Three`] searcher.
+/// * `'h` refers to the lifetime of the haystack being searched.
+#[derive(Clone, Debug)]
+pub struct ThreeIter<'a, 'h> {
+ searcher: &'a Three,
+ it: generic::Iter<'h>,
+}
+
+impl<'a, 'h> Iterator for ThreeIter<'a, 'h> {
+ type Item = usize;
+
+ #[inline]
+ fn next(&mut self) -> Option<usize> {
+ // SAFETY: We rely on the generic iterator to provide valid start
+ // and end pointers, but we guarantee that any pointer returned by
+ // 'find_raw' falls within the bounds of the start and end pointer.
+ unsafe { self.it.next(|s, e| self.searcher.find_raw(s, e)) }
+ }
+
+ #[inline]
+ fn size_hint(&self) -> (usize, Option<usize>) {
+ self.it.size_hint()
+ }
+}
+
+impl<'a, 'h> DoubleEndedIterator for ThreeIter<'a, 'h> {
+ #[inline]
+ fn next_back(&mut self) -> Option<usize> {
+ // SAFETY: We rely on the generic iterator to provide valid start
+ // and end pointers, but we guarantee that any pointer returned by
+ // 'rfind_raw' falls within the bounds of the start and end pointer.
+ unsafe { self.it.next_back(|s, e| self.searcher.rfind_raw(s, e)) }
+ }
+}
+
+impl<'a, 'h> core::iter::FusedIterator for ThreeIter<'a, 'h> {}
+
+#[cfg(test)]
+mod tests {
+ use super::*;
+
+ define_memchr_quickcheck!(super);
+
+ #[test]
+ fn forward_one() {
+ crate::tests::memchr::Runner::new(1).forward_iter(
+ |haystack, needles| {
+ Some(One::new(needles[0])?.iter(haystack).collect())
+ },
+ )
+ }
+
+ #[test]
+ fn reverse_one() {
+ crate::tests::memchr::Runner::new(1).reverse_iter(
+ |haystack, needles| {
+ Some(One::new(needles[0])?.iter(haystack).rev().collect())
+ },
+ )
+ }
+
+ #[test]
+ fn count_one() {
+ crate::tests::memchr::Runner::new(1).count_iter(|haystack, needles| {
+ Some(One::new(needles[0])?.iter(haystack).count())
+ })
+ }
+
+ #[test]
+ fn forward_two() {
+ crate::tests::memchr::Runner::new(2).forward_iter(
+ |haystack, needles| {
+ let n1 = needles.get(0).copied()?;
+ let n2 = needles.get(1).copied()?;
+ Some(Two::new(n1, n2)?.iter(haystack).collect())
+ },
+ )
+ }
+
+ #[test]
+ fn reverse_two() {
+ crate::tests::memchr::Runner::new(2).reverse_iter(
+ |haystack, needles| {
+ let n1 = needles.get(0).copied()?;
+ let n2 = needles.get(1).copied()?;
+ Some(Two::new(n1, n2)?.iter(haystack).rev().collect())
+ },
+ )
+ }
+
+ #[test]
+ fn forward_three() {
+ crate::tests::memchr::Runner::new(3).forward_iter(
+ |haystack, needles| {
+ let n1 = needles.get(0).copied()?;
+ let n2 = needles.get(1).copied()?;
+ let n3 = needles.get(2).copied()?;
+ Some(Three::new(n1, n2, n3)?.iter(haystack).collect())
+ },
+ )
+ }
+
+ #[test]
+ fn reverse_three() {
+ crate::tests::memchr::Runner::new(3).reverse_iter(
+ |haystack, needles| {
+ let n1 = needles.get(0).copied()?;
+ let n2 = needles.get(1).copied()?;
+ let n3 = needles.get(2).copied()?;
+ Some(Three::new(n1, n2, n3)?.iter(haystack).rev().collect())
+ },
+ )
+ }
+}
diff --git a/vendor/memchr/src/arch/wasm32/simd128/mod.rs b/vendor/memchr/src/arch/wasm32/simd128/mod.rs
new file mode 100644
index 000000000..b55d1f07b
--- /dev/null
+++ b/vendor/memchr/src/arch/wasm32/simd128/mod.rs
@@ -0,0 +1,6 @@
+/*!
+Algorithms for the `wasm32` target using 128-bit vectors via simd128.
+*/
+
+pub mod memchr;
+pub mod packedpair;
diff --git a/vendor/memchr/src/arch/wasm32/simd128/packedpair.rs b/vendor/memchr/src/arch/wasm32/simd128/packedpair.rs
new file mode 100644
index 000000000..b62937744
--- /dev/null
+++ b/vendor/memchr/src/arch/wasm32/simd128/packedpair.rs
@@ -0,0 +1,229 @@
+/*!
+A 128-bit vector implementation of the "packed pair" SIMD algorithm.
+
+The "packed pair" algorithm is based on the [generic SIMD] algorithm. The main
+difference is that it (by default) uses a background distribution of byte
+frequencies to heuristically select the pair of bytes to search for.
+
+[generic SIMD]: http://0x80.pl/articles/simd-strfind.html#first-and-last
+*/
+
+use core::arch::wasm32::v128;
+
+use crate::arch::{all::packedpair::Pair, generic::packedpair};
+
+/// A "packed pair" finder that uses 128-bit vector operations.
+///
+/// This finder picks two bytes that it believes have high predictive power
+/// for indicating an overall match of a needle. Depending on whether
+/// `Finder::find` or `Finder::find_prefilter` is used, it reports offsets
+/// where the needle matches or could match. In the prefilter case, candidates
+/// are reported whenever the [`Pair`] of bytes given matches.
+#[derive(Clone, Copy, Debug)]
+pub struct Finder(packedpair::Finder<v128>);
+
+impl Finder {
+ /// Create a new pair searcher. The searcher returned can either report
+ /// exact matches of `needle` or act as a prefilter and report candidate
+ /// positions of `needle`.
+ ///
+ /// If simd128 is unavailable in the current environment or if a [`Pair`]
+ /// could not be constructed from the needle given, then `None` is
+ /// returned.
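+ ///
+ /// # Example
+ ///
+ /// A sketch of the expected calling pattern with illustrative inputs.
+ /// Since [`Finder::find`] panics on haystacks shorter than
+ /// [`Finder::min_haystack_len`], the length is checked first (this
+ /// assumes a `simd128`-enabled build):
+ ///
+ /// ```
+ /// use memchr::arch::wasm32::simd128::packedpair::Finder;
+ ///
+ /// let (haystack, needle) = (&b"foo bar baz quux"[..], &b"baz"[..]);
+ /// if let Some(finder) = Finder::new(needle) {
+ ///     if haystack.len() >= finder.min_haystack_len() {
+ ///         assert_eq!(Some(8), finder.find(haystack, needle));
+ ///     }
+ /// }
+ /// ```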
+ #[inline]
+ pub fn new(needle: &[u8]) -> Option<Finder> {
+ Finder::with_pair(needle, Pair::new(needle)?)
+ }
+
+ /// Create a new "packed pair" finder using the pair of bytes given.
+ ///
+ /// This constructor permits callers to control precisely which pair of
+ /// bytes is used as a predicate.
+ ///
+ /// If simd128 is unavailable in the current environment, then `None` is
+ /// returned.
+ #[inline]
+ pub fn with_pair(needle: &[u8], pair: Pair) -> Option<Finder> {
+ if Finder::is_available() {
+ // SAFETY: we check that simd128 is available above. We are also
+ // guaranteed to have needle.len() > 1 because we have a valid
+ // Pair.
+ unsafe { Some(Finder::with_pair_impl(needle, pair)) }
+ } else {
+ None
+ }
+ }
+
+ /// Create a new `Finder` specific to simd128 vectors and routines.
+ ///
+ /// # Safety
+ ///
+ /// Same as the safety for `packedpair::Finder::new`, and callers must also
+ /// ensure that simd128 is available.
+ #[target_feature(enable = "simd128")]
+ #[inline]
+ unsafe fn with_pair_impl(needle: &[u8], pair: Pair) -> Finder {
+ let finder = packedpair::Finder::<v128>::new(needle, pair);
+ Finder(finder)
+ }
+
+ /// Returns true when this implementation is available in the current
+ /// environment.
+ ///
+ /// When this is true, it is guaranteed that [`Finder::with_pair`] will
+ /// return a `Some` value. Similarly, when it is false, it is guaranteed
+ /// that `Finder::with_pair` will return a `None` value. Notice that this
+ /// does not guarantee that [`Finder::new`] will return a `Finder`. Namely,
+ /// even when `Finder::is_available` is true, it is not guaranteed that a
+ /// valid [`Pair`] can be found from the needle given.
+ ///
+ /// Note also that for the lifetime of a single program, if this returns
+ /// true then it will always return true.
+ #[inline]
+ pub fn is_available() -> bool {
+ #[cfg(target_feature = "simd128")]
+ {
+ true
+ }
+ #[cfg(not(target_feature = "simd128"))]
+ {
+ false
+ }
+ }
+
+ /// Execute a search using wasm32 v128 vectors and routines.
+ ///
+ /// # Panics
+ ///
+ /// When `haystack.len()` is less than [`Finder::min_haystack_len`].
+ #[inline]
+ pub fn find(&self, haystack: &[u8], needle: &[u8]) -> Option<usize> {
+ self.find_impl(haystack, needle)
+ }
+
+ /// Execute a prefilter search using wasm32 v128 vectors and routines.
+ ///
+ /// # Panics
+ ///
+ /// When `haystack.len()` is less than [`Finder::min_haystack_len`].
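+ ///
+ /// # Example
+ ///
+ /// A sketch of prefilter use with illustrative inputs: a reported offset
+ /// is only a candidate where the byte pair matched, so the full needle
+ /// must still be confirmed (assumes a `simd128`-enabled build):
+ ///
+ /// ```
+ /// use memchr::arch::wasm32::simd128::packedpair::Finder;
+ ///
+ /// let (haystack, needle) = (&b"foo bar baz quux"[..], &b"baz"[..]);
+ /// if let Some(finder) = Finder::new(needle) {
+ ///     if haystack.len() >= finder.min_haystack_len() {
+ ///         if let Some(candidate) = finder.find_prefilter(haystack) {
+ ///             // Candidates can be false positives; verify them.
+ ///             if haystack[candidate..].starts_with(needle) {
+ ///                 assert_eq!(8, candidate);
+ ///             }
+ ///         }
+ ///     }
+ /// }
+ /// ```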
+ #[inline]
+ pub fn find_prefilter(&self, haystack: &[u8]) -> Option<usize> {
+ self.find_prefilter_impl(haystack)
+ }
+
+ /// Execute a search using wasm32 v128 vectors and routines.
+ ///
+ /// # Panics
+ ///
+ /// When `haystack.len()` is less than [`Finder::min_haystack_len`].
+ ///
+ /// # Safety
+ ///
+ /// (The target feature safety obligation is automatically fulfilled by
+ /// virtue of being a method on `Finder`, which can only be constructed
+ /// when it is safe to call `simd128` routines.)
+ #[target_feature(enable = "simd128")]
+ #[inline]
+ fn find_impl(&self, haystack: &[u8], needle: &[u8]) -> Option<usize> {
+ // SAFETY: The target feature safety obligation is automatically
+ // fulfilled by virtue of being a method on `Finder`, which can only be
+ // constructed when it is safe to call `simd128` routines.
+ unsafe { self.0.find(haystack, needle) }
+ }
+
+ /// Execute a prefilter search using wasm32 v128 vectors and routines.
+ ///
+ /// # Panics
+ ///
+ /// When `haystack.len()` is less than [`Finder::min_haystack_len`].
+ ///
+ /// # Safety
+ ///
+ /// (The target feature safety obligation is automatically fulfilled by
+ /// virtue of being a method on `Finder`, which can only be constructed
+ /// when it is safe to call `simd128` routines.)
+ #[target_feature(enable = "simd128")]
+ #[inline]
+ fn find_prefilter_impl(&self, haystack: &[u8]) -> Option<usize> {
+ // SAFETY: The target feature safety obligation is automatically
+ // fulfilled by virtue of being a method on `Finder`, which can only be
+ // constructed when it is safe to call `simd128` routines.
+ unsafe { self.0.find_prefilter(haystack) }
+ }
+
+ /// Returns the pair of offsets (into the needle) used to check as a
+ /// predicate before confirming whether a needle exists at a particular
+ /// position.
+ #[inline]
+ pub fn pair(&self) -> &Pair {
+ self.0.pair()
+ }
+
+ /// Returns the minimum haystack length that this `Finder` can search.
+ ///
+ /// Using a haystack with length smaller than this in a search will result
+ /// in a panic. The reason for this restriction is that this finder is
+ /// meant to be a low-level component that is part of a larger substring
+ /// strategy. In that sense, it avoids trying to handle all cases and
+ /// instead only handles the cases that it can handle very well.
+ #[inline]
+ pub fn min_haystack_len(&self) -> usize {
+ self.0.min_haystack_len()
+ }
+}
+
+#[cfg(test)]
+mod tests {
+ use super::*;
+
+ fn find(haystack: &[u8], needle: &[u8]) -> Option<Option<usize>> {
+ let f = Finder::new(needle)?;
+ if haystack.len() < f.min_haystack_len() {
+ return None;
+ }
+ Some(f.find(haystack, needle))
+ }
+
+ define_substring_forward_quickcheck!(find);
+
+ #[test]
+ fn forward_substring() {
+ crate::tests::substring::Runner::new().fwd(find).run()
+ }
+
+ #[test]
+ fn forward_packedpair() {
+ fn find(
+ haystack: &[u8],
+ needle: &[u8],
+ index1: u8,
+ index2: u8,
+ ) -> Option<Option<usize>> {
+ let pair = Pair::with_indices(needle, index1, index2)?;
+ let f = Finder::with_pair(needle, pair)?;
+ if haystack.len() < f.min_haystack_len() {
+ return None;
+ }
+ Some(f.find(haystack, needle))
+ }
+ crate::tests::packedpair::Runner::new().fwd(find).run()
+ }
+
+ #[test]
+ fn forward_packedpair_prefilter() {
+ fn find(
+ haystack: &[u8],
+ needle: &[u8],
+ index1: u8,
+ index2: u8,
+ ) -> Option<Option<usize>> {
+ let pair = Pair::with_indices(needle, index1, index2)?;
+ let f = Finder::with_pair(needle, pair)?;
+ if haystack.len() < f.min_haystack_len() {
+ return None;
+ }
+ Some(f.find_prefilter(haystack))
+ }
+ crate::tests::packedpair::Runner::new().fwd(find).run()
+ }
+}
diff --git a/vendor/memchr/src/arch/x86_64/avx2/memchr.rs b/vendor/memchr/src/arch/x86_64/avx2/memchr.rs
new file mode 100644
index 000000000..59f8c7f73
--- /dev/null
+++ b/vendor/memchr/src/arch/x86_64/avx2/memchr.rs
@@ -0,0 +1,1352 @@
+/*!
+This module defines 256-bit vector implementations of `memchr` and friends.
+
+The main types in this module are [`One`], [`Two`] and [`Three`]. They are for
+searching for one, two or three distinct bytes, respectively, in a haystack.
+Each type also has corresponding double ended iterators. These searchers are
+typically much faster than scalar routines accomplishing the same task.
+
+The `One` searcher also provides a [`One::count`] routine for efficiently
+counting the number of times a single byte occurs in a haystack. This is
+useful, for example, for counting the number of lines in a haystack. This
+routine exists because it is usually faster, especially with a high match
+count, than using [`One::find`] repeatedly. ([`OneIter`] specializes its
+`Iterator::count` implementation to use this routine.)
+
+Only one, two and three bytes are supported because three bytes is about
+the point where one sees diminishing returns. Beyond that point, it's
+probably (but not necessarily) better to just use a simple `[bool; 256]` array
+or similar. However, it depends mightily on the specific workload and the
+expected match frequency.
+*/
+
+use core::arch::x86_64::{__m128i, __m256i};
+
+use crate::{arch::generic::memchr as generic, ext::Pointer, vector::Vector};
+
+/// Finds all occurrences of a single byte in a haystack.
+#[derive(Clone, Copy, Debug)]
+pub struct One {
+ /// Used for haystacks less than 32 bytes.
+ sse2: generic::One<__m128i>,
+ /// Used for haystacks bigger than 32 bytes.
+ avx2: generic::One<__m256i>,
+}
+
+impl One {
+ /// Create a new searcher that finds occurrences of the needle byte given.
+ ///
+ /// This particular searcher is specialized to use AVX2 vector instructions
+ /// that typically make it quite fast. (SSE2 is used for haystacks that
+ /// are too short to accommodate an AVX2 vector.)
+ ///
+ /// If either SSE2 or AVX2 is unavailable in the current environment, then
+ /// `None` is returned.
+ #[inline]
+ pub fn new(needle: u8) -> Option<One> {
+ if One::is_available() {
+ // SAFETY: we check that sse2 and avx2 are available above.
+ unsafe { Some(One::new_unchecked(needle)) }
+ } else {
+ None
+ }
+ }
+
+ /// Create a new finder specific to AVX2 vectors and routines without
+ /// checking that either SSE2 or AVX2 is available.
+ ///
+ /// # Safety
+ ///
+ /// Callers must guarantee that it is safe to execute both `sse2` and
+ /// `avx2` instructions in the current environment.
+ ///
+ /// Note that it is a common misconception that compiling for an `x86_64`
+ /// target automatically implies access to SSE2 instructions. While this
+ /// is almost always the case, it isn't true in 100% of cases.
+ #[target_feature(enable = "sse2", enable = "avx2")]
+ #[inline]
+ pub unsafe fn new_unchecked(needle: u8) -> One {
+ One {
+ sse2: generic::One::new(needle),
+ avx2: generic::One::new(needle),
+ }
+ }
+
+ /// Returns true when this implementation is available in the current
+ /// environment.
+ ///
+ /// When this is true, it is guaranteed that [`One::new`] will return
+ /// a `Some` value. Similarly, when it is false, it is guaranteed that
+ /// `One::new` will return a `None` value.
+ ///
+ /// Note also that for the lifetime of a single program, if this returns
+ /// true then it will always return true.
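+ ///
+ /// # Example
+ ///
+ /// A sketch of the guaranteed correspondence with [`One::new`] (runtime
+ /// detection of AVX2 requires the `std` feature):
+ ///
+ /// ```
+ /// use memchr::arch::x86_64::avx2::memchr::One;
+ ///
+ /// assert_eq!(One::is_available(), One::new(b'x').is_some());
+ /// ```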
+ #[inline]
+ pub fn is_available() -> bool {
+ #[cfg(not(target_feature = "sse2"))]
+ {
+ false
+ }
+ #[cfg(target_feature = "sse2")]
+ {
+ #[cfg(target_feature = "avx2")]
+ {
+ true
+ }
+ #[cfg(not(target_feature = "avx2"))]
+ {
+ #[cfg(feature = "std")]
+ {
+ std::is_x86_feature_detected!("avx2")
+ }
+ #[cfg(not(feature = "std"))]
+ {
+ false
+ }
+ }
+ }
+ }
+
+ /// Return the first occurrence of the needle byte in the given
+ /// haystack. If no such occurrence exists, then `None` is returned.
+ ///
+ /// The occurrence is reported as an offset into `haystack`. Its maximum
+ /// value is `haystack.len() - 1`.
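+ ///
+ /// # Example
+ ///
+ /// A minimal sketch with illustrative inputs; `One::new` returns `None`
+ /// when AVX2 (or SSE2) is unavailable, in which case the body is skipped:
+ ///
+ /// ```
+ /// use memchr::arch::x86_64::avx2::memchr::One;
+ ///
+ /// if let Some(searcher) = One::new(b'k') {
+ ///     assert_eq!(Some(4), searcher.find(b"quick brown fox"));
+ /// }
+ /// ```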
+ #[inline]
+ pub fn find(&self, haystack: &[u8]) -> Option<usize> {
+ // SAFETY: `find_raw` guarantees that if a pointer is returned, it
+ // falls within the bounds of the start and end pointers.
+ unsafe {
+ generic::search_slice_with_raw(haystack, |s, e| {
+ self.find_raw(s, e)
+ })
+ }
+ }
+
+ /// Return the last occurrence of the needle byte in the given
+ /// haystack. If no such occurrence exists, then `None` is returned.
+ ///
+ /// The occurrence is reported as an offset into `haystack`. Its maximum
+ /// value is `haystack.len() - 1`.
+ #[inline]
+ pub fn rfind(&self, haystack: &[u8]) -> Option<usize> {
+ // SAFETY: `rfind_raw` guarantees that if a pointer is returned, it
+ // falls within the bounds of the start and end pointers.
+ unsafe {
+ generic::search_slice_with_raw(haystack, |s, e| {
+ self.rfind_raw(s, e)
+ })
+ }
+ }
+
+ /// Counts all occurrences of this byte in the given haystack.
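+ ///
+ /// # Example
+ ///
+ /// A sketch of the line-counting use case mentioned in the module docs
+ /// (illustrative input; assumes AVX2 is available at runtime):
+ ///
+ /// ```
+ /// use memchr::arch::x86_64::avx2::memchr::One;
+ ///
+ /// if let Some(searcher) = One::new(b'\n') {
+ ///     assert_eq!(3, searcher.count(b"a\nb\nc\n"));
+ /// }
+ /// ```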
+ #[inline]
+ pub fn count(&self, haystack: &[u8]) -> usize {
+ // SAFETY: All of our pointers are derived directly from a borrowed
+ // slice, which is guaranteed to be valid.
+ unsafe {
+ let start = haystack.as_ptr();
+ let end = start.add(haystack.len());
+ self.count_raw(start, end)
+ }
+ }
+
+ /// Like `find`, but accepts and returns raw pointers.
+ ///
+ /// When a match is found, the pointer returned is guaranteed to be
+ /// `>= start` and `< end`.
+ ///
+ /// This routine is useful if you're already using raw pointers and would
+ /// like to avoid converting back to a slice before executing a search.
+ ///
+ /// # Safety
+ ///
+ /// * Both `start` and `end` must be valid for reads.
+ /// * Both `start` and `end` must point to an initialized value.
+ /// * Both `start` and `end` must point to the same allocated object and
+ /// must either be in bounds or at most one byte past the end of the
+ /// allocated object.
+ /// * Both `start` and `end` must be _derived from_ a pointer to the same
+ /// object.
+ /// * The distance between `start` and `end` must not overflow `isize`.
+ /// * The distance being in bounds must not rely on "wrapping around" the
+ /// address space.
+ ///
+ /// Note that callers may pass a pair of pointers such that `start >= end`.
+ /// In that case, `None` will always be returned.
+ #[inline]
+ pub unsafe fn find_raw(
+ &self,
+ start: *const u8,
+ end: *const u8,
+ ) -> Option<*const u8> {
+ if start >= end {
+ return None;
+ }
+ let len = end.distance(start);
+ if len < __m256i::BYTES {
+ return if len < __m128i::BYTES {
+ // SAFETY: We require the caller to pass valid start/end
+ // pointers.
+ generic::fwd_byte_by_byte(start, end, |b| {
+ b == self.sse2.needle1()
+ })
+ } else {
+ // SAFETY: We require the caller to pass valid start/end
+ // pointers.
+ self.find_raw_sse2(start, end)
+ };
+ }
+ // SAFETY: Building a `One` means it's safe to call both 'sse2' and
+ // 'avx2' routines. Also, we've checked that our haystack is big
+ // enough to run on the vector routine. Pointer validity is caller's
+ // responsibility.
+ //
+ // Note that we could call `self.avx2.find_raw` directly here. But that
+ // means we'd have to annotate this routine with `target_feature`.
+ // Which is fine, because this routine is `unsafe` anyway and the
+ // `target_feature` obligation is met by virtue of building a `One`.
+ // The real problem is that a routine with a `target_feature`
+ // annotation generally can't be inlined into caller code unless
+ // the caller code has the same target feature annotations. Namely,
+ // the common case (at time of writing) is for calling code to not
+ // have the `avx2` target feature enabled *at compile time*. Without
+ // `target_feature` on this routine, it can be inlined which will
+ // handle some of the short-haystack cases above without touching the
+ // architecture specific code.
+ self.find_raw_avx2(start, end)
+ }
+
+ /// Like `rfind`, but accepts and returns raw pointers.
+ ///
+ /// When a match is found, the pointer returned is guaranteed to be
+ /// `>= start` and `< end`.
+ ///
+ /// This routine is useful if you're already using raw pointers and would
+ /// like to avoid converting back to a slice before executing a search.
+ ///
+ /// # Safety
+ ///
+ /// * Both `start` and `end` must be valid for reads.
+ /// * Both `start` and `end` must point to an initialized value.
+ /// * Both `start` and `end` must point to the same allocated object and
+ /// must either be in bounds or at most one byte past the end of the
+ /// allocated object.
+ /// * Both `start` and `end` must be _derived from_ a pointer to the same
+ /// object.
+ /// * The distance between `start` and `end` must not overflow `isize`.
+ /// * The distance being in bounds must not rely on "wrapping around" the
+ /// address space.
+ ///
+ /// Note that callers may pass a pair of pointers such that `start >= end`.
+ /// In that case, `None` will always be returned.
+ #[inline]
+ pub unsafe fn rfind_raw(
+ &self,
+ start: *const u8,
+ end: *const u8,
+ ) -> Option<*const u8> {
+ if start >= end {
+ return None;
+ }
+ let len = end.distance(start);
+ if len < __m256i::BYTES {
+ return if len < __m128i::BYTES {
+ // SAFETY: We require the caller to pass valid start/end
+ // pointers.
+ generic::rev_byte_by_byte(start, end, |b| {
+ b == self.sse2.needle1()
+ })
+ } else {
+ // SAFETY: We require the caller to pass valid start/end
+ // pointers.
+ self.rfind_raw_sse2(start, end)
+ };
+ }
+ // SAFETY: Building a `One` means it's safe to call both 'sse2' and
+ // 'avx2' routines. Also, we've checked that our haystack is big
+ // enough to run on the vector routine. Pointer validity is caller's
+ // responsibility.
+ //
+ // See note in forward routine above for why we don't just call
+ // `self.avx2.rfind_raw` directly here.
+ self.rfind_raw_avx2(start, end)
+ }
+
+ /// Counts all occurrences of this byte in the given haystack represented
+ /// by raw pointers.
+ ///
+ /// This routine is useful if you're already using raw pointers and would
+ /// like to avoid converting back to a slice before executing a search.
+ ///
+ /// # Safety
+ ///
+ /// * Both `start` and `end` must be valid for reads.
+ /// * Both `start` and `end` must point to an initialized value.
+ /// * Both `start` and `end` must point to the same allocated object and
+ /// must either be in bounds or at most one byte past the end of the
+ /// allocated object.
+ /// * Both `start` and `end` must be _derived from_ a pointer to the same
+ /// object.
+ /// * The distance between `start` and `end` must not overflow `isize`.
+ /// * The distance being in bounds must not rely on "wrapping around" the
+ /// address space.
+ ///
+ /// Note that callers may pass a pair of pointers such that `start >= end`.
+ /// In that case, `0` will always be returned.
+ #[inline]
+ pub unsafe fn count_raw(&self, start: *const u8, end: *const u8) -> usize {
+ if start >= end {
+ return 0;
+ }
+ let len = end.distance(start);
+ if len < __m256i::BYTES {
+ return if len < __m128i::BYTES {
+ // SAFETY: We require the caller to pass valid start/end
+ // pointers.
+ generic::count_byte_by_byte(start, end, |b| {
+ b == self.sse2.needle1()
+ })
+ } else {
+ // SAFETY: We require the caller to pass valid start/end
+ // pointers.
+ self.count_raw_sse2(start, end)
+ };
+ }
+ // SAFETY: Building a `One` means it's safe to call both 'sse2' and
+ // 'avx2' routines. Also, we've checked that our haystack is big
+ // enough to run on the vector routine. Pointer validity is caller's
+ // responsibility.
+ self.count_raw_avx2(start, end)
+ }
+
+ /// Execute a search using SSE2 vectors and routines.
+ ///
+ /// # Safety
+ ///
+ /// Same as [`One::find_raw`], except the distance between `start` and
+ /// `end` must be at least the size of an SSE2 vector (in bytes).
+ ///
+ /// (The target feature safety obligation is automatically fulfilled by
+ /// virtue of being a method on `One`, which can only be constructed
+ /// when it is safe to call `sse2`/`avx2` routines.)
+ #[target_feature(enable = "sse2")]
+ #[inline]
+ unsafe fn find_raw_sse2(
+ &self,
+ start: *const u8,
+ end: *const u8,
+ ) -> Option<*const u8> {
+ self.sse2.find_raw(start, end)
+ }
+
+ /// Execute a search using SSE2 vectors and routines.
+ ///
+ /// # Safety
+ ///
+ /// Same as [`One::rfind_raw`], except the distance between `start` and
+ /// `end` must be at least the size of an SSE2 vector (in bytes).
+ ///
+ /// (The target feature safety obligation is automatically fulfilled by
+ /// virtue of being a method on `One`, which can only be constructed
+ /// when it is safe to call `sse2`/`avx2` routines.)
+ #[target_feature(enable = "sse2")]
+ #[inline]
+ unsafe fn rfind_raw_sse2(
+ &self,
+ start: *const u8,
+ end: *const u8,
+ ) -> Option<*const u8> {
+ self.sse2.rfind_raw(start, end)
+ }
+
+ /// Execute a count using SSE2 vectors and routines.
+ ///
+ /// # Safety
+ ///
+ /// Same as [`One::count_raw`], except the distance between `start` and
+ /// `end` must be at least the size of an SSE2 vector (in bytes).
+ ///
+ /// (The target feature safety obligation is automatically fulfilled by
+ /// virtue of being a method on `One`, which can only be constructed
+ /// when it is safe to call `sse2`/`avx2` routines.)
+ #[target_feature(enable = "sse2")]
+ #[inline]
+ unsafe fn count_raw_sse2(
+ &self,
+ start: *const u8,
+ end: *const u8,
+ ) -> usize {
+ self.sse2.count_raw(start, end)
+ }
+
+ /// Execute a search using AVX2 vectors and routines.
+ ///
+ /// # Safety
+ ///
+ /// Same as [`One::find_raw`], except the distance between `start` and
+ /// `end` must be at least the size of an AVX2 vector (in bytes).
+ ///
+ /// (The target feature safety obligation is automatically fulfilled by
+ /// virtue of being a method on `One`, which can only be constructed
+ /// when it is safe to call `sse2`/`avx2` routines.)
+ #[target_feature(enable = "avx2")]
+ #[inline]
+ unsafe fn find_raw_avx2(
+ &self,
+ start: *const u8,
+ end: *const u8,
+ ) -> Option<*const u8> {
+ self.avx2.find_raw(start, end)
+ }
+
+ /// Execute a search using AVX2 vectors and routines.
+ ///
+ /// # Safety
+ ///
+ /// Same as [`One::rfind_raw`], except the distance between `start` and
+ /// `end` must be at least the size of an AVX2 vector (in bytes).
+ ///
+ /// (The target feature safety obligation is automatically fulfilled by
+ /// virtue of being a method on `One`, which can only be constructed
+ /// when it is safe to call `sse2`/`avx2` routines.)
+ #[target_feature(enable = "avx2")]
+ #[inline]
+ unsafe fn rfind_raw_avx2(
+ &self,
+ start: *const u8,
+ end: *const u8,
+ ) -> Option<*const u8> {
+ self.avx2.rfind_raw(start, end)
+ }
+
+ /// Execute a count using AVX2 vectors and routines.
+ ///
+ /// # Safety
+ ///
+ /// Same as [`One::count_raw`], except the distance between `start` and
+ /// `end` must be at least the size of an AVX2 vector (in bytes).
+ ///
+ /// (The target feature safety obligation is automatically fulfilled by
+ /// virtue of being a method on `One`, which can only be constructed
+ /// when it is safe to call `sse2`/`avx2` routines.)
+ #[target_feature(enable = "avx2")]
+ #[inline]
+ unsafe fn count_raw_avx2(
+ &self,
+ start: *const u8,
+ end: *const u8,
+ ) -> usize {
+ self.avx2.count_raw(start, end)
+ }
+
+ /// Returns an iterator over all occurrences of the needle byte in the
+ /// given haystack.
+ ///
+ /// The iterator returned implements `DoubleEndedIterator`. This means it
+ /// can also be used to find occurrences in reverse order.
+ #[inline]
+ pub fn iter<'a, 'h>(&'a self, haystack: &'h [u8]) -> OneIter<'a, 'h> {
+ OneIter { searcher: self, it: generic::Iter::new(haystack) }
+ }
+}
+
+/// An iterator over all occurrences of a single byte in a haystack.
+///
+/// This iterator implements `DoubleEndedIterator`, which means it can also be
+/// used to find occurrences in reverse order.
+///
+/// This iterator is created by the [`One::iter`] method.
+///
+/// The lifetime parameters are as follows:
+///
+/// * `'a` refers to the lifetime of the underlying [`One`] searcher.
+/// * `'h` refers to the lifetime of the haystack being searched.
+#[derive(Clone, Debug)]
+pub struct OneIter<'a, 'h> {
+ searcher: &'a One,
+ it: generic::Iter<'h>,
+}
+
+impl<'a, 'h> Iterator for OneIter<'a, 'h> {
+ type Item = usize;
+
+ #[inline]
+ fn next(&mut self) -> Option<usize> {
+ // SAFETY: We rely on the generic iterator to provide valid start
+ // and end pointers, but we guarantee that any pointer returned by
+ // 'find_raw' falls within the bounds of the start and end pointer.
+ unsafe { self.it.next(|s, e| self.searcher.find_raw(s, e)) }
+ }
+
+ #[inline]
+ fn count(self) -> usize {
+ self.it.count(|s, e| {
+ // SAFETY: We rely on our generic iterator to return valid start
+ // and end pointers.
+ unsafe { self.searcher.count_raw(s, e) }
+ })
+ }
+
+ #[inline]
+ fn size_hint(&self) -> (usize, Option<usize>) {
+ self.it.size_hint()
+ }
+}
+
+impl<'a, 'h> DoubleEndedIterator for OneIter<'a, 'h> {
+ #[inline]
+ fn next_back(&mut self) -> Option<usize> {
+ // SAFETY: We rely on the generic iterator to provide valid start
+ // and end pointers, but we guarantee that any pointer returned by
+ // 'rfind_raw' falls within the bounds of the start and end pointer.
+ unsafe { self.it.next_back(|s, e| self.searcher.rfind_raw(s, e)) }
+ }
+}
+
+impl<'a, 'h> core::iter::FusedIterator for OneIter<'a, 'h> {}
+
+/// Finds all occurrences of two bytes in a haystack.
+///
+/// That is, this reports matches of one of two possible bytes. For example,
+/// searching for `a` or `b` in `afoobar` would report matches at offsets `0`,
+/// `4` and `5`.
+#[derive(Clone, Copy, Debug)]
+pub struct Two {
+ /// Used for haystacks less than 32 bytes.
+ sse2: generic::Two<__m128i>,
+ /// Used for haystacks bigger than 32 bytes.
+ avx2: generic::Two<__m256i>,
+}
+
+impl Two {
+ /// Create a new searcher that finds occurrences of the needle bytes given.
+ ///
+ /// This particular searcher is specialized to use AVX2 vector instructions
+ /// that typically make it quite fast. (SSE2 is used for haystacks that
+ /// are too short to accommodate an AVX2 vector.)
+ ///
+ /// If either SSE2 or AVX2 is unavailable in the current environment, then
+ /// `None` is returned.
+ #[inline]
+ pub fn new(needle1: u8, needle2: u8) -> Option<Two> {
+ if Two::is_available() {
+ // SAFETY: we check that sse2 and avx2 are available above.
+ unsafe { Some(Two::new_unchecked(needle1, needle2)) }
+ } else {
+ None
+ }
+ }
+
+ /// Create a new finder specific to AVX2 vectors and routines without
+ /// checking that either SSE2 or AVX2 is available.
+ ///
+ /// # Safety
+ ///
+ /// Callers must guarantee that it is safe to execute both `sse2` and
+ /// `avx2` instructions in the current environment.
+ ///
+ /// Note that it is a common misconception that compiling for an `x86_64`
+ /// target automatically implies access to SSE2 instructions. While this
+ /// is almost always the case, it isn't true in 100% of cases.
+ #[target_feature(enable = "sse2", enable = "avx2")]
+ #[inline]
+ pub unsafe fn new_unchecked(needle1: u8, needle2: u8) -> Two {
+ Two {
+ sse2: generic::Two::new(needle1, needle2),
+ avx2: generic::Two::new(needle1, needle2),
+ }
+ }
+
+ /// Returns true when this implementation is available in the current
+ /// environment.
+ ///
+ /// When this is true, it is guaranteed that [`Two::new`] will return
+ /// a `Some` value. Similarly, when it is false, it is guaranteed that
+ /// `Two::new` will return a `None` value.
+ ///
+ /// Note also that for the lifetime of a single program, if this returns
+ /// true then it will always return true.
+ #[inline]
+ pub fn is_available() -> bool {
+ #[cfg(not(target_feature = "sse2"))]
+ {
+ false
+ }
+ #[cfg(target_feature = "sse2")]
+ {
+ #[cfg(target_feature = "avx2")]
+ {
+ true
+ }
+ #[cfg(not(target_feature = "avx2"))]
+ {
+ #[cfg(feature = "std")]
+ {
+ std::is_x86_feature_detected!("avx2")
+ }
+ #[cfg(not(feature = "std"))]
+ {
+ false
+ }
+ }
+ }
+ }
+
+ /// Return the first occurrence of one of the needle bytes in the given
+ /// haystack. If no such occurrence exists, then `None` is returned.
+ ///
+ /// The occurrence is reported as an offset into `haystack`. Its maximum
+ /// value is `haystack.len() - 1`.
+ #[inline]
+ pub fn find(&self, haystack: &[u8]) -> Option<usize> {
+ // SAFETY: `find_raw` guarantees that if a pointer is returned, it
+ // falls within the bounds of the start and end pointers.
+ unsafe {
+ generic::search_slice_with_raw(haystack, |s, e| {
+ self.find_raw(s, e)
+ })
+ }
+ }
+
+ /// Return the last occurrence of one of the needle bytes in the given
+ /// haystack. If no such occurrence exists, then `None` is returned.
+ ///
+ /// The occurrence is reported as an offset into `haystack`. Its maximum
+ /// value is `haystack.len() - 1`.
+ #[inline]
+ pub fn rfind(&self, haystack: &[u8]) -> Option<usize> {
+ // SAFETY: `find_raw` guarantees that if a pointer is returned, it
+ // falls within the bounds of the start and end pointers.
+ unsafe {
+ generic::search_slice_with_raw(haystack, |s, e| {
+ self.rfind_raw(s, e)
+ })
+ }
+ }
+
+ /// Like `find`, but accepts and returns raw pointers.
+ ///
+ /// When a match is found, the pointer returned is guaranteed to be
+ /// `>= start` and `< end`.
+ ///
+ /// This routine is useful if you're already using raw pointers and would
+ /// like to avoid converting back to a slice before executing a search.
+ ///
+ /// # Safety
+ ///
+ /// * Both `start` and `end` must be valid for reads.
+ /// * Both `start` and `end` must point to an initialized value.
+ /// * Both `start` and `end` must point to the same allocated object and
+ /// must either be in bounds or at most one byte past the end of the
+ /// allocated object.
+ /// * Both `start` and `end` must be _derived from_ a pointer to the same
+ /// object.
+ /// * The distance between `start` and `end` must not overflow `isize`.
+ /// * The distance being in bounds must not rely on "wrapping around" the
+ /// address space.
+ ///
+ /// Note that callers may pass a pair of pointers such that `start >= end`.
+ /// In that case, `None` will always be returned.
+ #[inline]
+ pub unsafe fn find_raw(
+ &self,
+ start: *const u8,
+ end: *const u8,
+ ) -> Option<*const u8> {
+ if start >= end {
+ return None;
+ }
+ let len = end.distance(start);
+ if len < __m256i::BYTES {
+ return if len < __m128i::BYTES {
+ // SAFETY: We require the caller to pass valid start/end
+ // pointers.
+ generic::fwd_byte_by_byte(start, end, |b| {
+ b == self.sse2.needle1() || b == self.sse2.needle2()
+ })
+ } else {
+ // SAFETY: We require the caller to pass valid start/end
+ // pointers.
+ self.find_raw_sse2(start, end)
+ };
+ }
+ // SAFETY: Building a `Two` means it's safe to call both 'sse2' and
+ // 'avx2' routines. Also, we've checked that our haystack is big
+ // enough to run on the vector routine. Pointer validity is caller's
+ // responsibility.
+ //
+ // Note that we could call `self.avx2.find_raw` directly here. But that
+ // means we'd have to annotate this routine with `target_feature`.
+ // Which is fine, because this routine is `unsafe` anyway and the
+ // `target_feature` obligation is met by virtue of building a `Two`.
+ // The real problem is that a routine with a `target_feature`
+ // annotation generally can't be inlined into caller code unless
+ // the caller code has the same target feature annotations. Namely,
+ // the common case (at time of writing) is for calling code to not
+ // have the `avx2` target feature enabled *at compile time*. Without
+ // `target_feature` on this routine, it can be inlined which will
+ // handle some of the short-haystack cases above without touching the
+ // architecture specific code.
+ self.find_raw_avx2(start, end)
+ }
+
+ /// Like `rfind`, but accepts and returns raw pointers.
+ ///
+ /// When a match is found, the pointer returned is guaranteed to be
+ /// `>= start` and `< end`.
+ ///
+ /// This routine is useful if you're already using raw pointers and would
+ /// like to avoid converting back to a slice before executing a search.
+ ///
+ /// # Safety
+ ///
+ /// * Both `start` and `end` must be valid for reads.
+ /// * Both `start` and `end` must point to an initialized value.
+ /// * Both `start` and `end` must point to the same allocated object and
+ /// must either be in bounds or at most one byte past the end of the
+ /// allocated object.
+ /// * Both `start` and `end` must be _derived from_ a pointer to the same
+ /// object.
+ /// * The distance between `start` and `end` must not overflow `isize`.
+ /// * The distance being in bounds must not rely on "wrapping around" the
+ /// address space.
+ ///
+ /// Note that callers may pass a pair of pointers such that `start >= end`.
+ /// In that case, `None` will always be returned.
+ #[inline]
+ pub unsafe fn rfind_raw(
+ &self,
+ start: *const u8,
+ end: *const u8,
+ ) -> Option<*const u8> {
+ if start >= end {
+ return None;
+ }
+ let len = end.distance(start);
+ if len < __m256i::BYTES {
+ return if len < __m128i::BYTES {
+ // SAFETY: We require the caller to pass valid start/end
+ // pointers.
+ generic::rev_byte_by_byte(start, end, |b| {
+ b == self.sse2.needle1() || b == self.sse2.needle2()
+ })
+ } else {
+ // SAFETY: We require the caller to pass valid start/end
+ // pointers.
+ self.rfind_raw_sse2(start, end)
+ };
+ }
+ // SAFETY: Building a `Two` means it's safe to call both 'sse2' and
+ // 'avx2' routines. Also, we've checked that our haystack is big
+ // enough to run on the vector routine. Pointer validity is caller's
+ // responsibility.
+ //
+ // See note in forward routine above for why we don't just call
+ // `self.avx2.rfind_raw` directly here.
+ self.rfind_raw_avx2(start, end)
+ }
+
+ /// Execute a search using SSE2 vectors and routines.
+ ///
+ /// # Safety
+ ///
+ /// Same as [`Two::find_raw`], except the distance between `start` and
+ /// `end` must be at least the size of an SSE2 vector (in bytes).
+ ///
+ /// (The target feature safety obligation is automatically fulfilled by
+ /// virtue of being a method on `Two`, which can only be constructed
+ /// when it is safe to call `sse2`/`avx2` routines.)
+ #[target_feature(enable = "sse2")]
+ #[inline]
+ unsafe fn find_raw_sse2(
+ &self,
+ start: *const u8,
+ end: *const u8,
+ ) -> Option<*const u8> {
+ self.sse2.find_raw(start, end)
+ }
+
+ /// Execute a search using SSE2 vectors and routines.
+ ///
+ /// # Safety
+ ///
+ /// Same as [`Two::rfind_raw`], except the distance between `start` and
+ /// `end` must be at least the size of an SSE2 vector (in bytes).
+ ///
+ /// (The target feature safety obligation is automatically fulfilled by
+ /// virtue of being a method on `Two`, which can only be constructed
+ /// when it is safe to call `sse2`/`avx2` routines.)
+ #[target_feature(enable = "sse2")]
+ #[inline]
+ unsafe fn rfind_raw_sse2(
+ &self,
+ start: *const u8,
+ end: *const u8,
+ ) -> Option<*const u8> {
+ self.sse2.rfind_raw(start, end)
+ }
+
+ /// Execute a search using AVX2 vectors and routines.
+ ///
+ /// # Safety
+ ///
+ /// Same as [`Two::find_raw`], except the distance between `start` and
+ /// `end` must be at least the size of an AVX2 vector (in bytes).
+ ///
+ /// (The target feature safety obligation is automatically fulfilled by
+ /// virtue of being a method on `Two`, which can only be constructed
+ /// when it is safe to call `sse2`/`avx2` routines.)
+ #[target_feature(enable = "avx2")]
+ #[inline]
+ unsafe fn find_raw_avx2(
+ &self,
+ start: *const u8,
+ end: *const u8,
+ ) -> Option<*const u8> {
+ self.avx2.find_raw(start, end)
+ }
+
+ /// Execute a search using AVX2 vectors and routines.
+ ///
+ /// # Safety
+ ///
+ /// Same as [`Two::rfind_raw`], except the distance between `start` and
+ /// `end` must be at least the size of an AVX2 vector (in bytes).
+ ///
+ /// (The target feature safety obligation is automatically fulfilled by
+ /// virtue of being a method on `Two`, which can only be constructed
+ /// when it is safe to call `sse2`/`avx2` routines.)
+ #[target_feature(enable = "avx2")]
+ #[inline]
+ unsafe fn rfind_raw_avx2(
+ &self,
+ start: *const u8,
+ end: *const u8,
+ ) -> Option<*const u8> {
+ self.avx2.rfind_raw(start, end)
+ }
+
+ /// Returns an iterator over all occurrences of the needle bytes in the
+ /// given haystack.
+ ///
+ /// The iterator returned implements `DoubleEndedIterator`. This means it
+ /// can also be used to find occurrences in reverse order.
+ #[inline]
+ pub fn iter<'a, 'h>(&'a self, haystack: &'h [u8]) -> TwoIter<'a, 'h> {
+ TwoIter { searcher: self, it: generic::Iter::new(haystack) }
+ }
+}
+
+/// An iterator over all occurrences of two possible bytes in a haystack.
+///
+/// This iterator implements `DoubleEndedIterator`, which means it can also be
+/// used to find occurrences in reverse order.
+///
+/// This iterator is created by the [`Two::iter`] method.
+///
+/// The lifetime parameters are as follows:
+///
+/// * `'a` refers to the lifetime of the underlying [`Two`] searcher.
+/// * `'h` refers to the lifetime of the haystack being searched.
+#[derive(Clone, Debug)]
+pub struct TwoIter<'a, 'h> {
+ searcher: &'a Two,
+ it: generic::Iter<'h>,
+}
+
+impl<'a, 'h> Iterator for TwoIter<'a, 'h> {
+ type Item = usize;
+
+ #[inline]
+ fn next(&mut self) -> Option<usize> {
+ // SAFETY: We rely on the generic iterator to provide valid start
+ // and end pointers, but we guarantee that any pointer returned by
+ // 'find_raw' falls within the bounds of the start and end pointer.
+ unsafe { self.it.next(|s, e| self.searcher.find_raw(s, e)) }
+ }
+
+ #[inline]
+ fn size_hint(&self) -> (usize, Option<usize>) {
+ self.it.size_hint()
+ }
+}
+
+impl<'a, 'h> DoubleEndedIterator for TwoIter<'a, 'h> {
+ #[inline]
+ fn next_back(&mut self) -> Option<usize> {
+ // SAFETY: We rely on the generic iterator to provide valid start
+ // and end pointers, but we guarantee that any pointer returned by
+ // 'rfind_raw' falls within the bounds of the start and end pointer.
+ unsafe { self.it.next_back(|s, e| self.searcher.rfind_raw(s, e)) }
+ }
+}
+
+impl<'a, 'h> core::iter::FusedIterator for TwoIter<'a, 'h> {}
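
Taken together, `Two` and `TwoIter` support the usual slice-level workflow. Below is a minimal usage sketch, assuming this module is reachable as `memchr::arch::x86_64::avx2::memchr` per the crate's public `arch` hierarchy:

```rust
use memchr::arch::x86_64::avx2::memchr::Two;

fn main() {
    let haystack = b"afoobar";
    // `new` returns `None` if SSE2/AVX2 are unavailable at runtime.
    if let Some(searcher) = Two::new(b'a', b'b') {
        // Offsets of every `a` or `b`: 0, 4 and 5, per the docs above.
        let offsets: Vec<usize> = searcher.iter(haystack).collect();
        assert_eq!(offsets, vec![0, 4, 5]);
        // The iterator is double-ended, so reverse search also works.
        assert_eq!(searcher.iter(haystack).next_back(), Some(5));
    }
}
```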
+
+/// Finds all occurrences of three bytes in a haystack.
+///
+/// That is, this reports matches of one of three possible bytes. For example,
+/// searching for `a`, `b` or `o` in `afoobar` would report matches at offsets
+/// `0`, `2`, `3`, `4` and `5`.
+#[derive(Clone, Copy, Debug)]
+pub struct Three {
+ /// Used for haystacks with fewer than 32 bytes.
+ sse2: generic::Three<__m128i>,
+ /// Used for haystacks with at least 32 bytes.
+ avx2: generic::Three<__m256i>,
+}
+
+impl Three {
+ /// Create a new searcher that finds occurrences of the needle bytes given.
+ ///
+ /// This particular searcher is specialized to use AVX2 vector instructions
+ /// that typically make it quite fast. (SSE2 is used for haystacks that
+ /// are too short to accommodate an AVX2 vector.)
+ ///
+ /// If either SSE2 or AVX2 is unavailable in the current environment, then
+ /// `None` is returned.
+ #[inline]
+ pub fn new(needle1: u8, needle2: u8, needle3: u8) -> Option<Three> {
+ if Three::is_available() {
+ // SAFETY: we check that sse2 and avx2 are available above.
+ unsafe { Some(Three::new_unchecked(needle1, needle2, needle3)) }
+ } else {
+ None
+ }
+ }
+
+ /// Create a new finder specific to AVX2 vectors and routines without
+ /// checking that either SSE2 or AVX2 is available.
+ ///
+ /// # Safety
+ ///
+ /// Callers must guarantee that it is safe to execute both `sse2` and
+ /// `avx2` instructions in the current environment.
+ ///
+ /// Note that it is a common misconception that if one compiles for an
+ /// `x86_64` target, then they therefore automatically have access to SSE2
+ /// instructions. While this is almost always the case, it isn't true in
+ /// 100% of cases.
+ #[target_feature(enable = "sse2", enable = "avx2")]
+ #[inline]
+ pub unsafe fn new_unchecked(
+ needle1: u8,
+ needle2: u8,
+ needle3: u8,
+ ) -> Three {
+ Three {
+ sse2: generic::Three::new(needle1, needle2, needle3),
+ avx2: generic::Three::new(needle1, needle2, needle3),
+ }
+ }
+
+ /// Returns true when this implementation is available in the current
+ /// environment.
+ ///
+ /// When this is true, it is guaranteed that [`Three::new`] will return
+ /// a `Some` value. Similarly, when it is false, it is guaranteed that
+ /// `Three::new` will return a `None` value.
+ ///
+ /// Note also that for the lifetime of a single program, if this returns
+ /// true then it will always return true.
+ #[inline]
+ pub fn is_available() -> bool {
+ #[cfg(not(target_feature = "sse2"))]
+ {
+ false
+ }
+ #[cfg(target_feature = "sse2")]
+ {
+ #[cfg(target_feature = "avx2")]
+ {
+ true
+ }
+ #[cfg(not(target_feature = "avx2"))]
+ {
+ #[cfg(feature = "std")]
+ {
+ std::is_x86_feature_detected!("avx2")
+ }
+ #[cfg(not(feature = "std"))]
+ {
+ false
+ }
+ }
+ }
+ }
+
+ /// Return the first occurrence of one of the needle bytes in the given
+ /// haystack. If no such occurrence exists, then `None` is returned.
+ ///
+ /// The occurrence is reported as an offset into `haystack`. Its maximum
+ /// value is `haystack.len() - 1`.
+ #[inline]
+ pub fn find(&self, haystack: &[u8]) -> Option<usize> {
+ // SAFETY: `find_raw` guarantees that if a pointer is returned, it
+ // falls within the bounds of the start and end pointers.
+ unsafe {
+ generic::search_slice_with_raw(haystack, |s, e| {
+ self.find_raw(s, e)
+ })
+ }
+ }
+
+ /// Return the last occurrence of one of the needle bytes in the given
+ /// haystack. If no such occurrence exists, then `None` is returned.
+ ///
+ /// The occurrence is reported as an offset into `haystack`. Its maximum
+ /// value is `haystack.len() - 1`.
+ #[inline]
+ pub fn rfind(&self, haystack: &[u8]) -> Option<usize> {
+ // SAFETY: `find_raw` guarantees that if a pointer is returned, it
+ // falls within the bounds of the start and end pointers.
+ unsafe {
+ generic::search_slice_with_raw(haystack, |s, e| {
+ self.rfind_raw(s, e)
+ })
+ }
+ }
+
+ /// Like `find`, but accepts and returns raw pointers.
+ ///
+ /// When a match is found, the pointer returned is guaranteed to be
+ /// `>= start` and `< end`.
+ ///
+ /// This routine is useful if you're already using raw pointers and would
+ /// like to avoid converting back to a slice before executing a search.
+ ///
+ /// # Safety
+ ///
+ /// * Both `start` and `end` must be valid for reads.
+ /// * Both `start` and `end` must point to an initialized value.
+ /// * Both `start` and `end` must point to the same allocated object and
+ /// must either be in bounds or at most one byte past the end of the
+ /// allocated object.
+ /// * Both `start` and `end` must be _derived from_ a pointer to the same
+ /// object.
+ /// * The distance between `start` and `end` must not overflow `isize`.
+ /// * The distance being in bounds must not rely on "wrapping around" the
+ /// address space.
+ ///
+ /// Note that callers may pass a pair of pointers such that `start >= end`.
+ /// In that case, `None` will always be returned.
+ #[inline]
+ pub unsafe fn find_raw(
+ &self,
+ start: *const u8,
+ end: *const u8,
+ ) -> Option<*const u8> {
+ if start >= end {
+ return None;
+ }
+ let len = end.distance(start);
+ if len < __m256i::BYTES {
+ return if len < __m128i::BYTES {
+ // SAFETY: We require the caller to pass valid start/end
+ // pointers.
+ generic::fwd_byte_by_byte(start, end, |b| {
+ b == self.sse2.needle1()
+ || b == self.sse2.needle2()
+ || b == self.sse2.needle3()
+ })
+ } else {
+ // SAFETY: We require the caller to pass valid start/end
+ // pointers.
+ self.find_raw_sse2(start, end)
+ };
+ }
+ // SAFETY: Building a `Three` means it's safe to call both 'sse2' and
+ // 'avx2' routines. Also, we've checked that our haystack is big
+ // enough to run on the vector routine. Pointer validity is caller's
+ // responsibility.
+ //
+ // Note that we could call `self.avx2.find_raw` directly here. But that
+ // means we'd have to annotate this routine with `target_feature`.
+ // Which is fine, because this routine is `unsafe` anyway and the
+ // `target_feature` obligation is met by virtue of building a `Three`.
+ // The real problem is that a routine with a `target_feature`
+ // annotation generally can't be inlined into caller code unless
+ // the caller code has the same target feature annotations. Namely,
+ // the common case (at time of writing) is for calling code to not
+ // have the `avx2` target feature enabled *at compile time*. Without
+ // `target_feature` on this routine, it can be inlined which will
+ // handle some of the short-haystack cases above without touching the
+ // architecture specific code.
+ self.find_raw_avx2(start, end)
+ }
+
+ /// Like `rfind`, but accepts and returns raw pointers.
+ ///
+ /// When a match is found, the pointer returned is guaranteed to be
+ /// `>= start` and `< end`.
+ ///
+ /// This routine is useful if you're already using raw pointers and would
+ /// like to avoid converting back to a slice before executing a search.
+ ///
+ /// # Safety
+ ///
+ /// * Both `start` and `end` must be valid for reads.
+ /// * Both `start` and `end` must point to an initialized value.
+ /// * Both `start` and `end` must point to the same allocated object and
+ /// must either be in bounds or at most one byte past the end of the
+ /// allocated object.
+ /// * Both `start` and `end` must be _derived from_ a pointer to the same
+ /// object.
+ /// * The distance between `start` and `end` must not overflow `isize`.
+ /// * The distance being in bounds must not rely on "wrapping around" the
+ /// address space.
+ ///
+ /// Note that callers may pass a pair of pointers such that `start >= end`.
+ /// In that case, `None` will always be returned.
+ #[inline]
+ pub unsafe fn rfind_raw(
+ &self,
+ start: *const u8,
+ end: *const u8,
+ ) -> Option<*const u8> {
+ if start >= end {
+ return None;
+ }
+ let len = end.distance(start);
+ if len < __m256i::BYTES {
+ return if len < __m128i::BYTES {
+ // SAFETY: We require the caller to pass valid start/end
+ // pointers.
+ generic::rev_byte_by_byte(start, end, |b| {
+ b == self.sse2.needle1()
+ || b == self.sse2.needle2()
+ || b == self.sse2.needle3()
+ })
+ } else {
+ // SAFETY: We require the caller to pass valid start/end
+ // pointers.
+ self.rfind_raw_sse2(start, end)
+ };
+ }
+ // SAFETY: Building a `Three` means it's safe to call both 'sse2' and
+ // 'avx2' routines. Also, we've checked that our haystack is big
+ // enough to run on the vector routine. Pointer validity is caller's
+ // responsibility.
+ //
+ // See note in forward routine above for why we don't just call
+ // `self.avx2.rfind_raw` directly here.
+ self.rfind_raw_avx2(start, end)
+ }
+
+ /// Execute a search using SSE2 vectors and routines.
+ ///
+ /// # Safety
+ ///
+ /// Same as [`Three::find_raw`], except the distance between `start` and
+ /// `end` must be at least the size of an SSE2 vector (in bytes).
+ ///
+ /// (The target feature safety obligation is automatically fulfilled by
+ /// virtue of being a method on `Three`, which can only be constructed
+ /// when it is safe to call `sse2`/`avx2` routines.)
+ #[target_feature(enable = "sse2")]
+ #[inline]
+ unsafe fn find_raw_sse2(
+ &self,
+ start: *const u8,
+ end: *const u8,
+ ) -> Option<*const u8> {
+ self.sse2.find_raw(start, end)
+ }
+
+ /// Execute a search using SSE2 vectors and routines.
+ ///
+ /// # Safety
+ ///
+ /// Same as [`Three::rfind_raw`], except the distance between `start` and
+ /// `end` must be at least the size of an SSE2 vector (in bytes).
+ ///
+ /// (The target feature safety obligation is automatically fulfilled by
+ /// virtue of being a method on `Three`, which can only be constructed
+ /// when it is safe to call `sse2`/`avx2` routines.)
+ #[target_feature(enable = "sse2")]
+ #[inline]
+ unsafe fn rfind_raw_sse2(
+ &self,
+ start: *const u8,
+ end: *const u8,
+ ) -> Option<*const u8> {
+ self.sse2.rfind_raw(start, end)
+ }
+
+ /// Execute a search using AVX2 vectors and routines.
+ ///
+ /// # Safety
+ ///
+ /// Same as [`Three::find_raw`], except the distance between `start` and
+ /// `end` must be at least the size of an AVX2 vector (in bytes).
+ ///
+ /// (The target feature safety obligation is automatically fulfilled by
+ /// virtue of being a method on `Three`, which can only be constructed
+ /// when it is safe to call `sse2`/`avx2` routines.)
+ #[target_feature(enable = "avx2")]
+ #[inline]
+ unsafe fn find_raw_avx2(
+ &self,
+ start: *const u8,
+ end: *const u8,
+ ) -> Option<*const u8> {
+ self.avx2.find_raw(start, end)
+ }
+
+ /// Execute a search using AVX2 vectors and routines.
+ ///
+ /// # Safety
+ ///
+ /// Same as [`Three::rfind_raw`], except the distance between `start` and
+ /// `end` must be at least the size of an AVX2 vector (in bytes).
+ ///
+ /// (The target feature safety obligation is automatically fulfilled by
+ /// virtue of being a method on `Three`, which can only be constructed
+ /// when it is safe to call `sse2`/`avx2` routines.)
+ #[target_feature(enable = "avx2")]
+ #[inline]
+ unsafe fn rfind_raw_avx2(
+ &self,
+ start: *const u8,
+ end: *const u8,
+ ) -> Option<*const u8> {
+ self.avx2.rfind_raw(start, end)
+ }
+
+ /// Returns an iterator over all occurrences of the needle bytes in the
+ /// given haystack.
+ ///
+ /// The iterator returned implements `DoubleEndedIterator`. This means it
+ /// can also be used to find occurrences in reverse order.
+ #[inline]
+ pub fn iter<'a, 'h>(&'a self, haystack: &'h [u8]) -> ThreeIter<'a, 'h> {
+ ThreeIter { searcher: self, it: generic::Iter::new(haystack) }
+ }
+}
+
+/// An iterator over all occurrences of three possible bytes in a haystack.
+///
+/// This iterator implements `DoubleEndedIterator`, which means it can also be
+/// used to find occurrences in reverse order.
+///
+/// This iterator is created by the [`Three::iter`] method.
+///
+/// The lifetime parameters are as follows:
+///
+/// * `'a` refers to the lifetime of the underlying [`Three`] searcher.
+/// * `'h` refers to the lifetime of the haystack being searched.
+#[derive(Clone, Debug)]
+pub struct ThreeIter<'a, 'h> {
+ searcher: &'a Three,
+ it: generic::Iter<'h>,
+}
+
+impl<'a, 'h> Iterator for ThreeIter<'a, 'h> {
+ type Item = usize;
+
+ #[inline]
+ fn next(&mut self) -> Option<usize> {
+ // SAFETY: We rely on the generic iterator to provide valid start
+ // and end pointers, but we guarantee that any pointer returned by
+ // 'find_raw' falls within the bounds of the start and end pointer.
+ unsafe { self.it.next(|s, e| self.searcher.find_raw(s, e)) }
+ }
+
+ #[inline]
+ fn size_hint(&self) -> (usize, Option<usize>) {
+ self.it.size_hint()
+ }
+}
+
+impl<'a, 'h> DoubleEndedIterator for ThreeIter<'a, 'h> {
+ #[inline]
+ fn next_back(&mut self) -> Option<usize> {
+ // SAFETY: We rely on the generic iterator to provide valid start
+ // and end pointers, but we guarantee that any pointer returned by
+ // 'rfind_raw' falls within the bounds of the start and end pointer.
+ unsafe { self.it.next_back(|s, e| self.searcher.rfind_raw(s, e)) }
+ }
+}
+
+impl<'a, 'h> core::iter::FusedIterator for ThreeIter<'a, 'h> {}
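
Every bullet in the `# Safety` lists above is satisfied by pointers derived from a single slice. The following is a sketch of that caller-side pattern; the helper name is illustrative and not part of this crate:

```rust
use memchr::arch::x86_64::avx2::memchr::Three;

/// Illustrative helper: search a slice through the raw-pointer API.
fn find_any(searcher: &Three, haystack: &[u8]) -> Option<usize> {
    let start = haystack.as_ptr();
    // SAFETY: `start` and `end` both derive from `haystack`, so they point
    // into the same allocated object, `end` is at most one past the end,
    // and their distance cannot overflow `isize` or wrap the address space.
    unsafe {
        let end = start.add(haystack.len());
        let found = searcher.find_raw(start, end)?;
        // `find_raw` guarantees `found >= start`, so this cannot underflow.
        Some(found as usize - start as usize)
    }
}

fn main() {
    if let Some(searcher) = Three::new(b'a', b'b', b'o') {
        assert_eq!(find_any(&searcher, b"afoobar"), Some(0));
    }
}
```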
+
+#[cfg(test)]
+mod tests {
+ use super::*;
+
+ define_memchr_quickcheck!(super);
+
+ #[test]
+ fn forward_one() {
+ crate::tests::memchr::Runner::new(1).forward_iter(
+ |haystack, needles| {
+ Some(One::new(needles[0])?.iter(haystack).collect())
+ },
+ )
+ }
+
+ #[test]
+ fn reverse_one() {
+ crate::tests::memchr::Runner::new(1).reverse_iter(
+ |haystack, needles| {
+ Some(One::new(needles[0])?.iter(haystack).rev().collect())
+ },
+ )
+ }
+
+ #[test]
+ fn count_one() {
+ crate::tests::memchr::Runner::new(1).count_iter(|haystack, needles| {
+ Some(One::new(needles[0])?.iter(haystack).count())
+ })
+ }
+
+ #[test]
+ fn forward_two() {
+ crate::tests::memchr::Runner::new(2).forward_iter(
+ |haystack, needles| {
+ let n1 = needles.get(0).copied()?;
+ let n2 = needles.get(1).copied()?;
+ Some(Two::new(n1, n2)?.iter(haystack).collect())
+ },
+ )
+ }
+
+ #[test]
+ fn reverse_two() {
+ crate::tests::memchr::Runner::new(2).reverse_iter(
+ |haystack, needles| {
+ let n1 = needles.get(0).copied()?;
+ let n2 = needles.get(1).copied()?;
+ Some(Two::new(n1, n2)?.iter(haystack).rev().collect())
+ },
+ )
+ }
+
+ #[test]
+ fn forward_three() {
+ crate::tests::memchr::Runner::new(3).forward_iter(
+ |haystack, needles| {
+ let n1 = needles.get(0).copied()?;
+ let n2 = needles.get(1).copied()?;
+ let n3 = needles.get(2).copied()?;
+ Some(Three::new(n1, n2, n3)?.iter(haystack).collect())
+ },
+ )
+ }
+
+ #[test]
+ fn reverse_three() {
+ crate::tests::memchr::Runner::new(3).reverse_iter(
+ |haystack, needles| {
+ let n1 = needles.get(0).copied()?;
+ let n2 = needles.get(1).copied()?;
+ let n3 = needles.get(2).copied()?;
+ Some(Three::new(n1, n2, n3)?.iter(haystack).rev().collect())
+ },
+ )
+ }
+}
diff --git a/vendor/memchr/src/arch/x86_64/avx2/mod.rs b/vendor/memchr/src/arch/x86_64/avx2/mod.rs
new file mode 100644
index 000000000..ee4097d6f
--- /dev/null
+++ b/vendor/memchr/src/arch/x86_64/avx2/mod.rs
@@ -0,0 +1,6 @@
+/*!
+Algorithms for the `x86_64` target using 256-bit vectors via AVX2.
+*/
+
+pub mod memchr;
+pub mod packedpair;
diff --git a/vendor/memchr/src/arch/x86_64/avx2/packedpair.rs b/vendor/memchr/src/arch/x86_64/avx2/packedpair.rs
new file mode 100644
index 000000000..efae7b66c
--- /dev/null
+++ b/vendor/memchr/src/arch/x86_64/avx2/packedpair.rs
@@ -0,0 +1,272 @@
+/*!
+A 256-bit vector implementation of the "packed pair" SIMD algorithm.
+
+The "packed pair" algorithm is based on the [generic SIMD] algorithm. The main
+difference is that it (by default) uses a background distribution of byte
+frequencies to heuristically select the pair of bytes to search for.
+
+[generic SIMD]: http://0x80.pl/articles/simd-strfind.html#first-and-last
+*/
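
For reference, pair selection itself lives in `arch::all::packedpair`, and this file uses both of its constructors. A tiny sketch distinguishing the heuristic pair from a caller-pinned one (behavior hedged on the `Pair` API as exercised by the tests at the bottom of this file):

```rust
use memchr::arch::all::packedpair::Pair;

fn main() {
    // `Pair::new` heuristically picks two offsets into the needle whose
    // bytes are rare according to a background frequency distribution.
    let auto = Pair::new(b"needle");
    assert!(auto.is_some());
    // `Pair::with_indices` instead pins the pair to chosen offsets, as the
    // packedpair tests below do. Both offsets must be in bounds.
    let pinned = Pair::with_indices(b"needle", 0, 5);
    assert!(pinned.is_some());
}
```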
+
+use core::arch::x86_64::{__m128i, __m256i};
+
+use crate::arch::{all::packedpair::Pair, generic::packedpair};
+
+/// A "packed pair" finder that uses 256-bit vector operations.
+///
+/// This finder picks two bytes that it believes have high predictive power
+/// for indicating an overall match of a needle. Depending on whether
+/// `Finder::find` or `Finder::find_prefilter` is used, it reports offsets
+/// where the needle matches or could match. In the prefilter case, candidates
+/// are reported whenever the [`Pair`] of bytes given matches.
+#[derive(Clone, Copy, Debug)]
+pub struct Finder {
+ sse2: packedpair::Finder<__m128i>,
+ avx2: packedpair::Finder<__m256i>,
+}
+
+impl Finder {
+ /// Create a new pair searcher. The searcher returned can either report
+ /// exact matches of `needle` or act as a prefilter and report candidate
+ /// positions of `needle`.
+ ///
+ /// If AVX2 is unavailable in the current environment or if a [`Pair`]
+ /// could not be constructed from the needle given, then `None` is
+ /// returned.
+ #[inline]
+ pub fn new(needle: &[u8]) -> Option<Finder> {
+ Finder::with_pair(needle, Pair::new(needle)?)
+ }
+
+ /// Create a new "packed pair" finder using the pair of bytes given.
+ ///
+ /// This constructor permits callers to control precisely which pair of
+ /// bytes is used as a predicate.
+ ///
+ /// If AVX2 is unavailable in the current environment, then `None` is
+ /// returned.
+ #[inline]
+ pub fn with_pair(needle: &[u8], pair: Pair) -> Option<Finder> {
+ if Finder::is_available() {
+ // SAFETY: we check that sse2/avx2 is available above. We are also
+ // guaranteed to have needle.len() > 1 because we have a valid
+ // Pair.
+ unsafe { Some(Finder::with_pair_impl(needle, pair)) }
+ } else {
+ None
+ }
+ }
+
+ /// Create a new `Finder` specific to SSE2 and AVX2 vectors and routines.
+ ///
+ /// # Safety
+ ///
+ /// Same as the safety for `packedpair::Finder::new`, and callers must also
+ /// ensure that both SSE2 and AVX2 are available.
+ #[target_feature(enable = "sse2", enable = "avx2")]
+ #[inline]
+ unsafe fn with_pair_impl(needle: &[u8], pair: Pair) -> Finder {
+ let sse2 = packedpair::Finder::<__m128i>::new(needle, pair);
+ let avx2 = packedpair::Finder::<__m256i>::new(needle, pair);
+ Finder { sse2, avx2 }
+ }
+
+ /// Returns true when this implementation is available in the current
+ /// environment.
+ ///
+ /// When this is true, it is guaranteed that [`Finder::with_pair`] will
+ /// return a `Some` value. Similarly, when it is false, it is guaranteed
+ /// that `Finder::with_pair` will return a `None` value. Notice that this
+ /// does not guarantee that [`Finder::new`] will return a `Finder`. Namely,
+ /// even when `Finder::is_available` is true, it is not guaranteed that a
+ /// valid [`Pair`] can be found from the needle given.
+ ///
+ /// Note also that for the lifetime of a single program, if this returns
+ /// true then it will always return true.
+ #[inline]
+ pub fn is_available() -> bool {
+ #[cfg(not(target_feature = "sse2"))]
+ {
+ false
+ }
+ #[cfg(target_feature = "sse2")]
+ {
+ #[cfg(target_feature = "avx2")]
+ {
+ true
+ }
+ #[cfg(not(target_feature = "avx2"))]
+ {
+ #[cfg(feature = "std")]
+ {
+ std::is_x86_feature_detected!("avx2")
+ }
+ #[cfg(not(feature = "std"))]
+ {
+ false
+ }
+ }
+ }
+ }
+
+ /// Execute a search using SSE2 or AVX2 vectors and routines, chosen
+ /// based on the length of the haystack.
+ ///
+ /// # Panics
+ ///
+ /// When `haystack.len()` is less than [`Finder::min_haystack_len`].
+ #[inline]
+ pub fn find(&self, haystack: &[u8], needle: &[u8]) -> Option<usize> {
+ // SAFETY: Building a `Finder` means it's safe to call both 'sse2' and
+ // 'avx2' routines.
+ unsafe { self.find_impl(haystack, needle) }
+ }
+
+ /// Run this finder on the given haystack as a prefilter.
+ ///
+ /// If a candidate match is found, then an offset where the needle *could*
+ /// begin in the haystack is returned.
+ ///
+ /// # Panics
+ ///
+ /// When `haystack.len()` is less than [`Finder::min_haystack_len`].
+ #[inline]
+ pub fn find_prefilter(&self, haystack: &[u8]) -> Option<usize> {
+ // SAFETY: Building a `Finder` means it's safe to call both 'sse2' and
+ // 'avx2' routines.
+ unsafe { self.find_prefilter_impl(haystack) }
+ }
+
+ /// Execute a search using SSE2 or AVX2 vectors and routines, chosen
+ /// based on the length of the haystack.
+ ///
+ /// # Panics
+ ///
+ /// When `haystack.len()` is less than [`Finder::min_haystack_len`].
+ ///
+ /// # Safety
+ ///
+ /// (The target feature safety obligation is automatically fulfilled by
+ /// virtue of being a method on `Finder`, which can only be constructed
+ /// when it is safe to call `sse2` and `avx2` routines.)
+ #[target_feature(enable = "sse2", enable = "avx2")]
+ #[inline]
+ unsafe fn find_impl(
+ &self,
+ haystack: &[u8],
+ needle: &[u8],
+ ) -> Option<usize> {
+ if haystack.len() < self.avx2.min_haystack_len() {
+ self.sse2.find(haystack, needle)
+ } else {
+ self.avx2.find(haystack, needle)
+ }
+ }
+
+ /// Execute a prefilter search using SSE2 or AVX2 vectors and routines,
+ /// chosen based on the length of the haystack.
+ ///
+ /// # Panics
+ ///
+ /// When `haystack.len()` is less than [`Finder::min_haystack_len`].
+ ///
+ /// # Safety
+ ///
+ /// (The target feature safety obligation is automatically fulfilled by
+ /// virtue of being a method on `Finder`, which can only be constructed
+ /// when it is safe to call `sse2` and `avx2` routines.)
+ #[target_feature(enable = "sse2", enable = "avx2")]
+ #[inline]
+ unsafe fn find_prefilter_impl(&self, haystack: &[u8]) -> Option<usize> {
+ if haystack.len() < self.avx2.min_haystack_len() {
+ self.sse2.find_prefilter(haystack)
+ } else {
+ self.avx2.find_prefilter(haystack)
+ }
+ }
+
+ /// Returns the pair of offsets (into the needle) used to check as a
+ /// predicate before confirming whether a needle exists at a particular
+ /// position.
+ #[inline]
+ pub fn pair(&self) -> &Pair {
+ self.avx2.pair()
+ }
+
+ /// Returns the minimum haystack length that this `Finder` can search.
+ ///
+ /// Using a haystack with length smaller than this in a search will result
+ /// in a panic. The reason for this restriction is that this finder is
+ /// meant to be a low-level component that is part of a larger substring
+ /// strategy. In that sense, it avoids trying to handle all cases and
+ /// instead only handles the cases that it can handle very well.
+ #[inline]
+ pub fn min_haystack_len(&self) -> usize {
+ // The caller doesn't need to care about AVX2's min_haystack_len
+ // since this implementation will automatically switch to the SSE2
+ // implementation if the haystack is too short for AVX2. Therefore, the
+ // caller only needs to care about SSE2's min_haystack_len.
+ //
+ // This does assume that SSE2's min_haystack_len is less than or
+ // equal to AVX2's min_haystack_len. In practice, this is true and
+ // there is no way it could be false based on how this Finder is
+ // implemented. Namely, both SSE2 and AVX2 use the same `Pair`. If
+ // they used different pairs, then it's possible (although perhaps
+ // pathological) for SSE2's min_haystack_len to be bigger than AVX2's.
+ self.sse2.min_haystack_len()
+ }
+}
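
The tests below exercise exactly this contract. As a standalone sketch, a caller guards on `min_haystack_len` and falls back to a scalar search for short haystacks; the wrapper name and the naive fallback are illustrative only:

```rust
use memchr::arch::x86_64::avx2::packedpair::Finder;

// Assumes a non-empty needle.
fn find_substring(haystack: &[u8], needle: &[u8]) -> Option<usize> {
    match Finder::new(needle) {
        // Guard: haystacks shorter than `min_haystack_len` would panic.
        Some(f) if haystack.len() >= f.min_haystack_len() => {
            f.find(haystack, needle)
        }
        // Illustrative fallback: a naive scalar window scan.
        _ => haystack.windows(needle.len()).position(|w| w == needle),
    }
}

fn main() {
    let hay = b"a long enough haystack for a packed pair search";
    assert_eq!(find_substring(hay, b"packed"), Some(29));
}
```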
+
+#[cfg(test)]
+mod tests {
+ use super::*;
+
+ fn find(haystack: &[u8], needle: &[u8]) -> Option<Option<usize>> {
+ let f = Finder::new(needle)?;
+ if haystack.len() < f.min_haystack_len() {
+ return None;
+ }
+ Some(f.find(haystack, needle))
+ }
+
+ define_substring_forward_quickcheck!(find);
+
+ #[test]
+ fn forward_substring() {
+ crate::tests::substring::Runner::new().fwd(find).run()
+ }
+
+ #[test]
+ fn forward_packedpair() {
+ fn find(
+ haystack: &[u8],
+ needle: &[u8],
+ index1: u8,
+ index2: u8,
+ ) -> Option<Option<usize>> {
+ let pair = Pair::with_indices(needle, index1, index2)?;
+ let f = Finder::with_pair(needle, pair)?;
+ if haystack.len() < f.min_haystack_len() {
+ return None;
+ }
+ Some(f.find(haystack, needle))
+ }
+ crate::tests::packedpair::Runner::new().fwd(find).run()
+ }
+
+ #[test]
+ fn forward_packedpair_prefilter() {
+ fn find(
+ haystack: &[u8],
+ needle: &[u8],
+ index1: u8,
+ index2: u8,
+ ) -> Option<Option<usize>> {
+ if !cfg!(target_feature = "sse2") {
+ return None;
+ }
+ let pair = Pair::with_indices(needle, index1, index2)?;
+ let f = Finder::with_pair(needle, pair)?;
+ if haystack.len() < f.min_haystack_len() {
+ return None;
+ }
+ Some(f.find_prefilter(haystack))
+ }
+ crate::tests::packedpair::Runner::new().fwd(find).run()
+ }
+}
diff --git a/vendor/memchr/src/arch/x86_64/memchr.rs b/vendor/memchr/src/arch/x86_64/memchr.rs
new file mode 100644
index 000000000..fcb13992f
--- /dev/null
+++ b/vendor/memchr/src/arch/x86_64/memchr.rs
@@ -0,0 +1,335 @@
+/*!
+Wrapper routines for `memchr` and friends.
+
+These routines efficiently dispatch to the best implementation based on what
+the CPU supports.
+*/
+
+/// Provides a way to run a memchr-like function while amortizing the cost of
+/// runtime CPU feature detection.
+///
+/// This works by loading a function pointer from an atomic global. Initially,
+/// this global is set to a function that does CPU feature detection. For
+/// example, if AVX2 is available, then the AVX2 implementation is used.
+/// Otherwise, at least on x86_64, the SSE2 implementation is used. (And
+/// in some niche cases, if SSE2 isn't available, then the architecture
+/// independent fallback implementation is used.)
+///
+/// After the first call to this function, the atomic global is replaced with
+/// the specific AVX2, SSE2 or fallback routine chosen. Subsequent calls then
+/// will directly call the chosen routine instead of needing to go through the
+/// CPU feature detection branching again.
+///
+/// This particular macro is specifically written to provide the implementation
+/// of functions with the following signature:
+///
+/// ```ignore
+/// fn memchr(needle1: u8, start: *const u8, end: *const u8) -> Option<usize>;
+/// ```
+///
+/// Where you can also have `memchr2` and `memchr3`, but with `needle2` and
+/// `needle3`, respectively. The `start` and `end` parameters correspond to the
+/// start and end of the haystack, respectively.
+///
+/// We use raw pointers here instead of the more obvious `haystack: &[u8]` so
+/// that the function is compatible with our lower level iterator logic that
+/// operates on raw pointers. We use this macro to implement "raw" memchr
+/// routines with the signature above, and then define memchr routines using
+/// regular slices on top of them.
+///
+/// Note that we use `#[cfg(target_feature = "sse2")]` below even though
+/// it shouldn't be strictly necessary because without it, it seems to
+/// cause the compiler to blow up. I guess it can't handle a function
+/// pointer being created with a sse target feature? Dunno. See the
+/// `build-for-x86-64-but-non-sse-target` CI job if you want to experiment with
+/// this.
+///
+/// # Safety
+///
+/// Primarily, callers must ensure that `$fnty` is a correct function pointer
+/// type and not something else.
+///
+/// Callers must also ensure that `$memchrty::$memchrfind` corresponds to a
+/// routine that returns a valid pointer when a match is found. That is, a
+/// pointer that is `>= start` and `< end`.
+///
+/// Callers must also ensure that the `$hay_start` and `$hay_end` identifiers
+/// correspond to valid pointers.
+macro_rules! unsafe_ifunc {
+ (
+ $memchrty:ident,
+ $memchrfind:ident,
+ $fnty:ty,
+ $retty:ty,
+ $hay_start:ident,
+ $hay_end:ident,
+ $($needle:ident),+
+ ) => {{
+ #![allow(unused_unsafe)]
+
+ use core::sync::atomic::{AtomicPtr, Ordering};
+
+ type Fn = *mut ();
+ type RealFn = $fnty;
+ static FN: AtomicPtr<()> = AtomicPtr::new(detect as Fn);
+
+ #[cfg(target_feature = "sse2")]
+ #[target_feature(enable = "sse2", enable = "avx2")]
+ unsafe fn find_avx2(
+ $($needle: u8),+,
+ $hay_start: *const u8,
+ $hay_end: *const u8,
+ ) -> $retty {
+ use crate::arch::x86_64::avx2::memchr::$memchrty;
+ $memchrty::new_unchecked($($needle),+)
+ .$memchrfind($hay_start, $hay_end)
+ }
+
+ #[cfg(target_feature = "sse2")]
+ #[target_feature(enable = "sse2")]
+ unsafe fn find_sse2(
+ $($needle: u8),+,
+ $hay_start: *const u8,
+ $hay_end: *const u8,
+ ) -> $retty {
+ use crate::arch::x86_64::sse2::memchr::$memchrty;
+ $memchrty::new_unchecked($($needle),+)
+ .$memchrfind($hay_start, $hay_end)
+ }
+
+ unsafe fn find_fallback(
+ $($needle: u8),+,
+ $hay_start: *const u8,
+ $hay_end: *const u8,
+ ) -> $retty {
+ use crate::arch::all::memchr::$memchrty;
+ $memchrty::new($($needle),+).$memchrfind($hay_start, $hay_end)
+ }
+
+ unsafe fn detect(
+ $($needle: u8),+,
+ $hay_start: *const u8,
+ $hay_end: *const u8,
+ ) -> $retty {
+ let fun = {
+ #[cfg(not(target_feature = "sse2"))]
+ {
+ debug!(
+ "no sse2 feature available, using fallback for {}",
+ stringify!($memchrty),
+ );
+ find_fallback as RealFn
+ }
+ #[cfg(target_feature = "sse2")]
+ {
+ use crate::arch::x86_64::{sse2, avx2};
+ if avx2::memchr::$memchrty::is_available() {
+ debug!("chose AVX2 for {}", stringify!($memchrty));
+ find_avx2 as RealFn
+ } else if sse2::memchr::$memchrty::is_available() {
+ debug!("chose SSE2 for {}", stringify!($memchrty));
+ find_sse2 as RealFn
+ } else {
+ debug!("chose fallback for {}", stringify!($memchrty));
+ find_fallback as RealFn
+ }
+ }
+ };
+ FN.store(fun as Fn, Ordering::Relaxed);
+ // SAFETY: The only obligations we need to uphold here are the
+ // `#[target_feature]` requirements. Since we check is_available
+ // above before using the corresponding implementation, we are
+ // guaranteed to only call code that is supported on the current
+ // CPU.
+ fun($($needle),+, $hay_start, $hay_end)
+ }
+
+ // SAFETY: By virtue of the caller contract, RealFn is a function
+ // pointer, which is always safe to transmute with a *mut (). Also,
+ // since we use $memchrty::is_available, it is guaranteed to be safe
+ // to call $memchrty::$memchrfind.
+ unsafe {
+ let fun = FN.load(Ordering::Relaxed);
+ core::mem::transmute::<Fn, RealFn>(fun)(
+ $($needle),+,
+ $hay_start,
+ $hay_end,
+ )
+ }
+ }};
+}
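
Stripped of the macro plumbing, the self-replacing function pointer described above can be sketched on its own. The names here are illustrative, and the scalar fallback stands in for whichever routine feature detection would actually pick:

```rust
use core::sync::atomic::{AtomicPtr, Ordering};

type ErasedFn = *mut ();
type RealFn = fn(u8, &[u8]) -> Option<usize>;

// Initially points at `detect`; after the first call, at the chosen routine.
static FN: AtomicPtr<()> = AtomicPtr::new(detect as RealFn as ErasedFn);

// Stand-in for an architecture specific implementation.
fn fallback(needle: u8, haystack: &[u8]) -> Option<usize> {
    haystack.iter().position(|&b| b == needle)
}

// First call only: pick an implementation, cache it, then defer to it.
fn detect(needle: u8, haystack: &[u8]) -> Option<usize> {
    let chosen: RealFn = fallback; // the real macro branches on CPU features
    FN.store(chosen as ErasedFn, Ordering::Relaxed);
    chosen(needle, haystack)
}

// Every call afterwards loads the cached pointer and jumps through it.
pub fn memchr_like(needle: u8, haystack: &[u8]) -> Option<usize> {
    let fun = FN.load(Ordering::Relaxed);
    // SAFETY: `FN` only ever holds values created from a `RealFn`.
    unsafe { core::mem::transmute::<ErasedFn, RealFn>(fun)(needle, haystack) }
}
```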
+
+// The routines below dispatch to AVX2, SSE2 or a fallback routine based on
+// what's available in the current environment. The secret sauce here is that
+// we only check for which one to use approximately once, and then "cache" that
+// choice into a global function pointer. Subsequent invocations then just call
+// the appropriate function directly.
+
+/// memchr, but using raw pointers to represent the haystack.
+///
+/// # Safety
+///
+/// Pointers must be valid. See `One::find_raw`.
+#[inline(always)]
+pub(crate) unsafe fn memchr_raw(
+ n1: u8,
+ start: *const u8,
+ end: *const u8,
+) -> Option<*const u8> {
+ // SAFETY: We provide a valid function pointer type.
+ unsafe_ifunc!(
+ One,
+ find_raw,
+ unsafe fn(u8, *const u8, *const u8) -> Option<*const u8>,
+ Option<*const u8>,
+ start,
+ end,
+ n1
+ )
+}
+
+/// memrchr, but using raw pointers to represent the haystack.
+///
+/// # Safety
+///
+/// Pointers must be valid. See `One::rfind_raw`.
+#[inline(always)]
+pub(crate) unsafe fn memrchr_raw(
+ n1: u8,
+ start: *const u8,
+ end: *const u8,
+) -> Option<*const u8> {
+ // SAFETY: We provide a valid function pointer type.
+ unsafe_ifunc!(
+ One,
+ rfind_raw,
+ unsafe fn(u8, *const u8, *const u8) -> Option<*const u8>,
+ Option<*const u8>,
+ start,
+ end,
+ n1
+ )
+}
+
+/// memchr2, but using raw pointers to represent the haystack.
+///
+/// # Safety
+///
+/// Pointers must be valid. See `Two::find_raw`.
+#[inline(always)]
+pub(crate) unsafe fn memchr2_raw(
+ n1: u8,
+ n2: u8,
+ start: *const u8,
+ end: *const u8,
+) -> Option<*const u8> {
+ // SAFETY: We provide a valid function pointer type.
+ unsafe_ifunc!(
+ Two,
+ find_raw,
+ unsafe fn(u8, u8, *const u8, *const u8) -> Option<*const u8>,
+ Option<*const u8>,
+ start,
+ end,
+ n1,
+ n2
+ )
+}
+
+/// memrchr2, but using raw pointers to represent the haystack.
+///
+/// # Safety
+///
+/// Pointers must be valid. See `Two::rfind_raw`.
+#[inline(always)]
+pub(crate) unsafe fn memrchr2_raw(
+ n1: u8,
+ n2: u8,
+ start: *const u8,
+ end: *const u8,
+) -> Option<*const u8> {
+ // SAFETY: We provide a valid function pointer type.
+ unsafe_ifunc!(
+ Two,
+ rfind_raw,
+ unsafe fn(u8, u8, *const u8, *const u8) -> Option<*const u8>,
+ Option<*const u8>,
+ start,
+ end,
+ n1,
+ n2
+ )
+}
+
+/// memchr3, but using raw pointers to represent the haystack.
+///
+/// # Safety
+///
+/// Pointers must be valid. See `Three::find_raw`.
+#[inline(always)]
+pub(crate) unsafe fn memchr3_raw(
+ n1: u8,
+ n2: u8,
+ n3: u8,
+ start: *const u8,
+ end: *const u8,
+) -> Option<*const u8> {
+ // SAFETY: We provide a valid function pointer type.
+ unsafe_ifunc!(
+ Three,
+ find_raw,
+ unsafe fn(u8, u8, u8, *const u8, *const u8) -> Option<*const u8>,
+ Option<*const u8>,
+ start,
+ end,
+ n1,
+ n2,
+ n3
+ )
+}
+
+/// memrchr3, but using raw pointers to represent the haystack.
+///
+/// # Safety
+///
+/// Pointers must be valid. See `Three::rfind_raw`.
+#[inline(always)]
+pub(crate) unsafe fn memrchr3_raw(
+ n1: u8,
+ n2: u8,
+ n3: u8,
+ start: *const u8,
+ end: *const u8,
+) -> Option<*const u8> {
+ // SAFETY: We provide a valid function pointer type.
+ unsafe_ifunc!(
+ Three,
+ rfind_raw,
+ unsafe fn(u8, u8, u8, *const u8, *const u8) -> Option<*const u8>,
+ Option<*const u8>,
+ start,
+ end,
+ n1,
+ n2,
+ n3
+ )
+}
+
+/// Count all matching bytes, but using raw pointers to represent the haystack.
+///
+/// # Safety
+///
+/// Pointers must be valid. See `One::count_raw`.
+#[inline(always)]
+pub(crate) unsafe fn count_raw(
+ n1: u8,
+ start: *const u8,
+ end: *const u8,
+) -> usize {
+ // SAFETY: We provide a valid function pointer type.
+ unsafe_ifunc!(
+ One,
+ count_raw,
+ unsafe fn(u8, *const u8, *const u8) -> usize,
+ usize,
+ start,
+ end,
+ n1
+ )
+}
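
As the macro documentation notes, the crate's slice-oriented `memchr` routines are then defined on top of these raw entry points. A sketch of that layering, with a trivial stand-in for the ifunc-dispatched routine so the example is self-contained:

```rust
// Illustrative stub for a raw routine like `memchr_raw` above.
// SAFETY: callers must uphold the usual start/end pointer contract.
unsafe fn memchr_raw_stub(
    n1: u8,
    start: *const u8,
    end: *const u8,
) -> Option<*const u8> {
    let mut p = start;
    while p < end {
        if *p == n1 {
            return Some(p);
        }
        p = p.add(1);
    }
    None
}

// The slice API derives its pointers from a borrowed slice and converts
// the returned pointer back into an offset.
pub fn memchr(needle: u8, haystack: &[u8]) -> Option<usize> {
    let start = haystack.as_ptr();
    // SAFETY: both pointers derive from `haystack`, satisfying the raw
    // routine's pointer contract; any returned pointer is `>= start`.
    unsafe {
        let end = start.add(haystack.len());
        memchr_raw_stub(needle, start, end)
            .map(|p| p as usize - start as usize)
    }
}

fn main() {
    assert_eq!(memchr(b'o', b"afoobar"), Some(2));
}
```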
diff --git a/vendor/memchr/src/arch/x86_64/mod.rs b/vendor/memchr/src/arch/x86_64/mod.rs
new file mode 100644
index 000000000..5dad72182
--- /dev/null
+++ b/vendor/memchr/src/arch/x86_64/mod.rs
@@ -0,0 +1,8 @@
+/*!
+Vector algorithms for the `x86_64` target.
+*/
+
+pub mod avx2;
+pub mod sse2;
+
+pub(crate) mod memchr;
diff --git a/vendor/memchr/src/arch/x86_64/sse2/memchr.rs b/vendor/memchr/src/arch/x86_64/sse2/memchr.rs
new file mode 100644
index 000000000..c6f75df4a
--- /dev/null
+++ b/vendor/memchr/src/arch/x86_64/sse2/memchr.rs
@@ -0,0 +1,1077 @@
+/*!
+This module defines 128-bit vector implementations of `memchr` and friends.
+
+The main types in this module are [`One`], [`Two`] and [`Three`]. They are for
+searching for one, two or three distinct bytes, respectively, in a haystack.
+Each type also has corresponding double ended iterators. These searchers are
+typically much faster than scalar routines accomplishing the same task.
+
+The `One` searcher also provides a [`One::count`] routine for efficiently
+counting the number of times a single byte occurs in a haystack. This is
+useful, for example, for counting the number of lines in a haystack. This
+routine exists because it is usually faster, especially with a high match
+count, than using [`One::find`] repeatedly. ([`OneIter`] specializes its
+`Iterator::count` implementation to use this routine.)
+
+Only one, two and three bytes are supported because three bytes is about
+the point where one sees diminishing returns. Beyond this point, it's
+probably (but not necessarily) better to just use a simple `[bool; 256]` array
+or similar. However, it depends mightily on the specific workload and the
+expected match frequency.
+*/
+
+use core::arch::x86_64::__m128i;
+
+use crate::{arch::generic::memchr as generic, ext::Pointer, vector::Vector};
+
+/// Finds all occurrences of a single byte in a haystack.
+#[derive(Clone, Copy, Debug)]
+pub struct One(generic::One<__m128i>);
+
+impl One {
+ /// Create a new searcher that finds occurrences of the needle byte given.
+ ///
+ /// This particular searcher is specialized to use SSE2 vector instructions
+ /// that typically make it quite fast.
+ ///
+ /// If SSE2 is unavailable in the current environment, then `None` is
+ /// returned.
+ #[inline]
+ pub fn new(needle: u8) -> Option<One> {
+ if One::is_available() {
+ // SAFETY: we check that sse2 is available above.
+ unsafe { Some(One::new_unchecked(needle)) }
+ } else {
+ None
+ }
+ }
+
+ /// Create a new finder specific to SSE2 vectors and routines without
+ /// checking that SSE2 is available.
+ ///
+ /// # Safety
+ ///
+ /// Callers must guarantee that it is safe to execute `sse2` instructions
+ /// in the current environment.
+ ///
+ /// Note that it is a common misconception that if one compiles for an
+ /// `x86_64` target, then they therefore automatically have access to SSE2
+ /// instructions. While this is almost always the case, it isn't true in
+ /// 100% of cases.
+ #[target_feature(enable = "sse2")]
+ #[inline]
+ pub unsafe fn new_unchecked(needle: u8) -> One {
+ One(generic::One::new(needle))
+ }
+
+ /// Returns true when this implementation is available in the current
+ /// environment.
+ ///
+ /// When this is true, it is guaranteed that [`One::new`] will return
+ /// a `Some` value. Similarly, when it is false, it is guaranteed that
+ /// `One::new` will return a `None` value.
+ ///
+ /// Note also that for the lifetime of a single program, if this returns
+ /// true then it will always return true.
+ #[inline]
+ pub fn is_available() -> bool {
+ #[cfg(target_feature = "sse2")]
+ {
+ true
+ }
+ #[cfg(not(target_feature = "sse2"))]
+ {
+ false
+ }
+ }
+
+ /// Return the first occurrence of one of the needle bytes in the given
+ /// haystack. If no such occurrence exists, then `None` is returned.
+ ///
+ /// The occurrence is reported as an offset into `haystack`. Its maximum
+ /// value is `haystack.len() - 1`.
+ #[inline]
+ pub fn find(&self, haystack: &[u8]) -> Option<usize> {
+ // SAFETY: `find_raw` guarantees that if a pointer is returned, it
+ // falls within the bounds of the start and end pointers.
+ unsafe {
+ generic::search_slice_with_raw(haystack, |s, e| {
+ self.find_raw(s, e)
+ })
+ }
+ }
+
+ /// Return the last occurrence of one of the needle bytes in the given
+ /// haystack. If no such occurrence exists, then `None` is returned.
+ ///
+ /// The occurrence is reported as an offset into `haystack`. Its maximum
+ /// value is `haystack.len() - 1`.
+ #[inline]
+ pub fn rfind(&self, haystack: &[u8]) -> Option<usize> {
+ // SAFETY: `rfind_raw` guarantees that if a pointer is returned, it
+ // falls within the bounds of the start and end pointers.
+ unsafe {
+ generic::search_slice_with_raw(haystack, |s, e| {
+ self.rfind_raw(s, e)
+ })
+ }
+ }
+
+ /// Counts all occurrences of this byte in the given haystack.
+ #[inline]
+ pub fn count(&self, haystack: &[u8]) -> usize {
+ // SAFETY: All of our pointers are derived directly from a borrowed
+ // slice, which is guaranteed to be valid.
+ unsafe {
+ let start = haystack.as_ptr();
+ let end = start.add(haystack.len());
+ self.count_raw(start, end)
+ }
+ }
+
+ /// Like `find`, but accepts and returns raw pointers.
+ ///
+ /// When a match is found, the pointer returned is guaranteed to be
+ /// `>= start` and `< end`.
+ ///
+ /// This routine is useful if you're already using raw pointers and would
+ /// like to avoid converting back to a slice before executing a search.
+ ///
+ /// # Safety
+ ///
+ /// * Both `start` and `end` must be valid for reads.
+ /// * Both `start` and `end` must point to an initialized value.
+ /// * Both `start` and `end` must point to the same allocated object and
+ /// must either be in bounds or at most one byte past the end of the
+ /// allocated object.
+ /// * Both `start` and `end` must be _derived from_ a pointer to the same
+ /// object.
+ /// * The distance between `start` and `end` must not overflow `isize`.
+ /// * The distance being in bounds must not rely on "wrapping around" the
+ /// address space.
+ ///
+ /// Note that callers may pass a pair of pointers such that `start >= end`.
+ /// In that case, `None` will always be returned.
+ #[inline]
+ pub unsafe fn find_raw(
+ &self,
+ start: *const u8,
+ end: *const u8,
+ ) -> Option<*const u8> {
+ if start >= end {
+ return None;
+ }
+ if end.distance(start) < __m128i::BYTES {
+ // SAFETY: We require the caller to pass valid start/end pointers.
+ return generic::fwd_byte_by_byte(start, end, |b| {
+ b == self.0.needle1()
+ });
+ }
+ // SAFETY: Building a `One` means it's safe to call 'sse2' routines.
+ // Also, we've checked that our haystack is big enough to run on the
+ // vector routine. Pointer validity is caller's responsibility.
+ //
+ // Note that we could call `self.0.find_raw` directly here. But that
+ // means we'd have to annotate this routine with `target_feature`.
+ // Which is fine, because this routine is `unsafe` anyway and the
+ // `target_feature` obligation is met by virtue of building a `One`.
+ // The real problem is that a routine with a `target_feature`
+ // annotation generally can't be inlined into caller code unless the
+ // caller code has the same target feature annotations. Which is maybe
+ // okay for SSE2, but we do the same thing for AVX2 where caller code
+ // probably usually doesn't have AVX2 enabled. That means that this
+ // routine can be inlined which will handle some of the short-haystack
+ // cases above without touching the architecture specific code.
+ self.find_raw_impl(start, end)
+ }
+
+ /// Like `rfind`, but accepts and returns raw pointers.
+ ///
+ /// When a match is found, the pointer returned is guaranteed to be
+ /// `>= start` and `< end`.
+ ///
+ /// This routine is useful if you're already using raw pointers and would
+ /// like to avoid converting back to a slice before executing a search.
+ ///
+ /// # Safety
+ ///
+ /// * Both `start` and `end` must be valid for reads.
+ /// * Both `start` and `end` must point to an initialized value.
+ /// * Both `start` and `end` must point to the same allocated object and
+ /// must either be in bounds or at most one byte past the end of the
+ /// allocated object.
+ /// * Both `start` and `end` must be _derived from_ a pointer to the same
+ /// object.
+ /// * The distance between `start` and `end` must not overflow `isize`.
+ /// * The distance being in bounds must not rely on "wrapping around" the
+ /// address space.
+ ///
+ /// Note that callers may pass a pair of pointers such that `start >= end`.
+ /// In that case, `None` will always be returned.
+ #[inline]
+ pub unsafe fn rfind_raw(
+ &self,
+ start: *const u8,
+ end: *const u8,
+ ) -> Option<*const u8> {
+ if start >= end {
+ return None;
+ }
+ if end.distance(start) < __m128i::BYTES {
+ // SAFETY: We require the caller to pass valid start/end pointers.
+ return generic::rev_byte_by_byte(start, end, |b| {
+ b == self.0.needle1()
+ });
+ }
+ // SAFETY: Building a `One` means it's safe to call 'sse2' routines.
+ // Also, we've checked that our haystack is big enough to run on the
+ // vector routine. Pointer validity is caller's responsibility.
+ //
+ // See note in forward routine above for why we don't just call
+ // `self.0.rfind_raw` directly here.
+ self.rfind_raw_impl(start, end)
+ }
+
+ /// Counts all occurrences of this byte in the given haystack represented
+ /// by raw pointers.
+ ///
+ /// This routine is useful if you're already using raw pointers and would
+ /// like to avoid converting back to a slice before executing a search.
+ ///
+ /// # Safety
+ ///
+ /// * Both `start` and `end` must be valid for reads.
+ /// * Both `start` and `end` must point to an initialized value.
+ /// * Both `start` and `end` must point to the same allocated object and
+ /// must either be in bounds or at most one byte past the end of the
+ /// allocated object.
+ /// * Both `start` and `end` must be _derived from_ a pointer to the same
+ /// object.
+ /// * The distance between `start` and `end` must not overflow `isize`.
+ /// * The distance being in bounds must not rely on "wrapping around" the
+ /// address space.
+ ///
+ /// Note that callers may pass a pair of pointers such that `start >= end`.
+ /// In that case, `0` will always be returned.
+ #[inline]
+ pub unsafe fn count_raw(&self, start: *const u8, end: *const u8) -> usize {
+ if start >= end {
+ return 0;
+ }
+ if end.distance(start) < __m128i::BYTES {
+ // SAFETY: We require the caller to pass valid start/end pointers.
+ return generic::count_byte_by_byte(start, end, |b| {
+ b == self.0.needle1()
+ });
+ }
+ // SAFETY: Building a `One` means it's safe to call 'sse2' routines.
+ // Also, we've checked that our haystack is big enough to run on the
+ // vector routine. Pointer validity is caller's responsibility.
+ self.count_raw_impl(start, end)
+ }
+
+ /// Execute a search using SSE2 vectors and routines.
+ ///
+ /// # Safety
+ ///
+ /// Same as [`One::find_raw`], except the distance between `start` and
+ /// `end` must be at least the size of an SSE2 vector (in bytes).
+ ///
+ /// (The target feature safety obligation is automatically fulfilled by
+ /// virtue of being a method on `One`, which can only be constructed
+ /// when it is safe to call `sse2` routines.)
+ #[target_feature(enable = "sse2")]
+ #[inline]
+ unsafe fn find_raw_impl(
+ &self,
+ start: *const u8,
+ end: *const u8,
+ ) -> Option<*const u8> {
+ self.0.find_raw(start, end)
+ }
+
+ /// Execute a search using SSE2 vectors and routines.
+ ///
+ /// # Safety
+ ///
+ /// Same as [`One::rfind_raw`], except the distance between `start` and
+ /// `end` must be at least the size of an SSE2 vector (in bytes).
+ ///
+ /// (The target feature safety obligation is automatically fulfilled by
+ /// virtue of being a method on `One`, which can only be constructed
+ /// when it is safe to call `sse2` routines.)
+ #[target_feature(enable = "sse2")]
+ #[inline]
+ unsafe fn rfind_raw_impl(
+ &self,
+ start: *const u8,
+ end: *const u8,
+ ) -> Option<*const u8> {
+ self.0.rfind_raw(start, end)
+ }
+
+ /// Execute a count using SSE2 vectors and routines.
+ ///
+ /// # Safety
+ ///
+ /// Same as [`One::count_raw`], except the distance between `start` and
+ /// `end` must be at least the size of an SSE2 vector (in bytes).
+ ///
+ /// (The target feature safety obligation is automatically fulfilled by
+ /// virtue of being a method on `One`, which can only be constructed
+ /// when it is safe to call `sse2` routines.)
+ #[target_feature(enable = "sse2")]
+ #[inline]
+ unsafe fn count_raw_impl(
+ &self,
+ start: *const u8,
+ end: *const u8,
+ ) -> usize {
+ self.0.count_raw(start, end)
+ }
+
+ /// Returns an iterator over all occurrences of the needle byte in the
+ /// given haystack.
+ ///
+ /// The iterator returned implements `DoubleEndedIterator`. This means it
+ /// can also be used to find occurrences in reverse order.
+ #[inline]
+ pub fn iter<'a, 'h>(&'a self, haystack: &'h [u8]) -> OneIter<'a, 'h> {
+ OneIter { searcher: self, it: generic::Iter::new(haystack) }
+ }
+}
+
+/// An iterator over all occurrences of a single byte in a haystack.
+///
+/// This iterator implements `DoubleEndedIterator`, which means it can also be
+/// used to find occurrences in reverse order.
+///
+/// This iterator is created by the [`One::iter`] method.
+///
+/// The lifetime parameters are as follows:
+///
+/// * `'a` refers to the lifetime of the underlying [`One`] searcher.
+/// * `'h` refers to the lifetime of the haystack being searched.
+#[derive(Clone, Debug)]
+pub struct OneIter<'a, 'h> {
+ searcher: &'a One,
+ it: generic::Iter<'h>,
+}
+
+impl<'a, 'h> Iterator for OneIter<'a, 'h> {
+ type Item = usize;
+
+ #[inline]
+ fn next(&mut self) -> Option<usize> {
+ // SAFETY: We rely on the generic iterator to provide valid start
+ // and end pointers, but we guarantee that any pointer returned by
+ // 'find_raw' falls within the bounds of the start and end pointer.
+ unsafe { self.it.next(|s, e| self.searcher.find_raw(s, e)) }
+ }
+
+ #[inline]
+ fn count(self) -> usize {
+ self.it.count(|s, e| {
+ // SAFETY: We rely on our generic iterator to return valid start
+ // and end pointers.
+ unsafe { self.searcher.count_raw(s, e) }
+ })
+ }
+
+ #[inline]
+ fn size_hint(&self) -> (usize, Option<usize>) {
+ self.it.size_hint()
+ }
+}
+
+impl<'a, 'h> DoubleEndedIterator for OneIter<'a, 'h> {
+ #[inline]
+ fn next_back(&mut self) -> Option<usize> {
+ // SAFETY: We rely on the generic iterator to provide valid start
+ // and end pointers, but we guarantee that any pointer returned by
+ // 'rfind_raw' falls within the bounds of the start and end pointer.
+ unsafe { self.it.next_back(|s, e| self.searcher.rfind_raw(s, e)) }
+ }
+}
+
+impl<'a, 'h> core::iter::FusedIterator for OneIter<'a, 'h> {}
+
+/// Finds all occurrences of two bytes in a haystack.
+///
+/// That is, this reports matches of one of two possible bytes. For example,
+/// searching for `a` or `b` in `afoobar` would report matches at offsets `0`,
+/// `4` and `5`.
+#[derive(Clone, Copy, Debug)]
+pub struct Two(generic::Two<__m128i>);
+
+impl Two {
+ /// Create a new searcher that finds occurrences of the needle bytes given.
+ ///
+ /// This particular searcher is specialized to use SSE2 vector instructions
+ /// that typically make it quite fast.
+ ///
+ /// If SSE2 is unavailable in the current environment, then `None` is
+ /// returned.
+ #[inline]
+ pub fn new(needle1: u8, needle2: u8) -> Option<Two> {
+ if Two::is_available() {
+ // SAFETY: we check that sse2 is available above.
+ unsafe { Some(Two::new_unchecked(needle1, needle2)) }
+ } else {
+ None
+ }
+ }
+
+ /// Create a new finder specific to SSE2 vectors and routines without
+ /// checking that SSE2 is available.
+ ///
+ /// # Safety
+ ///
+ /// Callers must guarantee that it is safe to execute `sse2` instructions
+ /// in the current environment.
+ ///
+ /// Note that it is a common misconception that if one compiles for an
+ /// `x86_64` target, then they therefore automatically have access to SSE2
+ /// instructions. While this is almost always the case, it isn't true in
+ /// 100% of cases.
+ #[target_feature(enable = "sse2")]
+ #[inline]
+ pub unsafe fn new_unchecked(needle1: u8, needle2: u8) -> Two {
+ Two(generic::Two::new(needle1, needle2))
+ }
+
+ /// Returns true when this implementation is available in the current
+ /// environment.
+ ///
+ /// When this is true, it is guaranteed that [`Two::new`] will return
+ /// a `Some` value. Similarly, when it is false, it is guaranteed that
+ /// `Two::new` will return a `None` value.
+ ///
+ /// Note also that for the lifetime of a single program, if this returns
+ /// true then it will always return true.
+ #[inline]
+ pub fn is_available() -> bool {
+ #[cfg(target_feature = "sse2")]
+ {
+ true
+ }
+ #[cfg(not(target_feature = "sse2"))]
+ {
+ false
+ }
+ }
+
+ /// Return the first occurrence of one of the needle bytes in the given
+ /// haystack. If no such occurrence exists, then `None` is returned.
+ ///
+ /// The occurrence is reported as an offset into `haystack`. Its maximum
+ /// value is `haystack.len() - 1`.
+ #[inline]
+ pub fn find(&self, haystack: &[u8]) -> Option<usize> {
+ // SAFETY: `find_raw` guarantees that if a pointer is returned, it
+ // falls within the bounds of the start and end pointers.
+ unsafe {
+ generic::search_slice_with_raw(haystack, |s, e| {
+ self.find_raw(s, e)
+ })
+ }
+ }
+
+ /// Return the last occurrence of one of the needle bytes in the given
+ /// haystack. If no such occurrence exists, then `None` is returned.
+ ///
+ /// The occurrence is reported as an offset into `haystack`. Its maximum
+ /// value is `haystack.len() - 1`.
+ #[inline]
+ pub fn rfind(&self, haystack: &[u8]) -> Option<usize> {
+ // SAFETY: `rfind_raw` guarantees that if a pointer is returned, it
+ // falls within the bounds of the start and end pointers.
+ unsafe {
+ generic::search_slice_with_raw(haystack, |s, e| {
+ self.rfind_raw(s, e)
+ })
+ }
+ }
+
+ /// Like `find`, but accepts and returns raw pointers.
+ ///
+ /// When a match is found, the pointer returned is guaranteed to be
+ /// `>= start` and `< end`.
+ ///
+ /// This routine is useful if you're already using raw pointers and would
+ /// like to avoid converting back to a slice before executing a search.
+ ///
+ /// # Safety
+ ///
+ /// * Both `start` and `end` must be valid for reads.
+ /// * Both `start` and `end` must point to an initialized value.
+ /// * Both `start` and `end` must point to the same allocated object and
+ /// must either be in bounds or at most one byte past the end of the
+ /// allocated object.
+ /// * Both `start` and `end` must be _derived from_ a pointer to the same
+ /// object.
+ /// * The distance between `start` and `end` must not overflow `isize`.
+ /// * The distance being in bounds must not rely on "wrapping around" the
+ /// address space.
+ ///
+ /// Note that callers may pass a pair of pointers such that `start >= end`.
+ /// In that case, `None` will always be returned.
+ #[inline]
+ pub unsafe fn find_raw(
+ &self,
+ start: *const u8,
+ end: *const u8,
+ ) -> Option<*const u8> {
+ if start >= end {
+ return None;
+ }
+ if end.distance(start) < __m128i::BYTES {
+ // SAFETY: We require the caller to pass valid start/end pointers.
+ return generic::fwd_byte_by_byte(start, end, |b| {
+ b == self.0.needle1() || b == self.0.needle2()
+ });
+ }
+ // SAFETY: Building a `Two` means it's safe to call 'sse2' routines.
+ // Also, we've checked that our haystack is big enough to run on the
+ // vector routine. Pointer validity is caller's responsibility.
+ //
+ // Note that we could call `self.0.find_raw` directly here. But that
+ // means we'd have to annotate this routine with `target_feature`.
+ // Which is fine, because this routine is `unsafe` anyway and the
+ // `target_feature` obligation is met by virtue of building a `Two`.
+ // The real problem is that a routine with a `target_feature`
+ // annotation generally can't be inlined into caller code unless the
+ // caller code has the same target feature annotations. Which is maybe
+ // okay for SSE2, but we do the same thing for AVX2 where caller code
+ // probably usually doesn't have AVX2 enabled. That means that this
+ // routine can be inlined which will handle some of the short-haystack
+ // cases above without touching the architecture specific code.
+ self.find_raw_impl(start, end)
+ }
+
+ /// Like `rfind`, but accepts and returns raw pointers.
+ ///
+ /// When a match is found, the pointer returned is guaranteed to be
+ /// `>= start` and `< end`.
+ ///
+ /// This routine is useful if you're already using raw pointers and would
+ /// like to avoid converting back to a slice before executing a search.
+ ///
+ /// # Safety
+ ///
+ /// * Both `start` and `end` must be valid for reads.
+ /// * Both `start` and `end` must point to an initialized value.
+ /// * Both `start` and `end` must point to the same allocated object and
+ /// must either be in bounds or at most one byte past the end of the
+ /// allocated object.
+ /// * Both `start` and `end` must be _derived from_ a pointer to the same
+ /// object.
+ /// * The distance between `start` and `end` must not overflow `isize`.
+ /// * The distance being in bounds must not rely on "wrapping around" the
+ /// address space.
+ ///
+ /// Note that callers may pass a pair of pointers such that `start >= end`.
+ /// In that case, `None` will always be returned.
+ #[inline]
+ pub unsafe fn rfind_raw(
+ &self,
+ start: *const u8,
+ end: *const u8,
+ ) -> Option<*const u8> {
+ if start >= end {
+ return None;
+ }
+ if end.distance(start) < __m128i::BYTES {
+ // SAFETY: We require the caller to pass valid start/end pointers.
+ return generic::rev_byte_by_byte(start, end, |b| {
+ b == self.0.needle1() || b == self.0.needle2()
+ });
+ }
+ // SAFETY: Building a `Two` means it's safe to call 'sse2' routines.
+ // Also, we've checked that our haystack is big enough to run on the
+ // vector routine. Pointer validity is caller's responsibility.
+ //
+ // See note in forward routine above for why we don't just call
+ // `self.0.rfind_raw` directly here.
+ self.rfind_raw_impl(start, end)
+ }
+
+ /// Execute a search using SSE2 vectors and routines.
+ ///
+ /// # Safety
+ ///
+ /// Same as [`Two::find_raw`], except the distance between `start` and
+ /// `end` must be at least the size of an SSE2 vector (in bytes).
+ ///
+ /// (The target feature safety obligation is automatically fulfilled by
+ /// virtue of being a method on `Two`, which can only be constructed
+ /// when it is safe to call `sse2` routines.)
+ #[target_feature(enable = "sse2")]
+ #[inline]
+ unsafe fn find_raw_impl(
+ &self,
+ start: *const u8,
+ end: *const u8,
+ ) -> Option<*const u8> {
+ self.0.find_raw(start, end)
+ }
+
+ /// Execute a search using SSE2 vectors and routines.
+ ///
+ /// # Safety
+ ///
+ /// Same as [`Two::rfind_raw`], except the distance between `start` and
+ /// `end` must be at least the size of an SSE2 vector (in bytes).
+ ///
+ /// (The target feature safety obligation is automatically fulfilled by
+ /// virtue of being a method on `Two`, which can only be constructed
+ /// when it is safe to call `sse2` routines.)
+ #[target_feature(enable = "sse2")]
+ #[inline]
+ unsafe fn rfind_raw_impl(
+ &self,
+ start: *const u8,
+ end: *const u8,
+ ) -> Option<*const u8> {
+ self.0.rfind_raw(start, end)
+ }
+
+ /// Returns an iterator over all occurrences of the needle bytes in the
+ /// given haystack.
+ ///
+ /// The iterator returned implements `DoubleEndedIterator`. This means it
+ /// can also be used to find occurrences in reverse order.
+ #[inline]
+ pub fn iter<'a, 'h>(&'a self, haystack: &'h [u8]) -> TwoIter<'a, 'h> {
+ TwoIter { searcher: self, it: generic::Iter::new(haystack) }
+ }
+}
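A usage sketch of `Two`, mirroring the `afoobar` example from the type's documentation (a hedged illustration, not a test from this diff):

```rust
use memchr::arch::x86_64::sse2::memchr::Two;

fn example() {
    let haystack = b"afoobar";
    if let Some(searcher) = Two::new(b'a', b'b') {
        // Reports a match wherever *either* needle byte occurs.
        let offsets: Vec<usize> = searcher.iter(haystack).collect();
        assert_eq!(offsets, vec![0, 4, 5]);
    }
}
```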
+
+/// An iterator over all occurrences of two possible bytes in a haystack.
+///
+/// This iterator implements `DoubleEndedIterator`, which means it can also be
+/// used to find occurrences in reverse order.
+///
+/// This iterator is created by the [`Two::iter`] method.
+///
+/// The lifetime parameters are as follows:
+///
+/// * `'a` refers to the lifetime of the underlying [`Two`] searcher.
+/// * `'h` refers to the lifetime of the haystack being searched.
+#[derive(Clone, Debug)]
+pub struct TwoIter<'a, 'h> {
+ searcher: &'a Two,
+ it: generic::Iter<'h>,
+}
+
+impl<'a, 'h> Iterator for TwoIter<'a, 'h> {
+ type Item = usize;
+
+ #[inline]
+ fn next(&mut self) -> Option<usize> {
+ // SAFETY: We rely on the generic iterator to provide valid start
+ // and end pointers, but we guarantee that any pointer returned by
+ // 'find_raw' falls within the bounds of the start and end pointer.
+ unsafe { self.it.next(|s, e| self.searcher.find_raw(s, e)) }
+ }
+
+ #[inline]
+ fn size_hint(&self) -> (usize, Option<usize>) {
+ self.it.size_hint()
+ }
+}
+
+impl<'a, 'h> DoubleEndedIterator for TwoIter<'a, 'h> {
+ #[inline]
+ fn next_back(&mut self) -> Option<usize> {
+ // SAFETY: We rely on the generic iterator to provide valid start
+ // and end pointers, but we guarantee that any pointer returned by
+ // 'rfind_raw' falls within the bounds of the start and end pointer.
+ unsafe { self.it.next_back(|s, e| self.searcher.rfind_raw(s, e)) }
+ }
+}
+
+impl<'a, 'h> core::iter::FusedIterator for TwoIter<'a, 'h> {}

+
+/// Finds all occurrences of three bytes in a haystack.
+///
+/// That is, this reports matches of one of three possible bytes. For example,
+/// searching for `a`, `b` or `o` in `afoobar` would report matches at offsets
+/// `0`, `2`, `3`, `4` and `5`.
+#[derive(Clone, Copy, Debug)]
+pub struct Three(generic::Three<__m128i>);
+
+impl Three {
+ /// Create a new searcher that finds occurrences of the needle bytes given.
+ ///
+ /// This particular searcher is specialized to use SSE2 vector instructions
+ /// that typically make it quite fast.
+ ///
+ /// If SSE2 is unavailable in the current environment, then `None` is
+ /// returned.
+ #[inline]
+ pub fn new(needle1: u8, needle2: u8, needle3: u8) -> Option<Three> {
+ if Three::is_available() {
+ // SAFETY: we check that sse2 is available above.
+ unsafe { Some(Three::new_unchecked(needle1, needle2, needle3)) }
+ } else {
+ None
+ }
+ }
+
+ /// Create a new finder specific to SSE2 vectors and routines without
+ /// checking that SSE2 is available.
+ ///
+ /// # Safety
+ ///
+ /// Callers must guarantee that it is safe to execute `sse2` instructions
+ /// in the current environment.
+ ///
+ /// Note that it is a common misconception that if one compiles for an
+ /// `x86_64` target, then they therefore automatically have access to SSE2
+ /// instructions. While this is almost always the case, it isn't true in
+ /// 100% of cases.
+ #[target_feature(enable = "sse2")]
+ #[inline]
+ pub unsafe fn new_unchecked(
+ needle1: u8,
+ needle2: u8,
+ needle3: u8,
+ ) -> Three {
+ Three(generic::Three::new(needle1, needle2, needle3))
+ }
+
+ /// Returns true when this implementation is available in the current
+ /// environment.
+ ///
+ /// When this is true, it is guaranteed that [`Three::new`] will return
+ /// a `Some` value. Similarly, when it is false, it is guaranteed that
+ /// `Three::new` will return a `None` value.
+ ///
+ /// Note also that for the lifetime of a single program, if this returns
+ /// true then it will always return true.
+ #[inline]
+ pub fn is_available() -> bool {
+ #[cfg(target_feature = "sse2")]
+ {
+ true
+ }
+ #[cfg(not(target_feature = "sse2"))]
+ {
+ false
+ }
+ }
+
+ /// Return the first occurrence of one of the needle bytes in the given
+ /// haystack. If no such occurrence exists, then `None` is returned.
+ ///
+ /// The occurrence is reported as an offset into `haystack`. Its maximum
+ /// value is `haystack.len() - 1`.
+ #[inline]
+ pub fn find(&self, haystack: &[u8]) -> Option<usize> {
+ // SAFETY: `find_raw` guarantees that if a pointer is returned, it
+ // falls within the bounds of the start and end pointers.
+ unsafe {
+ generic::search_slice_with_raw(haystack, |s, e| {
+ self.find_raw(s, e)
+ })
+ }
+ }
+
+ /// Return the last occurrence of one of the needle bytes in the given
+ /// haystack. If no such occurrence exists, then `None` is returned.
+ ///
+ /// The occurrence is reported as an offset into `haystack`. Its maximum
+ /// value is `haystack.len() - 1`.
+ #[inline]
+ pub fn rfind(&self, haystack: &[u8]) -> Option<usize> {
+ // SAFETY: `rfind_raw` guarantees that if a pointer is returned, it
+ // falls within the bounds of the start and end pointers.
+ unsafe {
+ generic::search_slice_with_raw(haystack, |s, e| {
+ self.rfind_raw(s, e)
+ })
+ }
+ }
+
+ /// Like `find`, but accepts and returns raw pointers.
+ ///
+ /// When a match is found, the pointer returned is guaranteed to be
+ /// `>= start` and `< end`.
+ ///
+ /// This routine is useful if you're already using raw pointers and would
+ /// like to avoid converting back to a slice before executing a search.
+ ///
+ /// # Safety
+ ///
+ /// * Both `start` and `end` must be valid for reads.
+ /// * Both `start` and `end` must point to an initialized value.
+ /// * Both `start` and `end` must point to the same allocated object and
+ /// must either be in bounds or at most one byte past the end of the
+ /// allocated object.
+ /// * Both `start` and `end` must be _derived from_ a pointer to the same
+ /// object.
+ /// * The distance between `start` and `end` must not overflow `isize`.
+ /// * The distance being in bounds must not rely on "wrapping around" the
+ /// address space.
+ ///
+ /// Note that callers may pass a pair of pointers such that `start >= end`.
+ /// In that case, `None` will always be returned.
+ #[inline]
+ pub unsafe fn find_raw(
+ &self,
+ start: *const u8,
+ end: *const u8,
+ ) -> Option<*const u8> {
+ if start >= end {
+ return None;
+ }
+ if end.distance(start) < __m128i::BYTES {
+ // SAFETY: We require the caller to pass valid start/end pointers.
+ return generic::fwd_byte_by_byte(start, end, |b| {
+ b == self.0.needle1()
+ || b == self.0.needle2()
+ || b == self.0.needle3()
+ });
+ }
+ // SAFETY: Building a `Three` means it's safe to call 'sse2' routines.
+ // Also, we've checked that our haystack is big enough to run on the
+ // vector routine. Pointer validity is caller's responsibility.
+ //
+ // Note that we could call `self.0.find_raw` directly here. But that
+ // means we'd have to annotate this routine with `target_feature`.
+ // Which is fine, because this routine is `unsafe` anyway and the
+ // `target_feature` obligation is met by virtue of building a `Three`.
+ // The real problem is that a routine with a `target_feature`
+ // annotation generally can't be inlined into caller code unless the
+ // caller code has the same target feature annotations. Which is maybe
+ // okay for SSE2, but we do the same thing for AVX2 where caller code
+ // probably usually doesn't have AVX2 enabled. That means that this
+ // routine can be inlined which will handle some of the short-haystack
+ // cases above without touching the architecture specific code.
+ self.find_raw_impl(start, end)
+ }
+
+ /// Like `rfind`, but accepts and returns raw pointers.
+ ///
+ /// When a match is found, the pointer returned is guaranteed to be
+ /// `>= start` and `< end`.
+ ///
+ /// This routine is useful if you're already using raw pointers and would
+ /// like to avoid converting back to a slice before executing a search.
+ ///
+ /// # Safety
+ ///
+ /// * Both `start` and `end` must be valid for reads.
+ /// * Both `start` and `end` must point to an initialized value.
+ /// * Both `start` and `end` must point to the same allocated object and
+ /// must either be in bounds or at most one byte past the end of the
+ /// allocated object.
+ /// * Both `start` and `end` must be _derived from_ a pointer to the same
+ /// object.
+ /// * The distance between `start` and `end` must not overflow `isize`.
+ /// * The distance being in bounds must not rely on "wrapping around" the
+ /// address space.
+ ///
+ /// Note that callers may pass a pair of pointers such that `start >= end`.
+ /// In that case, `None` will always be returned.
+ #[inline]
+ pub unsafe fn rfind_raw(
+ &self,
+ start: *const u8,
+ end: *const u8,
+ ) -> Option<*const u8> {
+ if start >= end {
+ return None;
+ }
+ if end.distance(start) < __m128i::BYTES {
+ // SAFETY: We require the caller to pass valid start/end pointers.
+ return generic::rev_byte_by_byte(start, end, |b| {
+ b == self.0.needle1()
+ || b == self.0.needle2()
+ || b == self.0.needle3()
+ });
+ }
+ // SAFETY: Building a `Three` means it's safe to call 'sse2' routines.
+ // Also, we've checked that our haystack is big enough to run on the
+ // vector routine. Pointer validity is caller's responsibility.
+ //
+ // See note in forward routine above for why we don't just call
+ // `self.0.rfind_raw` directly here.
+ self.rfind_raw_impl(start, end)
+ }
+
+ /// Execute a search using SSE2 vectors and routines.
+ ///
+ /// # Safety
+ ///
+ /// Same as [`Three::find_raw`], except the distance between `start` and
+ /// `end` must be at least the size of an SSE2 vector (in bytes).
+ ///
+ /// (The target feature safety obligation is automatically fulfilled by
+ /// virtue of being a method on `Three`, which can only be constructed
+ /// when it is safe to call `sse2` routines.)
+ #[target_feature(enable = "sse2")]
+ #[inline]
+ unsafe fn find_raw_impl(
+ &self,
+ start: *const u8,
+ end: *const u8,
+ ) -> Option<*const u8> {
+ self.0.find_raw(start, end)
+ }
+
+ /// Execute a search using SSE2 vectors and routines.
+ ///
+ /// # Safety
+ ///
+ /// Same as [`Three::rfind_raw`], except the distance between `start` and
+ /// `end` must be at least the size of an SSE2 vector (in bytes).
+ ///
+ /// (The target feature safety obligation is automatically fulfilled by
+ /// virtue of being a method on `Three`, which can only be constructed
+ /// when it is safe to call `sse2` routines.)
+ #[target_feature(enable = "sse2")]
+ #[inline]
+ unsafe fn rfind_raw_impl(
+ &self,
+ start: *const u8,
+ end: *const u8,
+ ) -> Option<*const u8> {
+ self.0.rfind_raw(start, end)
+ }
+
+ /// Returns an iterator over all occurrences of the needle bytes in the
+ /// given haystack.
+ ///
+ /// The iterator returned implements `DoubleEndedIterator`. This means it
+ /// can also be used to find occurrences in reverse order.
+ #[inline]
+ pub fn iter<'a, 'h>(&'a self, haystack: &'h [u8]) -> ThreeIter<'a, 'h> {
+ ThreeIter { searcher: self, it: generic::Iter::new(haystack) }
+ }
+}
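And the analogous sketch for `Three`, matching the offsets given in the type-level documentation above:

```rust
use memchr::arch::x86_64::sse2::memchr::Three;

fn example() {
    let haystack = b"afoobar";
    if let Some(searcher) = Three::new(b'a', b'b', b'o') {
        // `a`, `b` or `o` in "afoobar": offsets 0, 2, 3, 4 and 5.
        let offsets: Vec<usize> = searcher.iter(haystack).collect();
        assert_eq!(offsets, vec![0, 2, 3, 4, 5]);
    }
}
```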
+
+/// An iterator over all occurrences of three possible bytes in a haystack.
+///
+/// This iterator implements `DoubleEndedIterator`, which means it can also be
+/// used to find occurrences in reverse order.
+///
+/// This iterator is created by the [`Three::iter`] method.
+///
+/// The lifetime parameters are as follows:
+///
+/// * `'a` refers to the lifetime of the underlying [`Three`] searcher.
+/// * `'h` refers to the lifetime of the haystack being searched.
+#[derive(Clone, Debug)]
+pub struct ThreeIter<'a, 'h> {
+ searcher: &'a Three,
+ it: generic::Iter<'h>,
+}
+
+impl<'a, 'h> Iterator for ThreeIter<'a, 'h> {
+ type Item = usize;
+
+ #[inline]
+ fn next(&mut self) -> Option<usize> {
+ // SAFETY: We rely on the generic iterator to provide valid start
+ // and end pointers, but we guarantee that any pointer returned by
+ // 'find_raw' falls within the bounds of the start and end pointer.
+ unsafe { self.it.next(|s, e| self.searcher.find_raw(s, e)) }
+ }
+
+ #[inline]
+ fn size_hint(&self) -> (usize, Option<usize>) {
+ self.it.size_hint()
+ }
+}
+
+impl<'a, 'h> DoubleEndedIterator for ThreeIter<'a, 'h> {
+ #[inline]
+ fn next_back(&mut self) -> Option<usize> {
+ // SAFETY: We rely on the generic iterator to provide valid start
+ // and end pointers, but we guarantee that any pointer returned by
+ // 'rfind_raw' falls within the bounds of the start and end pointer.
+ unsafe { self.it.next_back(|s, e| self.searcher.rfind_raw(s, e)) }
+ }
+}
+
+impl<'a, 'h> core::iter::FusedIterator for ThreeIter<'a, 'h> {}
+
+#[cfg(test)]
+mod tests {
+ use super::*;
+
+ define_memchr_quickcheck!(super);
+
+ #[test]
+ fn forward_one() {
+ crate::tests::memchr::Runner::new(1).forward_iter(
+ |haystack, needles| {
+ Some(One::new(needles[0])?.iter(haystack).collect())
+ },
+ )
+ }
+
+ #[test]
+ fn reverse_one() {
+ crate::tests::memchr::Runner::new(1).reverse_iter(
+ |haystack, needles| {
+ Some(One::new(needles[0])?.iter(haystack).rev().collect())
+ },
+ )
+ }
+
+ #[test]
+ fn count_one() {
+ crate::tests::memchr::Runner::new(1).count_iter(|haystack, needles| {
+ Some(One::new(needles[0])?.iter(haystack).count())
+ })
+ }
+
+ #[test]
+ fn forward_two() {
+ crate::tests::memchr::Runner::new(2).forward_iter(
+ |haystack, needles| {
+ let n1 = needles.get(0).copied()?;
+ let n2 = needles.get(1).copied()?;
+ Some(Two::new(n1, n2)?.iter(haystack).collect())
+ },
+ )
+ }
+
+ #[test]
+ fn reverse_two() {
+ crate::tests::memchr::Runner::new(2).reverse_iter(
+ |haystack, needles| {
+ let n1 = needles.get(0).copied()?;
+ let n2 = needles.get(1).copied()?;
+ Some(Two::new(n1, n2)?.iter(haystack).rev().collect())
+ },
+ )
+ }
+
+ #[test]
+ fn forward_three() {
+ crate::tests::memchr::Runner::new(3).forward_iter(
+ |haystack, needles| {
+ let n1 = needles.get(0).copied()?;
+ let n2 = needles.get(1).copied()?;
+ let n3 = needles.get(2).copied()?;
+ Some(Three::new(n1, n2, n3)?.iter(haystack).collect())
+ },
+ )
+ }
+
+ #[test]
+ fn reverse_three() {
+ crate::tests::memchr::Runner::new(3).reverse_iter(
+ |haystack, needles| {
+ let n1 = needles.get(0).copied()?;
+ let n2 = needles.get(1).copied()?;
+ let n3 = needles.get(2).copied()?;
+ Some(Three::new(n1, n2, n3)?.iter(haystack).rev().collect())
+ },
+ )
+ }
+}
diff --git a/vendor/memchr/src/arch/x86_64/sse2/mod.rs b/vendor/memchr/src/arch/x86_64/sse2/mod.rs
new file mode 100644
index 000000000..bcb830790
--- /dev/null
+++ b/vendor/memchr/src/arch/x86_64/sse2/mod.rs
@@ -0,0 +1,6 @@
+/*!
+Algorithms for the `x86_64` target using 128-bit vectors via SSE2.
+*/
+
+pub mod memchr;
+pub mod packedpair;
diff --git a/vendor/memchr/src/arch/x86_64/sse2/packedpair.rs b/vendor/memchr/src/arch/x86_64/sse2/packedpair.rs
new file mode 100644
index 000000000..c8b5b9999
--- /dev/null
+++ b/vendor/memchr/src/arch/x86_64/sse2/packedpair.rs
@@ -0,0 +1,232 @@
+/*!
+A 128-bit vector implementation of the "packed pair" SIMD algorithm.
+
+The "packed pair" algorithm is based on the [generic SIMD] algorithm. The main
+difference is that it (by default) uses a background distribution of byte
+frequencies to heuristically select the pair of bytes to search for.
+
+[generic SIMD]: http://0x80.pl/articles/simd-strfind.html#first-and-last
+*/
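Before the SIMD specifics below, the underlying idea can be shown in scalar form. This is a hypothetical illustration of the prefilter-then-confirm strategy (the helper and its signature are invented for exposition; the real finder performs the two-byte test with 128-bit vector compares):

```rust
/// Scalar sketch of the "packed pair" idea: check a cheap two-byte
/// predicate at each candidate position, and only verify the full
/// needle when the predicate passes.
fn packed_pair_scalar(
    haystack: &[u8],
    needle: &[u8],
    idx1: usize,
    idx2: usize,
) -> Option<usize> {
    let (b1, b2) = (needle[idx1], needle[idx2]);
    for pos in 0..=haystack.len().checked_sub(needle.len())? {
        // Cheap predicate: do both chosen bytes line up here?
        if haystack[pos + idx1] == b1 && haystack[pos + idx2] == b2 {
            // Expensive confirmation of the whole needle.
            if &haystack[pos..pos + needle.len()] == needle {
                return Some(pos);
            }
        }
    }
    None
}
```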
+
+use core::arch::x86_64::__m128i;
+
+use crate::arch::{all::packedpair::Pair, generic::packedpair};
+
+/// A "packed pair" finder that uses 128-bit vector operations.
+///
+/// This finder picks two bytes that it believes have high predictive power
+/// for indicating an overall match of a needle. Depending on whether
+/// `Finder::find` or `Finder::find_prefilter` is used, it reports offsets
+/// where the needle matches or could match. In the prefilter case, candidates
+/// are reported whenever the [`Pair`] of bytes given matches.
+#[derive(Clone, Copy, Debug)]
+pub struct Finder(packedpair::Finder<__m128i>);
+
+impl Finder {
+ /// Create a new pair searcher. The searcher returned can either report
+ /// exact matches of `needle` or act as a prefilter and report candidate
+ /// positions of `needle`.
+ ///
+ /// If SSE2 is unavailable in the current environment or if a [`Pair`]
+ /// could not be constructed from the needle given, then `None` is
+ /// returned.
+ #[inline]
+ pub fn new(needle: &[u8]) -> Option<Finder> {
+ Finder::with_pair(needle, Pair::new(needle)?)
+ }
+
+ /// Create a new "packed pair" finder using the pair of bytes given.
+ ///
+ /// This constructor permits callers to control precisely which pair of
+ /// bytes is used as a predicate.
+ ///
+ /// If SSE2 is unavailable in the current environment, then `None` is
+ /// returned.
+ #[inline]
+ pub fn with_pair(needle: &[u8], pair: Pair) -> Option<Finder> {
+ if Finder::is_available() {
+ // SAFETY: we check that sse2 is available above. We are also
+ // guaranteed to have needle.len() > 1 because we have a valid
+ // Pair.
+ unsafe { Some(Finder::with_pair_impl(needle, pair)) }
+ } else {
+ None
+ }
+ }
+
+ /// Create a new `Finder` specific to SSE2 vectors and routines.
+ ///
+ /// # Safety
+ ///
+ /// Same as the safety for `packedpair::Finder::new`, and callers must also
+ /// ensure that SSE2 is available.
+ #[target_feature(enable = "sse2")]
+ #[inline]
+ unsafe fn with_pair_impl(needle: &[u8], pair: Pair) -> Finder {
+ let finder = packedpair::Finder::<__m128i>::new(needle, pair);
+ Finder(finder)
+ }
+
+ /// Returns true when this implementation is available in the current
+ /// environment.
+ ///
+ /// When this is true, it is guaranteed that [`Finder::with_pair`] will
+ /// return a `Some` value. Similarly, when it is false, it is guaranteed
+ /// that `Finder::with_pair` will return a `None` value. Notice that this
+ /// does not guarantee that [`Finder::new`] will return a `Finder`. Namely,
+ /// even when `Finder::is_available` is true, it is not guaranteed that a
+ /// valid [`Pair`] can be found from the needle given.
+ ///
+ /// Note also that for the lifetime of a single program, if this returns
+ /// true then it will always return true.
+ #[inline]
+ pub fn is_available() -> bool {
+ #[cfg(not(target_feature = "sse2"))]
+ {
+ false
+ }
+ #[cfg(target_feature = "sse2")]
+ {
+ true
+ }
+ }
+
+ /// Execute a search using SSE2 vectors and routines.
+ ///
+ /// # Panics
+ ///
+ /// When `haystack.len()` is less than [`Finder::min_haystack_len`].
+ #[inline]
+ pub fn find(&self, haystack: &[u8], needle: &[u8]) -> Option<usize> {
+ // SAFETY: Building a `Finder` means it's safe to call 'sse2' routines.
+ unsafe { self.find_impl(haystack, needle) }
+ }
+
+ /// Run this finder on the given haystack as a prefilter.
+ ///
+ /// If a candidate match is found, then an offset where the needle *could*
+ /// begin in the haystack is returned.
+ ///
+ /// # Panics
+ ///
+ /// When `haystack.len()` is less than [`Finder::min_haystack_len`].
+ #[inline]
+ pub fn find_prefilter(&self, haystack: &[u8]) -> Option<usize> {
+ // SAFETY: Building a `Finder` means it's safe to call 'sse2' routines.
+ unsafe { self.find_prefilter_impl(haystack) }
+ }
+
+ /// Execute a search using SSE2 vectors and routines.
+ ///
+ /// # Panics
+ ///
+ /// When `haystack.len()` is less than [`Finder::min_haystack_len`].
+ ///
+ /// # Safety
+ ///
+ /// (The target feature safety obligation is automatically fulfilled by
+ /// virtue of being a method on `Finder`, which can only be constructed
+ /// when it is safe to call `sse2` routines.)
+ #[target_feature(enable = "sse2")]
+ #[inline]
+ unsafe fn find_impl(
+ &self,
+ haystack: &[u8],
+ needle: &[u8],
+ ) -> Option<usize> {
+ self.0.find(haystack, needle)
+ }
+
+ /// Execute a prefilter search using SSE2 vectors and routines.
+ ///
+ /// # Panics
+ ///
+ /// When `haystack.len()` is less than [`Finder::min_haystack_len`].
+ ///
+ /// # Safety
+ ///
+ /// (The target feature safety obligation is automatically fulfilled by
+ /// virtue of being a method on `Finder`, which can only be constructed
+ /// when it is safe to call `sse2` routines.)
+ #[target_feature(enable = "sse2")]
+ #[inline]
+ unsafe fn find_prefilter_impl(&self, haystack: &[u8]) -> Option<usize> {
+ self.0.find_prefilter(haystack)
+ }
+
+ /// Returns the pair of offsets (into the needle) used to check as a
+ /// predicate before confirming whether a needle exists at a particular
+ /// position.
+ #[inline]
+ pub fn pair(&self) -> &Pair {
+ self.0.pair()
+ }
+
+ /// Returns the minimum haystack length that this `Finder` can search.
+ ///
+ /// Using a haystack with length smaller than this in a search will result
+ /// in a panic. The reason for this restriction is that this finder is
+ /// meant to be a low-level component that is part of a larger substring
+ /// strategy. In that sense, it avoids trying to handle all cases and
+ /// instead only handles the cases that it can handle very well.
+ #[inline]
+ pub fn min_haystack_len(&self) -> usize {
+ self.0.min_haystack_len()
+ }
+}
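A usage sketch for the `Finder` above (the haystack and offsets here are invented for illustration):

```rust
use memchr::arch::x86_64::sse2::packedpair::Finder;

fn example() {
    let haystack = b"foo quux foo bar quux";
    let needle = b"quux";
    if let Some(finder) = Finder::new(needle) {
        // This finder is a low-level building block: callers must
        // check `min_haystack_len` themselves before searching.
        if haystack.len() >= finder.min_haystack_len() {
            assert_eq!(Some(4), finder.find(haystack, needle));
        }
    }
}
```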
+
+#[cfg(test)]
+mod tests {
+ use super::*;
+
+ fn find(haystack: &[u8], needle: &[u8]) -> Option<Option<usize>> {
+ let f = Finder::new(needle)?;
+ if haystack.len() < f.min_haystack_len() {
+ return None;
+ }
+ Some(f.find(haystack, needle))
+ }
+
+ define_substring_forward_quickcheck!(find);
+
+ #[test]
+ fn forward_substring() {
+ crate::tests::substring::Runner::new().fwd(find).run()
+ }
+
+ #[test]
+ fn forward_packedpair() {
+ fn find(
+ haystack: &[u8],
+ needle: &[u8],
+ index1: u8,
+ index2: u8,
+ ) -> Option<Option<usize>> {
+ let pair = Pair::with_indices(needle, index1, index2)?;
+ let f = Finder::with_pair(needle, pair)?;
+ if haystack.len() < f.min_haystack_len() {
+ return None;
+ }
+ Some(f.find(haystack, needle))
+ }
+ crate::tests::packedpair::Runner::new().fwd(find).run()
+ }
+
+ #[test]
+ fn forward_packedpair_prefilter() {
+ fn find(
+ haystack: &[u8],
+ needle: &[u8],
+ index1: u8,
+ index2: u8,
+ ) -> Option<Option<usize>> {
+ let pair = Pair::with_indices(needle, index1, index2)?;
+ let f = Finder::with_pair(needle, pair)?;
+ if haystack.len() < f.min_haystack_len() {
+ return None;
+ }
+ Some(f.find_prefilter(haystack))
+ }
+ crate::tests::packedpair::Runner::new().fwd(find).run()
+ }
+}
diff --git a/vendor/memchr/src/cow.rs b/vendor/memchr/src/cow.rs
index 0b7d0dad0..f29164572 100644
--- a/vendor/memchr/src/cow.rs
+++ b/vendor/memchr/src/cow.rs
@@ -4,22 +4,23 @@ use core::ops;
///
/// The purpose of this type is to permit usage of a "borrowed or owned
/// byte string" in a way that keeps std/no-std compatibility. That is, in
-/// no-std mode, this type devolves into a simple &[u8] with no owned variant
-/// available. We can't just use a plain Cow because Cow is not in core.
+/// no-std/alloc mode, this type devolves into a simple &[u8] with no owned
+/// variant available. We can't just use a plain Cow because Cow is not in
+/// core.
#[derive(Clone, Debug)]
pub struct CowBytes<'a>(Imp<'a>);
-// N.B. We don't use std::borrow::Cow here since we can get away with a
+// N.B. We don't use alloc::borrow::Cow here since we can get away with a
// Box<[u8]> for our use case, which is 1/3 smaller than the Vec<u8> that
// a Cow<[u8]> would use.
-#[cfg(feature = "std")]
+#[cfg(feature = "alloc")]
#[derive(Clone, Debug)]
enum Imp<'a> {
Borrowed(&'a [u8]),
- Owned(Box<[u8]>),
+ Owned(alloc::boxed::Box<[u8]>),
}
-#[cfg(not(feature = "std"))]
+#[cfg(not(feature = "alloc"))]
#[derive(Clone, Debug)]
struct Imp<'a>(&'a [u8]);
@@ -35,21 +36,21 @@ impl<'a> ops::Deref for CowBytes<'a> {
impl<'a> CowBytes<'a> {
/// Create a new borrowed CowBytes.
#[inline(always)]
- pub fn new<B: ?Sized + AsRef<[u8]>>(bytes: &'a B) -> CowBytes<'a> {
+ pub(crate) fn new<B: ?Sized + AsRef<[u8]>>(bytes: &'a B) -> CowBytes<'a> {
CowBytes(Imp::new(bytes.as_ref()))
}
/// Create a new owned CowBytes.
- #[cfg(feature = "std")]
+ #[cfg(feature = "alloc")]
#[inline(always)]
- pub fn new_owned(bytes: Box<[u8]>) -> CowBytes<'static> {
+ fn new_owned(bytes: alloc::boxed::Box<[u8]>) -> CowBytes<'static> {
CowBytes(Imp::Owned(bytes))
}
/// Return a borrowed byte string, regardless of whether this is an owned
/// or borrowed byte string internally.
#[inline(always)]
- pub fn as_slice(&self) -> &[u8] {
+ pub(crate) fn as_slice(&self) -> &[u8] {
self.0.as_slice()
}
@@ -57,39 +58,48 @@ impl<'a> CowBytes<'a> {
///
/// If this is already an owned byte string internally, then this is a
/// no-op. Otherwise, the internal byte string is copied.
- #[cfg(feature = "std")]
+ #[cfg(feature = "alloc")]
#[inline(always)]
- pub fn into_owned(self) -> CowBytes<'static> {
+ pub(crate) fn into_owned(self) -> CowBytes<'static> {
match self.0 {
- Imp::Borrowed(b) => CowBytes::new_owned(Box::from(b)),
+ Imp::Borrowed(b) => {
+ CowBytes::new_owned(alloc::boxed::Box::from(b))
+ }
Imp::Owned(b) => CowBytes::new_owned(b),
}
}
}
impl<'a> Imp<'a> {
- #[cfg(feature = "std")]
- #[inline(always)]
- pub fn new(bytes: &'a [u8]) -> Imp<'a> {
- Imp::Borrowed(bytes)
- }
-
- #[cfg(not(feature = "std"))]
#[inline(always)]
pub fn new(bytes: &'a [u8]) -> Imp<'a> {
- Imp(bytes)
+ #[cfg(feature = "alloc")]
+ {
+ Imp::Borrowed(bytes)
+ }
+ #[cfg(not(feature = "alloc"))]
+ {
+ Imp(bytes)
+ }
}
- #[cfg(feature = "std")]
+ #[cfg(feature = "alloc")]
#[inline(always)]
pub fn as_slice(&self) -> &[u8] {
- match self {
- Imp::Owned(ref x) => x,
- Imp::Borrowed(x) => x,
+ #[cfg(feature = "alloc")]
+ {
+ match self {
+ Imp::Owned(ref x) => x,
+ Imp::Borrowed(x) => x,
+ }
+ }
+ #[cfg(not(feature = "alloc"))]
+ {
+ self.0
}
}
- #[cfg(not(feature = "std"))]
+ #[cfg(not(feature = "alloc"))]
#[inline(always)]
pub fn as_slice(&self) -> &[u8] {
self.0
diff --git a/vendor/memchr/src/ext.rs b/vendor/memchr/src/ext.rs
new file mode 100644
index 000000000..1bb21dd8f
--- /dev/null
+++ b/vendor/memchr/src/ext.rs
@@ -0,0 +1,52 @@
+/// A trait for adding some helper routines to pointers.
+pub(crate) trait Pointer {
+ /// Returns the distance, in units of `T`, between `self` and `origin`.
+ ///
+ /// # Safety
+ ///
+ /// Same as `ptr::offset_from`, plus the requirement that `self >= origin`.
+ unsafe fn distance(self, origin: Self) -> usize;
+
+ /// Casts this pointer to `usize`.
+ ///
+ /// Callers should not convert the `usize` back to a pointer if at all
+ /// possible. (And if you believe it's necessary, open an issue to discuss
+ /// why. Otherwise, it has the potential to violate pointer provenance.)
+ /// The purpose of this function is just to be able to do arithmetic, i.e.,
+ /// computing offsets or alignments.
+ fn as_usize(self) -> usize;
+}
+
+impl<T> Pointer for *const T {
+ unsafe fn distance(self, origin: *const T) -> usize {
+ // TODO: Replace with `ptr::sub_ptr` once stabilized.
+ usize::try_from(self.offset_from(origin)).unwrap_unchecked()
+ }
+
+ fn as_usize(self) -> usize {
+ self as usize
+ }
+}
+
+impl<T> Pointer for *mut T {
+ unsafe fn distance(self, origin: *mut T) -> usize {
+ (self as *const T).distance(origin as *const T)
+ }
+
+ fn as_usize(self) -> usize {
+ (self as *const T).as_usize()
+ }
+}
+
+/// A trait for adding some helper routines to raw bytes.
+pub(crate) trait Byte {
+ /// Converts this byte to a `char` if it's ASCII. Otherwise panics.
+ fn to_char(self) -> char;
+}
+
+impl Byte for u8 {
+ fn to_char(self) -> char {
+ assert!(self.is_ascii());
+ char::from(self)
+ }
+}
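A crate-internal sketch of how `Pointer::distance` is typically used: recovering an element count from the two ends of a slice (this function is illustrative only, not part of the diff):

```rust
use crate::ext::Pointer;

fn slice_len_via_pointers(haystack: &[u8]) -> usize {
    let start = haystack.as_ptr();
    // SAFETY: `end` is derived from the same allocation as `start`
    // and `end >= start`, satisfying the `distance` contract.
    let end = unsafe { start.add(haystack.len()) };
    unsafe { end.distance(start) } // equals haystack.len()
}
```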
diff --git a/vendor/memchr/src/lib.rs b/vendor/memchr/src/lib.rs
index e0b4ce3fd..05b2c13f9 100644
--- a/vendor/memchr/src/lib.rs
+++ b/vendor/memchr/src/lib.rs
@@ -113,9 +113,9 @@ solution presented above, however, its throughput can easily be over an
order of magnitude faster. This is a good general purpose trade off to make.
You rarely lose, but often gain big.
-**NOTE:** The name `memchr` comes from the corresponding routine in libc. A key
-advantage of using this library is that its performance is not tied to its
-quality of implementation in the libc you happen to be using, which can vary
+**NOTE:** The name `memchr` comes from the corresponding routine in `libc`. A
+key advantage of using this library is that its performance is not tied to its
+quality of implementation in the `libc` you happen to be using, which can vary
greatly from platform to platform.
But what about substring search? This one is a bit more complicated. The
@@ -131,32 +131,58 @@ implementation in the standard library, even if only for searching on UTF-8?
The reason is that the implementation details for using SIMD in the standard
library haven't quite been worked out yet.
-**NOTE:** Currently, only `x86_64` targets have highly accelerated
-implementations of substring search. For `memchr`, all targets have
-somewhat-accelerated implementations, while only `x86_64` targets have highly
-accelerated implementations. This limitation is expected to be lifted once the
-standard library exposes a platform independent SIMD API.
+**NOTE:** Currently, only `x86_64`, `wasm32` and `aarch64` targets have vector
+accelerated implementations of `memchr` (and friends) and `memmem`.
# Crate features
-* **std** - When enabled (the default), this will permit this crate to use
- features specific to the standard library. Currently, the only thing used
- from the standard library is runtime SIMD CPU feature detection. This means
- that this feature must be enabled to get AVX accelerated routines. When
- `std` is not enabled, this crate will still attempt to use SSE2 accelerated
- routines on `x86_64`.
-* **libc** - When enabled (**not** the default), this library will use your
- platform's libc implementation of `memchr` (and `memrchr` on Linux). This
- can be useful on non-`x86_64` targets where the fallback implementation in
- this crate is not as good as the one found in your libc. All other routines
- (e.g., `memchr[23]` and substring search) unconditionally use the
- implementation in this crate.
+* **std** - When enabled (the default), this will permit features specific to
+the standard library. Currently, the only thing used from the standard library
+is runtime SIMD CPU feature detection. This means that this feature must be
+enabled to get AVX2 accelerated routines on `x86_64` targets without enabling
+the `avx2` feature at compile time, for example. When `std` is not enabled,
+this crate will still attempt to use SSE2 accelerated routines on `x86_64`. It
+will also use AVX2 accelerated routines when the `avx2` feature is enabled at
+compile time. In general, enable this feature if you can.
+* **alloc** - When enabled (the default), APIs in this crate requiring some
+kind of allocation will become available. For example, the
+[`memmem::Finder::into_owned`](crate::memmem::Finder::into_owned) API and the
+[`arch::all::shiftor`](crate::arch::all::shiftor) substring search
+implementation. Otherwise, this crate is designed from the ground up to be
+usable in core-only contexts, so the `alloc` feature doesn't add much
+currently. Notably, disabling `std` but enabling `alloc` will **not** result
+in the use of AVX2 on `x86_64` targets unless the `avx2` feature is enabled
+at compile time. (With `std` enabled, AVX2 can be used even without the `avx2`
+feature enabled at compile time by way of runtime CPU feature detection.)
+* **logging** - When enabled (disabled by default), the `log` crate is used
+to emit log messages about what kinds of `memchr` and `memmem` algorithms
+are used. Namely, both `memchr` and `memmem` have a number of different
+implementation choices depending on the target and CPU, and the log messages
+can help show what specific implementations are being used. Generally, this is
+useful for debugging performance issues.
+* **libc** - **DEPRECATED**. Previously, this enabled the use of the target's
+`memchr` function from whatever `libc` was linked into the program. This
+feature is now a no-op because this crate's implementation of `memchr` should
+now be sufficiently fast on a number of platforms that `libc` should no longer
+be needed. (This feature is somewhat of a holdover from this crate's origins.
+Originally, this crate was literally just a safe wrapper function around the
+`memchr` function from `libc`.)
*/
#![deny(missing_docs)]
-#![cfg_attr(not(feature = "std"), no_std)]
-// It's not worth trying to gate all code on just miri, so turn off relevant
-// dead code warnings.
+#![no_std]
+// It's just not worth trying to squash all dead code warnings. Pretty
+// unfortunate IMO. Not really sure how to fix this other than to either
+// live with it or sprinkle a whole mess of `cfg` annotations everywhere.
+#![cfg_attr(
+ not(any(
+ all(target_arch = "x86_64", target_feature = "sse2"),
+ target_arch = "wasm32",
+ target_arch = "aarch64",
+ )),
+ allow(dead_code)
+)]
+// Same deal for miri.
#![cfg_attr(miri, allow(dead_code, unused_macros))]
// Supporting 8-bit (or others) would be fine. If you need it, please submit a
@@ -168,14 +194,28 @@ standard library exposes a platform independent SIMD API.
)))]
compile_error!("memchr currently not supported on non-{16,32,64}");
+#[cfg(any(test, feature = "std"))]
+extern crate std;
+
+#[cfg(any(test, feature = "alloc"))]
+extern crate alloc;
+
pub use crate::memchr::{
memchr, memchr2, memchr2_iter, memchr3, memchr3_iter, memchr_iter,
memrchr, memrchr2, memrchr2_iter, memrchr3, memrchr3_iter, memrchr_iter,
Memchr, Memchr2, Memchr3,
};
+#[macro_use]
+mod macros;
+
+#[cfg(test)]
+#[macro_use]
+mod tests;
+
+pub mod arch;
mod cow;
+mod ext;
mod memchr;
pub mod memmem;
-#[cfg(test)]
-mod tests;
+mod vector;
diff --git a/vendor/memchr/src/macros.rs b/vendor/memchr/src/macros.rs
new file mode 100644
index 000000000..31b4ca381
--- /dev/null
+++ b/vendor/memchr/src/macros.rs
@@ -0,0 +1,20 @@
+// Some feature combinations result in some of these macros never being used.
+// Which is fine. Just squash the warnings.
+#![allow(unused_macros)]
+
+macro_rules! log {
+ ($($tt:tt)*) => {
+ #[cfg(feature = "logging")]
+ {
+ $($tt)*
+ }
+ }
+}
+
+macro_rules! debug {
+ ($($tt:tt)*) => { log!(log::debug!($($tt)*)) }
+}
+
+macro_rules! trace {
+ ($($tt:tt)*) => { log!(log::trace!($($tt)*)) }
+}
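Inside the crate these expand to nothing unless the `logging` feature is enabled. A sketch of the intended use (the message strings here are invented for illustration):

```rust
fn pick_searcher() {
    // No-ops unless built with `--features logging` and the host
    // application has installed a `log` backend.
    debug!("memchr: choosing SSE2 searcher");
    trace!("memchr: haystack shorter than one vector, using fallback");
}
```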
diff --git a/vendor/memchr/src/memchr.rs b/vendor/memchr/src/memchr.rs
new file mode 100644
index 000000000..68adb9a63
--- /dev/null
+++ b/vendor/memchr/src/memchr.rs
@@ -0,0 +1,903 @@
+use core::iter::Rev;
+
+use crate::arch::generic::memchr as generic;
+
+/// Search for the first occurrence of a byte in a slice.
+///
+/// This returns the index corresponding to the first occurrence of `needle` in
+/// `haystack`, or `None` if one is not found. If an index is returned, it is
+/// guaranteed to be less than `haystack.len()`.
+///
+/// While this is semantically the same as something like
+/// `haystack.iter().position(|&b| b == needle)`, this routine will attempt to
+/// use highly optimized vector operations that can be an order of magnitude
+/// faster (or more).
+///
+/// # Example
+///
+/// This shows how to find the first position of a byte in a byte string.
+///
+/// ```
+/// use memchr::memchr;
+///
+/// let haystack = b"the quick brown fox";
+/// assert_eq!(memchr(b'k', haystack), Some(8));
+/// ```
+#[inline]
+pub fn memchr(needle: u8, haystack: &[u8]) -> Option<usize> {
+ // SAFETY: memchr_raw, when a match is found, always returns a valid
+ // pointer between start and end.
+ unsafe {
+ generic::search_slice_with_raw(haystack, |start, end| {
+ memchr_raw(needle, start, end)
+ })
+ }
+}
+
+/// Search for the last occurrence of a byte in a slice.
+///
+/// This returns the index corresponding to the last occurrence of `needle` in
+/// `haystack`, or `None` if one is not found. If an index is returned, it is
+/// guaranteed to be less than `haystack.len()`.
+///
+/// While this is semantically the same as something like
+/// `haystack.iter().rposition(|&b| b == needle)`, this routine will attempt to
+/// use highly optimized vector operations that can be an order of magnitude
+/// faster (or more).
+///
+/// # Example
+///
+/// This shows how to find the last position of a byte in a byte string.
+///
+/// ```
+/// use memchr::memrchr;
+///
+/// let haystack = b"the quick brown fox";
+/// assert_eq!(memrchr(b'o', haystack), Some(17));
+/// ```
+#[inline]
+pub fn memrchr(needle: u8, haystack: &[u8]) -> Option<usize> {
+ // SAFETY: memrchr_raw, when a match is found, always returns a valid
+ // pointer between start and end.
+ unsafe {
+ generic::search_slice_with_raw(haystack, |start, end| {
+ memrchr_raw(needle, start, end)
+ })
+ }
+}
+
+/// Search for the first occurrence of two possible bytes in a haystack.
+///
+/// This returns the index corresponding to the first occurrence of one of the
+/// needle bytes in `haystack`, or `None` if one is not found. If an index is
+/// returned, it is guaranteed to be less than `haystack.len()`.
+///
+/// While this is semantically the same as something like
+/// `haystack.iter().position(|&b| b == needle1 || b == needle2)`, this routine
+/// will attempt to use highly optimized vector operations that can be an order
+/// of magnitude faster (or more).
+///
+/// # Example
+///
+/// This shows how to find the first position of one of two possible bytes in a
+/// haystack.
+///
+/// ```
+/// use memchr::memchr2;
+///
+/// let haystack = b"the quick brown fox";
+/// assert_eq!(memchr2(b'k', b'q', haystack), Some(4));
+/// ```
+#[inline]
+pub fn memchr2(needle1: u8, needle2: u8, haystack: &[u8]) -> Option<usize> {
+ // SAFETY: memchr2_raw, when a match is found, always returns a valid
+ // pointer between start and end.
+ unsafe {
+ generic::search_slice_with_raw(haystack, |start, end| {
+ memchr2_raw(needle1, needle2, start, end)
+ })
+ }
+}
+
+/// Search for the last occurrence of two possible bytes in a haystack.
+///
+/// This returns the index corresponding to the last occurrence of one of the
+/// needle bytes in `haystack`, or `None` if one is not found. If an index is
+/// returned, it is guaranteed to be less than `haystack.len()`.
+///
+/// While this is semantically the same as something like
+/// `haystack.iter().rposition(|&b| b == needle1 || b == needle2)`, this
+/// routine will attempt to use highly optimized vector operations that can be
+/// an order of magnitude faster (or more).
+///
+/// # Example
+///
+/// This shows how to find the last position of one of two possible bytes in a
+/// haystack.
+///
+/// ```
+/// use memchr::memrchr2;
+///
+/// let haystack = b"the quick brown fox";
+/// assert_eq!(memrchr2(b'k', b'o', haystack), Some(17));
+/// ```
+#[inline]
+pub fn memrchr2(needle1: u8, needle2: u8, haystack: &[u8]) -> Option<usize> {
+ // SAFETY: memrchr2_raw, when a match is found, always returns a valid
+ // pointer between start and end.
+ unsafe {
+ generic::search_slice_with_raw(haystack, |start, end| {
+ memrchr2_raw(needle1, needle2, start, end)
+ })
+ }
+}
+
+/// Search for the first occurrence of three possible bytes in a haystack.
+///
+/// This returns the index corresponding to the first occurrence of one of the
+/// needle bytes in `haystack`, or `None` if one is not found. If an index is
+/// returned, it is guaranteed to be less than `haystack.len()`.
+///
+/// While this is semantically the same as something like
+/// `haystack.iter().position(|&b| b == needle1 || b == needle2 || b == needle3)`,
+/// this routine will attempt to use highly optimized vector operations that
+/// can be an order of magnitude faster (or more).
+///
+/// # Example
+///
+/// This shows how to find the first position of one of three possible bytes in
+/// a haystack.
+///
+/// ```
+/// use memchr::memchr3;
+///
+/// let haystack = b"the quick brown fox";
+/// assert_eq!(memchr3(b'k', b'q', b'u', haystack), Some(4));
+/// ```
+#[inline]
+pub fn memchr3(
+ needle1: u8,
+ needle2: u8,
+ needle3: u8,
+ haystack: &[u8],
+) -> Option<usize> {
+ // SAFETY: memchr3_raw, when a match is found, always returns a valid
+ // pointer between start and end.
+ unsafe {
+ generic::search_slice_with_raw(haystack, |start, end| {
+ memchr3_raw(needle1, needle2, needle3, start, end)
+ })
+ }
+}
+
+/// Search for the last occurrence of three possible bytes in a haystack.
+///
+/// This returns the index corresponding to the last occurrence of one of the
+/// needle bytes in `haystack`, or `None` if one is not found. If an index is
+/// returned, it is guaranteed to be less than `haystack.len()`.
+///
+/// While this is semantically the same as something like
+/// `haystack.iter().rposition(|&b| b == needle1 || b == needle2 || b == needle3)`,
+/// this routine will attempt to use highly optimized vector operations that
+/// can be an order of magnitude faster (or more).
+///
+/// # Example
+///
+/// This shows how to find the last position of one of three possible bytes in
+/// a haystack.
+///
+/// ```
+/// use memchr::memrchr3;
+///
+/// let haystack = b"the quick brown fox";
+/// assert_eq!(memrchr3(b'k', b'o', b'n', haystack), Some(17));
+/// ```
+#[inline]
+pub fn memrchr3(
+ needle1: u8,
+ needle2: u8,
+ needle3: u8,
+ haystack: &[u8],
+) -> Option<usize> {
+ // SAFETY: memrchr3_raw, when a match is found, always returns a valid
+ // pointer between start and end.
+ unsafe {
+ generic::search_slice_with_raw(haystack, |start, end| {
+ memrchr3_raw(needle1, needle2, needle3, start, end)
+ })
+ }
+}
+
+/// Returns an iterator over all occurrences of the needle in a haystack.
+///
+/// The iterator returned implements `DoubleEndedIterator`. This means it
+/// can also be used to find occurrences in reverse order.
+#[inline]
+pub fn memchr_iter<'h>(needle: u8, haystack: &'h [u8]) -> Memchr<'h> {
+ Memchr::new(needle, haystack)
+}
+
+/// Returns an iterator over all occurrences of the needle in a haystack, in
+/// reverse.
+#[inline]
+pub fn memrchr_iter(needle: u8, haystack: &[u8]) -> Rev<Memchr<'_>> {
+ Memchr::new(needle, haystack).rev()
+}
+
+/// Returns an iterator over all occurrences of the needles in a haystack.
+///
+/// The iterator returned implements `DoubleEndedIterator`. This means it
+/// can also be used to find occurrences in reverse order.
+#[inline]
+pub fn memchr2_iter<'h>(
+ needle1: u8,
+ needle2: u8,
+ haystack: &'h [u8],
+) -> Memchr2<'h> {
+ Memchr2::new(needle1, needle2, haystack)
+}
+
+/// Returns an iterator over all occurrences of the needles in a haystack, in
+/// reverse.
+#[inline]
+pub fn memrchr2_iter(
+ needle1: u8,
+ needle2: u8,
+ haystack: &[u8],
+) -> Rev<Memchr2<'_>> {
+ Memchr2::new(needle1, needle2, haystack).rev()
+}
+
+/// Returns an iterator over all occurrences of the needles in a haystack.
+///
+/// The iterator returned implements `DoubleEndedIterator`. This means it
+/// can also be used to find occurrences in reverse order.
+#[inline]
+pub fn memchr3_iter<'h>(
+ needle1: u8,
+ needle2: u8,
+ needle3: u8,
+ haystack: &'h [u8],
+) -> Memchr3<'h> {
+ Memchr3::new(needle1, needle2, needle3, haystack)
+}
+
+/// Returns an iterator over all occurrences of the needles in a haystack, in
+/// reverse.
+#[inline]
+pub fn memrchr3_iter(
+ needle1: u8,
+ needle2: u8,
+ needle3: u8,
+ haystack: &[u8],
+) -> Rev<Memchr3<'_>> {
+ Memchr3::new(needle1, needle2, needle3, haystack).rev()
+}
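A usage sketch for the iterator constructors above:

```rust
use memchr::{memchr_iter, memrchr_iter};

fn example() {
    let haystack = b"the quick brown fox";
    let forward: Vec<usize> = memchr_iter(b'o', haystack).collect();
    assert_eq!(forward, vec![12, 17]);
    // `memrchr_iter` yields the same offsets, back to front.
    let backward: Vec<usize> = memrchr_iter(b'o', haystack).collect();
    assert_eq!(backward, vec![17, 12]);
}
```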
+
+/// An iterator over all occurrences of a single byte in a haystack.
+///
+/// This iterator implements `DoubleEndedIterator`, which means it can also be
+/// used to find occurrences in reverse order.
+///
+/// This iterator is created by the [`memchr_iter`] or [`memrchr_iter`]
+/// functions. It can also be created with the [`Memchr::new`] method.
+///
+/// The lifetime parameter `'h` refers to the lifetime of the haystack being
+/// searched.
+#[derive(Clone, Debug)]
+pub struct Memchr<'h> {
+ needle1: u8,
+ it: crate::arch::generic::memchr::Iter<'h>,
+}
+
+impl<'h> Memchr<'h> {
+ /// Returns an iterator over all occurrences of the needle byte in the
+ /// given haystack.
+ ///
+ /// The iterator returned implements `DoubleEndedIterator`. This means it
+ /// can also be used to find occurrences in reverse order.
+ #[inline]
+ pub fn new(needle1: u8, haystack: &'h [u8]) -> Memchr<'h> {
+ Memchr {
+ needle1,
+ it: crate::arch::generic::memchr::Iter::new(haystack),
+ }
+ }
+}
+
+impl<'h> Iterator for Memchr<'h> {
+ type Item = usize;
+
+ #[inline]
+ fn next(&mut self) -> Option<usize> {
+ // SAFETY: All of our implementations of memchr ensure that any
+ // pointers returned will fall within the start and end bounds, and this
+ // upholds the safety contract of `self.it.next`.
+ unsafe {
+ // NOTE: I attempted to define an enum of previously created
+ // searchers and then switch on those here instead of just
+ // calling `memchr_raw` (or `One::new(..).find_raw(..)`). But
+ // that turned out to have a fair bit of extra overhead when
+ // searching very small haystacks.
+ self.it.next(|s, e| memchr_raw(self.needle1, s, e))
+ }
+ }
+
+ #[inline]
+ fn count(self) -> usize {
+ self.it.count(|s, e| {
+ // SAFETY: We rely on our generic iterator to return valid start
+ // and end pointers.
+ unsafe { count_raw(self.needle1, s, e) }
+ })
+ }
+
+ #[inline]
+ fn size_hint(&self) -> (usize, Option<usize>) {
+ self.it.size_hint()
+ }
+}
+
+impl<'h> DoubleEndedIterator for Memchr<'h> {
+ #[inline]
+ fn next_back(&mut self) -> Option<usize> {
+ // SAFETY: All of our implementations of memchr ensure that any
+ // pointers returned will fall within the start and end bounds, and this
+ // upholds the safety contract of `self.it.next_back`.
+ unsafe { self.it.next_back(|s, e| memrchr_raw(self.needle1, s, e)) }
+ }
+}
+
+impl<'h> core::iter::FusedIterator for Memchr<'h> {}
+
+/// An iterator over all occurrences of two possible bytes in a haystack.
+///
+/// This iterator implements `DoubleEndedIterator`, which means it can also be
+/// used to find occurrences in reverse order.
+///
+/// This iterator is created by the [`memchr2_iter`] or [`memrchr2_iter`]
+/// functions. It can also be created with the [`Memchr2::new`] method.
+///
+/// The lifetime parameter `'h` refers to the lifetime of the haystack being
+/// searched.
+#[derive(Clone, Debug)]
+pub struct Memchr2<'h> {
+ needle1: u8,
+ needle2: u8,
+ it: crate::arch::generic::memchr::Iter<'h>,
+}
+
+impl<'h> Memchr2<'h> {
+ /// Returns an iterator over all occurrences of the needle bytes in the
+ /// given haystack.
+ ///
+ /// The iterator returned implements `DoubleEndedIterator`. This means it
+ /// can also be used to find occurrences in reverse order.
+ #[inline]
+ pub fn new(needle1: u8, needle2: u8, haystack: &'h [u8]) -> Memchr2<'h> {
+ Memchr2 {
+ needle1,
+ needle2,
+ it: crate::arch::generic::memchr::Iter::new(haystack),
+ }
+ }
+}
+
+impl<'h> Iterator for Memchr2<'h> {
+ type Item = usize;
+
+ #[inline]
+ fn next(&mut self) -> Option<usize> {
+ // SAFETY: All of our implementations of memchr ensure that any
+ // pointers returned will fall within the start and end bounds, and this
+ // upholds the safety contract of `self.it.next`.
+ unsafe {
+ self.it.next(|s, e| memchr2_raw(self.needle1, self.needle2, s, e))
+ }
+ }
+
+ #[inline]
+ fn size_hint(&self) -> (usize, Option<usize>) {
+ self.it.size_hint()
+ }
+}
+
+impl<'h> DoubleEndedIterator for Memchr2<'h> {
+ #[inline]
+ fn next_back(&mut self) -> Option<usize> {
+ // SAFETY: All of our implementations of memchr ensure that any
+ // pointers returned will fall within the start and end bounds, and this
+ // upholds the safety contract of `self.it.next_back`.
+ unsafe {
+ self.it.next_back(|s, e| {
+ memrchr2_raw(self.needle1, self.needle2, s, e)
+ })
+ }
+ }
+}
+
+impl<'h> core::iter::FusedIterator for Memchr2<'h> {}
+
+/// An iterator over all occurrences of three possible bytes in a haystack.
+///
+/// This iterator implements `DoubleEndedIterator`, which means it can also be
+/// used to find occurrences in reverse order.
+///
+/// This iterator is created by the [`memchr3_iter`] or [`memrchr3_iter`]
+/// functions. It can also be created with the [`Memchr3::new`] method.
+///
+/// The lifetime parameter `'h` refers to the lifetime of the haystack being
+/// searched.
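+///
+/// # Example
+///
+/// A brief sketch of forward iteration, using the [`memchr3_iter`]
+/// convenience function to construct this iterator:
+///
+/// ```
+/// use memchr::memchr3_iter;
+///
+/// let haystack = b"xaybzc";
+/// let mut it = memchr3_iter(b'x', b'y', b'z', haystack);
+/// assert_eq!(Some(0), it.next());
+/// assert_eq!(Some(2), it.next());
+/// assert_eq!(Some(4), it.next());
+/// assert_eq!(None, it.next());
+/// ```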
+#[derive(Clone, Debug)]
+pub struct Memchr3<'h> {
+ needle1: u8,
+ needle2: u8,
+ needle3: u8,
+ it: crate::arch::generic::memchr::Iter<'h>,
+}
+
+impl<'h> Memchr3<'h> {
+ /// Returns an iterator over all occurrences of the needle bytes in the
+ /// given haystack.
+ ///
+ /// The iterator returned implements `DoubleEndedIterator`. This means it
+ /// can also be used to find occurrences in reverse order.
+ #[inline]
+ pub fn new(
+ needle1: u8,
+ needle2: u8,
+ needle3: u8,
+ haystack: &'h [u8],
+ ) -> Memchr3<'h> {
+ Memchr3 {
+ needle1,
+ needle2,
+ needle3,
+ it: crate::arch::generic::memchr::Iter::new(haystack),
+ }
+ }
+}
+
+impl<'h> Iterator for Memchr3<'h> {
+ type Item = usize;
+
+ #[inline]
+ fn next(&mut self) -> Option<usize> {
+ // SAFETY: All of our implementations of memchr ensure that any
+ // pointers returned will fall within the start and end bounds, and this
+ // upholds the safety contract of `self.it.next`.
+ unsafe {
+ self.it.next(|s, e| {
+ memchr3_raw(self.needle1, self.needle2, self.needle3, s, e)
+ })
+ }
+ }
+
+ #[inline]
+ fn size_hint(&self) -> (usize, Option<usize>) {
+ self.it.size_hint()
+ }
+}
+
+impl<'h> DoubleEndedIterator for Memchr3<'h> {
+ #[inline]
+ fn next_back(&mut self) -> Option<usize> {
+ // SAFETY: All of our implementations of memchr ensure that any
+ // pointers returned will fall within the start and end bounds, and this
+ // upholds the safety contract of `self.it.next_back`.
+ unsafe {
+ self.it.next_back(|s, e| {
+ memrchr3_raw(self.needle1, self.needle2, self.needle3, s, e)
+ })
+ }
+ }
+}
+
+impl<'h> core::iter::FusedIterator for Memchr3<'h> {}
+
+/// memchr, but using raw pointers to represent the haystack.
+///
+/// # Safety
+///
+/// Pointers must be valid. See `One::find_raw`.
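+///
+/// An illustrative sketch (not part of the public API) of a caller
+/// upholding that contract, with both pointers derived from the same
+/// slice:
+///
+/// ```ignore
+/// let start = haystack.as_ptr();
+/// // SAFETY: `start` and `end` come from the same slice, so both are
+/// // valid for reads and `start <= end`.
+/// let end = unsafe { start.add(haystack.len()) };
+/// let found: Option<*const u8> = unsafe { memchr_raw(needle, start, end) };
+/// ```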
+#[inline]
+unsafe fn memchr_raw(
+ needle: u8,
+ start: *const u8,
+ end: *const u8,
+) -> Option<*const u8> {
+ #[cfg(target_arch = "x86_64")]
+ {
+ // x86_64 does CPU feature detection at runtime in order to use AVX2
+ // instructions even when the `avx2` feature isn't enabled at compile
+ // time. This function also handles falling back to a portable
+ // implementation if neither AVX2 nor SSE2 is available (which is
+ // unusual).
+ crate::arch::x86_64::memchr::memchr_raw(needle, start, end)
+ }
+ #[cfg(target_arch = "wasm32")]
+ {
+ crate::arch::wasm32::memchr::memchr_raw(needle, start, end)
+ }
+ #[cfg(target_arch = "aarch64")]
+ {
+ crate::arch::aarch64::memchr::memchr_raw(needle, start, end)
+ }
+ #[cfg(not(any(
+ target_arch = "x86_64",
+ target_arch = "wasm32",
+ target_arch = "aarch64"
+ )))]
+ {
+ crate::arch::all::memchr::One::new(needle).find_raw(start, end)
+ }
+}
+
+/// memrchr, but using raw pointers to represent the haystack.
+///
+/// # Safety
+///
+/// Pointers must be valid. See `One::rfind_raw`.
+#[inline]
+unsafe fn memrchr_raw(
+ needle: u8,
+ start: *const u8,
+ end: *const u8,
+) -> Option<*const u8> {
+ #[cfg(target_arch = "x86_64")]
+ {
+ crate::arch::x86_64::memchr::memrchr_raw(needle, start, end)
+ }
+ #[cfg(target_arch = "wasm32")]
+ {
+ crate::arch::wasm32::memchr::memrchr_raw(needle, start, end)
+ }
+ #[cfg(target_arch = "aarch64")]
+ {
+ crate::arch::aarch64::memchr::memrchr_raw(needle, start, end)
+ }
+ #[cfg(not(any(
+ target_arch = "x86_64",
+ target_arch = "wasm32",
+ target_arch = "aarch64"
+ )))]
+ {
+ crate::arch::all::memchr::One::new(needle).rfind_raw(start, end)
+ }
+}
+
+/// memchr2, but using raw pointers to represent the haystack.
+///
+/// # Safety
+///
+/// Pointers must be valid. See `Two::find_raw`.
+#[inline]
+unsafe fn memchr2_raw(
+ needle1: u8,
+ needle2: u8,
+ start: *const u8,
+ end: *const u8,
+) -> Option<*const u8> {
+ #[cfg(target_arch = "x86_64")]
+ {
+ crate::arch::x86_64::memchr::memchr2_raw(needle1, needle2, start, end)
+ }
+ #[cfg(target_arch = "wasm32")]
+ {
+ crate::arch::wasm32::memchr::memchr2_raw(needle1, needle2, start, end)
+ }
+ #[cfg(target_arch = "aarch64")]
+ {
+ crate::arch::aarch64::memchr::memchr2_raw(needle1, needle2, start, end)
+ }
+ #[cfg(not(any(
+ target_arch = "x86_64",
+ target_arch = "wasm32",
+ target_arch = "aarch64"
+ )))]
+ {
+ crate::arch::all::memchr::Two::new(needle1, needle2)
+ .find_raw(start, end)
+ }
+}
+
+/// memrchr2, but using raw pointers to represent the haystack.
+///
+/// # Safety
+///
+/// Pointers must be valid. See `Two::rfind_raw`.
+#[inline]
+unsafe fn memrchr2_raw(
+ needle1: u8,
+ needle2: u8,
+ start: *const u8,
+ end: *const u8,
+) -> Option<*const u8> {
+ #[cfg(target_arch = "x86_64")]
+ {
+ crate::arch::x86_64::memchr::memrchr2_raw(needle1, needle2, start, end)
+ }
+ #[cfg(target_arch = "wasm32")]
+ {
+ crate::arch::wasm32::memchr::memrchr2_raw(needle1, needle2, start, end)
+ }
+ #[cfg(target_arch = "aarch64")]
+ {
+ crate::arch::aarch64::memchr::memrchr2_raw(
+ needle1, needle2, start, end,
+ )
+ }
+ #[cfg(not(any(
+ target_arch = "x86_64",
+ target_arch = "wasm32",
+ target_arch = "aarch64"
+ )))]
+ {
+ crate::arch::all::memchr::Two::new(needle1, needle2)
+ .rfind_raw(start, end)
+ }
+}
+
+/// memchr3, but using raw pointers to represent the haystack.
+///
+/// # Safety
+///
+/// Pointers must be valid. See `Three::find_raw`.
+#[inline]
+unsafe fn memchr3_raw(
+ needle1: u8,
+ needle2: u8,
+ needle3: u8,
+ start: *const u8,
+ end: *const u8,
+) -> Option<*const u8> {
+ #[cfg(target_arch = "x86_64")]
+ {
+ crate::arch::x86_64::memchr::memchr3_raw(
+ needle1, needle2, needle3, start, end,
+ )
+ }
+ #[cfg(target_arch = "wasm32")]
+ {
+ crate::arch::wasm32::memchr::memchr3_raw(
+ needle1, needle2, needle3, start, end,
+ )
+ }
+ #[cfg(target_arch = "aarch64")]
+ {
+ crate::arch::aarch64::memchr::memchr3_raw(
+ needle1, needle2, needle3, start, end,
+ )
+ }
+ #[cfg(not(any(
+ target_arch = "x86_64",
+ target_arch = "wasm32",
+ target_arch = "aarch64"
+ )))]
+ {
+ crate::arch::all::memchr::Three::new(needle1, needle2, needle3)
+ .find_raw(start, end)
+ }
+}
+
+/// memrchr3, but using raw pointers to represent the haystack.
+///
+/// # Safety
+///
+/// Pointers must be valid. See `Three::rfind_raw`.
+#[inline]
+unsafe fn memrchr3_raw(
+ needle1: u8,
+ needle2: u8,
+ needle3: u8,
+ start: *const u8,
+ end: *const u8,
+) -> Option<*const u8> {
+ #[cfg(target_arch = "x86_64")]
+ {
+ crate::arch::x86_64::memchr::memrchr3_raw(
+ needle1, needle2, needle3, start, end,
+ )
+ }
+ #[cfg(target_arch = "wasm32")]
+ {
+ crate::arch::wasm32::memchr::memrchr3_raw(
+ needle1, needle2, needle3, start, end,
+ )
+ }
+ #[cfg(target_arch = "aarch64")]
+ {
+ crate::arch::aarch64::memchr::memrchr3_raw(
+ needle1, needle2, needle3, start, end,
+ )
+ }
+ #[cfg(not(any(
+ target_arch = "x86_64",
+ target_arch = "wasm32",
+ target_arch = "aarch64"
+ )))]
+ {
+ crate::arch::all::memchr::Three::new(needle1, needle2, needle3)
+ .rfind_raw(start, end)
+ }
+}
+
+/// Count all matching bytes, but using raw pointers to represent the haystack.
+///
+/// # Safety
+///
+/// Pointers must be valid. See `One::count_raw`.
+#[inline]
+unsafe fn count_raw(needle: u8, start: *const u8, end: *const u8) -> usize {
+ #[cfg(target_arch = "x86_64")]
+ {
+ crate::arch::x86_64::memchr::count_raw(needle, start, end)
+ }
+ #[cfg(target_arch = "wasm32")]
+ {
+ crate::arch::wasm32::memchr::count_raw(needle, start, end)
+ }
+ #[cfg(target_arch = "aarch64")]
+ {
+ crate::arch::aarch64::memchr::count_raw(needle, start, end)
+ }
+ #[cfg(not(any(
+ target_arch = "x86_64",
+ target_arch = "wasm32",
+ target_arch = "aarch64"
+ )))]
+ {
+ crate::arch::all::memchr::One::new(needle).count_raw(start, end)
+ }
+}
+
+#[cfg(test)]
+mod tests {
+ use super::*;
+
+ #[test]
+ fn forward1_iter() {
+ crate::tests::memchr::Runner::new(1).forward_iter(
+ |haystack, needles| {
+ Some(memchr_iter(needles[0], haystack).collect())
+ },
+ )
+ }
+
+ #[test]
+ fn forward1_oneshot() {
+ crate::tests::memchr::Runner::new(1).forward_oneshot(
+ |haystack, needles| Some(memchr(needles[0], haystack)),
+ )
+ }
+
+ #[test]
+ fn reverse1_iter() {
+ crate::tests::memchr::Runner::new(1).reverse_iter(
+ |haystack, needles| {
+ Some(memrchr_iter(needles[0], haystack).collect())
+ },
+ )
+ }
+
+ #[test]
+ fn reverse1_oneshot() {
+ crate::tests::memchr::Runner::new(1).reverse_oneshot(
+ |haystack, needles| Some(memrchr(needles[0], haystack)),
+ )
+ }
+
+ #[test]
+ fn count1_iter() {
+ crate::tests::memchr::Runner::new(1).count_iter(|haystack, needles| {
+ Some(memchr_iter(needles[0], haystack).count())
+ })
+ }
+
+ #[test]
+ fn forward2_iter() {
+ crate::tests::memchr::Runner::new(2).forward_iter(
+ |haystack, needles| {
+ let n1 = needles.get(0).copied()?;
+ let n2 = needles.get(1).copied()?;
+ Some(memchr2_iter(n1, n2, haystack).collect())
+ },
+ )
+ }
+
+ #[test]
+ fn forward2_oneshot() {
+ crate::tests::memchr::Runner::new(2).forward_oneshot(
+ |haystack, needles| {
+ let n1 = needles.get(0).copied()?;
+ let n2 = needles.get(1).copied()?;
+ Some(memchr2(n1, n2, haystack))
+ },
+ )
+ }
+
+ #[test]
+ fn reverse2_iter() {
+ crate::tests::memchr::Runner::new(2).reverse_iter(
+ |haystack, needles| {
+ let n1 = needles.get(0).copied()?;
+ let n2 = needles.get(1).copied()?;
+ Some(memrchr2_iter(n1, n2, haystack).collect())
+ },
+ )
+ }
+
+ #[test]
+ fn reverse2_oneshot() {
+ crate::tests::memchr::Runner::new(2).reverse_oneshot(
+ |haystack, needles| {
+ let n1 = needles.get(0).copied()?;
+ let n2 = needles.get(1).copied()?;
+ Some(memrchr2(n1, n2, haystack))
+ },
+ )
+ }
+
+ #[test]
+ fn forward3_iter() {
+ crate::tests::memchr::Runner::new(3).forward_iter(
+ |haystack, needles| {
+ let n1 = needles.get(0).copied()?;
+ let n2 = needles.get(1).copied()?;
+ let n3 = needles.get(2).copied()?;
+ Some(memchr3_iter(n1, n2, n3, haystack).collect())
+ },
+ )
+ }
+
+ #[test]
+ fn forward3_oneshot() {
+ crate::tests::memchr::Runner::new(3).forward_oneshot(
+ |haystack, needles| {
+ let n1 = needles.get(0).copied()?;
+ let n2 = needles.get(1).copied()?;
+ let n3 = needles.get(2).copied()?;
+ Some(memchr3(n1, n2, n3, haystack))
+ },
+ )
+ }
+
+ #[test]
+ fn reverse3_iter() {
+ crate::tests::memchr::Runner::new(3).reverse_iter(
+ |haystack, needles| {
+ let n1 = needles.get(0).copied()?;
+ let n2 = needles.get(1).copied()?;
+ let n3 = needles.get(2).copied()?;
+ Some(memrchr3_iter(n1, n2, n3, haystack).collect())
+ },
+ )
+ }
+
+ #[test]
+ fn reverse3_oneshot() {
+ crate::tests::memchr::Runner::new(3).reverse_oneshot(
+ |haystack, needles| {
+ let n1 = needles.get(0).copied()?;
+ let n2 = needles.get(1).copied()?;
+ let n3 = needles.get(2).copied()?;
+ Some(memrchr3(n1, n2, n3, haystack))
+ },
+ )
+ }
+
+ // Prior to memchr 2.6, the memchr iterators all implemented Send and
+ // Sync. But in memchr 2.6, the iterators changed to use raw pointers
+ // internally and I didn't add explicit Send/Sync impls. This ended up
+ // regressing the API. This test ensures we don't do that again.
+ //
+ // See: https://github.com/BurntSushi/memchr/issues/133
+ #[test]
+ fn sync_regression() {
+ use core::panic::{RefUnwindSafe, UnwindSafe};
+
+ fn assert_send_sync<T: Send + Sync + UnwindSafe + RefUnwindSafe>() {}
+ assert_send_sync::<Memchr>();
+ assert_send_sync::<Memchr2>();
+ assert_send_sync::<Memchr3>()
+ }
+}
diff --git a/vendor/memchr/src/memchr/c.rs b/vendor/memchr/src/memchr/c.rs
deleted file mode 100644
index 608aabc98..000000000
--- a/vendor/memchr/src/memchr/c.rs
+++ /dev/null
@@ -1,44 +0,0 @@
-// This module defines safe wrappers around memchr (POSIX) and memrchr (GNU
-// extension).
-
-#![allow(dead_code)]
-
-use libc::{c_int, c_void, size_t};
-
-pub fn memchr(needle: u8, haystack: &[u8]) -> Option<usize> {
- // SAFETY: This is safe to call since all pointers are valid.
- let p = unsafe {
- libc::memchr(
- haystack.as_ptr() as *const c_void,
- needle as c_int,
- haystack.len() as size_t,
- )
- };
- if p.is_null() {
- None
- } else {
- Some(p as usize - (haystack.as_ptr() as usize))
- }
-}
-
-// memrchr is a GNU extension. We know it's available on Linux at least.
-#[cfg(target_os = "linux")]
-pub fn memrchr(needle: u8, haystack: &[u8]) -> Option<usize> {
- // GNU's memrchr() will - unlike memchr() - error if haystack is empty.
- if haystack.is_empty() {
- return None;
- }
- // SAFETY: This is safe to call since all pointers are valid.
- let p = unsafe {
- libc::memrchr(
- haystack.as_ptr() as *const c_void,
- needle as c_int,
- haystack.len() as size_t,
- )
- };
- if p.is_null() {
- None
- } else {
- Some(p as usize - (haystack.as_ptr() as usize))
- }
-}
diff --git a/vendor/memchr/src/memchr/fallback.rs b/vendor/memchr/src/memchr/fallback.rs
deleted file mode 100644
index b01f224fa..000000000
--- a/vendor/memchr/src/memchr/fallback.rs
+++ /dev/null
@@ -1,329 +0,0 @@
-// This module defines pure Rust platform independent implementations of all
-// the memchr routines. We do our best to make them fast. Some of them may even
-// get auto-vectorized.
-
-use core::{cmp, usize};
-
-#[cfg(target_pointer_width = "16")]
-const USIZE_BYTES: usize = 2;
-
-#[cfg(target_pointer_width = "32")]
-const USIZE_BYTES: usize = 4;
-
-#[cfg(target_pointer_width = "64")]
-const USIZE_BYTES: usize = 8;
-
-// The number of bytes to loop at in one iteration of memchr/memrchr.
-const LOOP_SIZE: usize = 2 * USIZE_BYTES;
-
-/// Return `true` if `x` contains any zero byte.
-///
-/// From *Matters Computational*, J. Arndt
-///
-/// "The idea is to subtract one from each of the bytes and then look for
-/// bytes where the borrow propagated all the way to the most significant
-/// bit."
-#[inline(always)]
-fn contains_zero_byte(x: usize) -> bool {
- const LO_U64: u64 = 0x0101010101010101;
- const HI_U64: u64 = 0x8080808080808080;
-
- const LO_USIZE: usize = LO_U64 as usize;
- const HI_USIZE: usize = HI_U64 as usize;
-
- x.wrapping_sub(LO_USIZE) & !x & HI_USIZE != 0
-}
-
-/// Repeat the given byte into a word size number. That is, every 8 bits
-/// is equivalent to the given byte. For example, if `b` is `\x4E` or
-/// `01001110` in binary, then the returned value on a 32-bit system would be:
-/// `01001110_01001110_01001110_01001110`.
-#[inline(always)]
-fn repeat_byte(b: u8) -> usize {
- (b as usize) * (usize::MAX / 255)
-}
-
-pub fn memchr(n1: u8, haystack: &[u8]) -> Option<usize> {
- let vn1 = repeat_byte(n1);
- let confirm = |byte| byte == n1;
- let loop_size = cmp::min(LOOP_SIZE, haystack.len());
- let align = USIZE_BYTES - 1;
- let start_ptr = haystack.as_ptr();
- let mut ptr = start_ptr;
-
- unsafe {
- let end_ptr = start_ptr.add(haystack.len());
- if haystack.len() < USIZE_BYTES {
- return forward_search(start_ptr, end_ptr, ptr, confirm);
- }
-
- let chunk = (ptr as *const usize).read_unaligned();
- if contains_zero_byte(chunk ^ vn1) {
- return forward_search(start_ptr, end_ptr, ptr, confirm);
- }
-
- ptr = ptr.add(USIZE_BYTES - (start_ptr as usize & align));
- debug_assert!(ptr > start_ptr);
- debug_assert!(end_ptr.sub(USIZE_BYTES) >= start_ptr);
- while loop_size == LOOP_SIZE && ptr <= end_ptr.sub(loop_size) {
- debug_assert_eq!(0, (ptr as usize) % USIZE_BYTES);
-
- let a = *(ptr as *const usize);
- let b = *(ptr.add(USIZE_BYTES) as *const usize);
- let eqa = contains_zero_byte(a ^ vn1);
- let eqb = contains_zero_byte(b ^ vn1);
- if eqa || eqb {
- break;
- }
- ptr = ptr.add(LOOP_SIZE);
- }
- forward_search(start_ptr, end_ptr, ptr, confirm)
- }
-}
-
-/// Like `memchr`, but searches for two bytes instead of one.
-pub fn memchr2(n1: u8, n2: u8, haystack: &[u8]) -> Option<usize> {
- let vn1 = repeat_byte(n1);
- let vn2 = repeat_byte(n2);
- let confirm = |byte| byte == n1 || byte == n2;
- let align = USIZE_BYTES - 1;
- let start_ptr = haystack.as_ptr();
- let mut ptr = start_ptr;
-
- unsafe {
- let end_ptr = start_ptr.add(haystack.len());
- if haystack.len() < USIZE_BYTES {
- return forward_search(start_ptr, end_ptr, ptr, confirm);
- }
-
- let chunk = (ptr as *const usize).read_unaligned();
- let eq1 = contains_zero_byte(chunk ^ vn1);
- let eq2 = contains_zero_byte(chunk ^ vn2);
- if eq1 || eq2 {
- return forward_search(start_ptr, end_ptr, ptr, confirm);
- }
-
- ptr = ptr.add(USIZE_BYTES - (start_ptr as usize & align));
- debug_assert!(ptr > start_ptr);
- debug_assert!(end_ptr.sub(USIZE_BYTES) >= start_ptr);
- while ptr <= end_ptr.sub(USIZE_BYTES) {
- debug_assert_eq!(0, (ptr as usize) % USIZE_BYTES);
-
- let chunk = *(ptr as *const usize);
- let eq1 = contains_zero_byte(chunk ^ vn1);
- let eq2 = contains_zero_byte(chunk ^ vn2);
- if eq1 || eq2 {
- break;
- }
- ptr = ptr.add(USIZE_BYTES);
- }
- forward_search(start_ptr, end_ptr, ptr, confirm)
- }
-}
-
-/// Like `memchr`, but searches for three bytes instead of one.
-pub fn memchr3(n1: u8, n2: u8, n3: u8, haystack: &[u8]) -> Option<usize> {
- let vn1 = repeat_byte(n1);
- let vn2 = repeat_byte(n2);
- let vn3 = repeat_byte(n3);
- let confirm = |byte| byte == n1 || byte == n2 || byte == n3;
- let align = USIZE_BYTES - 1;
- let start_ptr = haystack.as_ptr();
- let mut ptr = start_ptr;
-
- unsafe {
- let end_ptr = start_ptr.add(haystack.len());
- if haystack.len() < USIZE_BYTES {
- return forward_search(start_ptr, end_ptr, ptr, confirm);
- }
-
- let chunk = (ptr as *const usize).read_unaligned();
- let eq1 = contains_zero_byte(chunk ^ vn1);
- let eq2 = contains_zero_byte(chunk ^ vn2);
- let eq3 = contains_zero_byte(chunk ^ vn3);
- if eq1 || eq2 || eq3 {
- return forward_search(start_ptr, end_ptr, ptr, confirm);
- }
-
- ptr = ptr.add(USIZE_BYTES - (start_ptr as usize & align));
- debug_assert!(ptr > start_ptr);
- debug_assert!(end_ptr.sub(USIZE_BYTES) >= start_ptr);
- while ptr <= end_ptr.sub(USIZE_BYTES) {
- debug_assert_eq!(0, (ptr as usize) % USIZE_BYTES);
-
- let chunk = *(ptr as *const usize);
- let eq1 = contains_zero_byte(chunk ^ vn1);
- let eq2 = contains_zero_byte(chunk ^ vn2);
- let eq3 = contains_zero_byte(chunk ^ vn3);
- if eq1 || eq2 || eq3 {
- break;
- }
- ptr = ptr.add(USIZE_BYTES);
- }
- forward_search(start_ptr, end_ptr, ptr, confirm)
- }
-}
-
- /// Return the last index matching the byte `n1` in `haystack`.
-pub fn memrchr(n1: u8, haystack: &[u8]) -> Option<usize> {
- let vn1 = repeat_byte(n1);
- let confirm = |byte| byte == n1;
- let loop_size = cmp::min(LOOP_SIZE, haystack.len());
- let align = USIZE_BYTES - 1;
- let start_ptr = haystack.as_ptr();
-
- unsafe {
- let end_ptr = start_ptr.add(haystack.len());
- let mut ptr = end_ptr;
- if haystack.len() < USIZE_BYTES {
- return reverse_search(start_ptr, end_ptr, ptr, confirm);
- }
-
- let chunk = (ptr.sub(USIZE_BYTES) as *const usize).read_unaligned();
- if contains_zero_byte(chunk ^ vn1) {
- return reverse_search(start_ptr, end_ptr, ptr, confirm);
- }
-
- ptr = (end_ptr as usize & !align) as *const u8;
- debug_assert!(start_ptr <= ptr && ptr <= end_ptr);
- while loop_size == LOOP_SIZE && ptr >= start_ptr.add(loop_size) {
- debug_assert_eq!(0, (ptr as usize) % USIZE_BYTES);
-
- let a = *(ptr.sub(2 * USIZE_BYTES) as *const usize);
- let b = *(ptr.sub(1 * USIZE_BYTES) as *const usize);
- let eqa = contains_zero_byte(a ^ vn1);
- let eqb = contains_zero_byte(b ^ vn1);
- if eqa || eqb {
- break;
- }
- ptr = ptr.sub(loop_size);
- }
- reverse_search(start_ptr, end_ptr, ptr, confirm)
- }
-}
-
-/// Like `memrchr`, but searches for two bytes instead of one.
-pub fn memrchr2(n1: u8, n2: u8, haystack: &[u8]) -> Option<usize> {
- let vn1 = repeat_byte(n1);
- let vn2 = repeat_byte(n2);
- let confirm = |byte| byte == n1 || byte == n2;
- let align = USIZE_BYTES - 1;
- let start_ptr = haystack.as_ptr();
-
- unsafe {
- let end_ptr = start_ptr.add(haystack.len());
- let mut ptr = end_ptr;
- if haystack.len() < USIZE_BYTES {
- return reverse_search(start_ptr, end_ptr, ptr, confirm);
- }
-
- let chunk = (ptr.sub(USIZE_BYTES) as *const usize).read_unaligned();
- let eq1 = contains_zero_byte(chunk ^ vn1);
- let eq2 = contains_zero_byte(chunk ^ vn2);
- if eq1 || eq2 {
- return reverse_search(start_ptr, end_ptr, ptr, confirm);
- }
-
- ptr = (end_ptr as usize & !align) as *const u8;
- debug_assert!(start_ptr <= ptr && ptr <= end_ptr);
- while ptr >= start_ptr.add(USIZE_BYTES) {
- debug_assert_eq!(0, (ptr as usize) % USIZE_BYTES);
-
- let chunk = *(ptr.sub(USIZE_BYTES) as *const usize);
- let eq1 = contains_zero_byte(chunk ^ vn1);
- let eq2 = contains_zero_byte(chunk ^ vn2);
- if eq1 || eq2 {
- break;
- }
- ptr = ptr.sub(USIZE_BYTES);
- }
- reverse_search(start_ptr, end_ptr, ptr, confirm)
- }
-}
-
-/// Like `memrchr`, but searches for three bytes instead of one.
-pub fn memrchr3(n1: u8, n2: u8, n3: u8, haystack: &[u8]) -> Option<usize> {
- let vn1 = repeat_byte(n1);
- let vn2 = repeat_byte(n2);
- let vn3 = repeat_byte(n3);
- let confirm = |byte| byte == n1 || byte == n2 || byte == n3;
- let align = USIZE_BYTES - 1;
- let start_ptr = haystack.as_ptr();
-
- unsafe {
- let end_ptr = start_ptr.add(haystack.len());
- let mut ptr = end_ptr;
- if haystack.len() < USIZE_BYTES {
- return reverse_search(start_ptr, end_ptr, ptr, confirm);
- }
-
- let chunk = (ptr.sub(USIZE_BYTES) as *const usize).read_unaligned();
- let eq1 = contains_zero_byte(chunk ^ vn1);
- let eq2 = contains_zero_byte(chunk ^ vn2);
- let eq3 = contains_zero_byte(chunk ^ vn3);
- if eq1 || eq2 || eq3 {
- return reverse_search(start_ptr, end_ptr, ptr, confirm);
- }
-
- ptr = (end_ptr as usize & !align) as *const u8;
- debug_assert!(start_ptr <= ptr && ptr <= end_ptr);
- while ptr >= start_ptr.add(USIZE_BYTES) {
- debug_assert_eq!(0, (ptr as usize) % USIZE_BYTES);
-
- let chunk = *(ptr.sub(USIZE_BYTES) as *const usize);
- let eq1 = contains_zero_byte(chunk ^ vn1);
- let eq2 = contains_zero_byte(chunk ^ vn2);
- let eq3 = contains_zero_byte(chunk ^ vn3);
- if eq1 || eq2 || eq3 {
- break;
- }
- ptr = ptr.sub(USIZE_BYTES);
- }
- reverse_search(start_ptr, end_ptr, ptr, confirm)
- }
-}
-
-#[inline(always)]
-unsafe fn forward_search<F: Fn(u8) -> bool>(
- start_ptr: *const u8,
- end_ptr: *const u8,
- mut ptr: *const u8,
- confirm: F,
-) -> Option<usize> {
- debug_assert!(start_ptr <= ptr);
- debug_assert!(ptr <= end_ptr);
-
- while ptr < end_ptr {
- if confirm(*ptr) {
- return Some(sub(ptr, start_ptr));
- }
- ptr = ptr.offset(1);
- }
- None
-}
-
-#[inline(always)]
-unsafe fn reverse_search<F: Fn(u8) -> bool>(
- start_ptr: *const u8,
- end_ptr: *const u8,
- mut ptr: *const u8,
- confirm: F,
-) -> Option<usize> {
- debug_assert!(start_ptr <= ptr);
- debug_assert!(ptr <= end_ptr);
-
- while ptr > start_ptr {
- ptr = ptr.offset(-1);
- if confirm(*ptr) {
- return Some(sub(ptr, start_ptr));
- }
- }
- None
-}
-
-/// Subtract `b` from `a` and return the difference. `a` should be greater than
-/// or equal to `b`.
-fn sub(a: *const u8, b: *const u8) -> usize {
- debug_assert!(a >= b);
- (a as usize) - (b as usize)
-}
diff --git a/vendor/memchr/src/memchr/iter.rs b/vendor/memchr/src/memchr/iter.rs
deleted file mode 100644
index 16e203f63..000000000
--- a/vendor/memchr/src/memchr/iter.rs
+++ /dev/null
@@ -1,173 +0,0 @@
-use crate::{memchr, memchr2, memchr3, memrchr, memrchr2, memrchr3};
-
-macro_rules! iter_next {
- // Common code for the memchr iterators:
- // update haystack and position and produce the index
- //
- // self: &mut Self where Self is the iterator
- // search_result: Option<usize> which is the result of the corresponding
- // memchr function.
- //
- // Returns Option<usize> (the next iterator element)
- ($self_:expr, $search_result:expr) => {
- $search_result.map(move |index| {
- // split and take the remaining back half
- $self_.haystack = $self_.haystack.split_at(index + 1).1;
- let found_position = $self_.position + index;
- $self_.position = found_position + 1;
- found_position
- })
- };
-}
-
-macro_rules! iter_next_back {
- ($self_:expr, $search_result:expr) => {
- $search_result.map(move |index| {
- // split and take the remaining front half
- $self_.haystack = $self_.haystack.split_at(index).0;
- $self_.position + index
- })
- };
-}
-
-/// An iterator for `memchr`.
-pub struct Memchr<'a> {
- needle: u8,
- // The haystack to iterate over
- haystack: &'a [u8],
- // The index
- position: usize,
-}
-
-impl<'a> Memchr<'a> {
- /// Creates a new iterator that yields all positions of needle in haystack.
- #[inline]
- pub fn new(needle: u8, haystack: &[u8]) -> Memchr<'_> {
- Memchr { needle: needle, haystack: haystack, position: 0 }
- }
-}
-
-impl<'a> Iterator for Memchr<'a> {
- type Item = usize;
-
- #[inline]
- fn next(&mut self) -> Option<usize> {
- iter_next!(self, memchr(self.needle, self.haystack))
- }
-
- #[inline]
- fn size_hint(&self) -> (usize, Option<usize>) {
- (0, Some(self.haystack.len()))
- }
-}
-
-impl<'a> DoubleEndedIterator for Memchr<'a> {
- #[inline]
- fn next_back(&mut self) -> Option<Self::Item> {
- iter_next_back!(self, memrchr(self.needle, self.haystack))
- }
-}
-
-/// An iterator for `memchr2`.
-pub struct Memchr2<'a> {
- needle1: u8,
- needle2: u8,
- // The haystack to iterate over
- haystack: &'a [u8],
- // The index
- position: usize,
-}
-
-impl<'a> Memchr2<'a> {
- /// Creates a new iterator that yields all positions of needle in haystack.
- #[inline]
- pub fn new(needle1: u8, needle2: u8, haystack: &[u8]) -> Memchr2<'_> {
- Memchr2 {
- needle1: needle1,
- needle2: needle2,
- haystack: haystack,
- position: 0,
- }
- }
-}
-
-impl<'a> Iterator for Memchr2<'a> {
- type Item = usize;
-
- #[inline]
- fn next(&mut self) -> Option<usize> {
- iter_next!(self, memchr2(self.needle1, self.needle2, self.haystack))
- }
-
- #[inline]
- fn size_hint(&self) -> (usize, Option<usize>) {
- (0, Some(self.haystack.len()))
- }
-}
-
-impl<'a> DoubleEndedIterator for Memchr2<'a> {
- #[inline]
- fn next_back(&mut self) -> Option<Self::Item> {
- iter_next_back!(
- self,
- memrchr2(self.needle1, self.needle2, self.haystack)
- )
- }
-}
-
-/// An iterator for `memchr3`.
-pub struct Memchr3<'a> {
- needle1: u8,
- needle2: u8,
- needle3: u8,
- // The haystack to iterate over
- haystack: &'a [u8],
- // The index
- position: usize,
-}
-
-impl<'a> Memchr3<'a> {
- /// Creates a new iterator that yields all positions of the needles in the haystack.
- #[inline]
- pub fn new(
- needle1: u8,
- needle2: u8,
- needle3: u8,
- haystack: &[u8],
- ) -> Memchr3<'_> {
- Memchr3 {
- needle1: needle1,
- needle2: needle2,
- needle3: needle3,
- haystack: haystack,
- position: 0,
- }
- }
-}
-
-impl<'a> Iterator for Memchr3<'a> {
- type Item = usize;
-
- #[inline]
- fn next(&mut self) -> Option<usize> {
- iter_next!(
- self,
- memchr3(self.needle1, self.needle2, self.needle3, self.haystack)
- )
- }
-
- #[inline]
- fn size_hint(&self) -> (usize, Option<usize>) {
- (0, Some(self.haystack.len()))
- }
-}
-
-impl<'a> DoubleEndedIterator for Memchr3<'a> {
- #[inline]
- fn next_back(&mut self) -> Option<Self::Item> {
- iter_next_back!(
- self,
- memrchr3(self.needle1, self.needle2, self.needle3, self.haystack)
- )
- }
-}
diff --git a/vendor/memchr/src/memchr/mod.rs b/vendor/memchr/src/memchr/mod.rs
deleted file mode 100644
index 09ce6ef3c..000000000
--- a/vendor/memchr/src/memchr/mod.rs
+++ /dev/null
@@ -1,410 +0,0 @@
-use core::iter::Rev;
-
-pub use self::iter::{Memchr, Memchr2, Memchr3};
-
-// N.B. If you're looking for the cfg knobs for libc, see build.rs.
-#[cfg(memchr_libc)]
-mod c;
-#[allow(dead_code)]
-pub mod fallback;
-mod iter;
-pub mod naive;
-#[cfg(all(not(miri), target_arch = "x86_64", memchr_runtime_simd))]
-mod x86;
-
-/// An iterator over all occurrences of the needle in a haystack.
-#[inline]
-pub fn memchr_iter(needle: u8, haystack: &[u8]) -> Memchr<'_> {
- Memchr::new(needle, haystack)
-}
-
-/// An iterator over all occurrences of the needles in a haystack.
-#[inline]
-pub fn memchr2_iter(needle1: u8, needle2: u8, haystack: &[u8]) -> Memchr2<'_> {
- Memchr2::new(needle1, needle2, haystack)
-}
-
-/// An iterator over all occurrences of the needles in a haystack.
-#[inline]
-pub fn memchr3_iter(
- needle1: u8,
- needle2: u8,
- needle3: u8,
- haystack: &[u8],
-) -> Memchr3<'_> {
- Memchr3::new(needle1, needle2, needle3, haystack)
-}
-
-/// An iterator over all occurrences of the needle in a haystack, in reverse.
-#[inline]
-pub fn memrchr_iter(needle: u8, haystack: &[u8]) -> Rev<Memchr<'_>> {
- Memchr::new(needle, haystack).rev()
-}
-
-/// An iterator over all occurrences of the needles in a haystack, in reverse.
-#[inline]
-pub fn memrchr2_iter(
- needle1: u8,
- needle2: u8,
- haystack: &[u8],
-) -> Rev<Memchr2<'_>> {
- Memchr2::new(needle1, needle2, haystack).rev()
-}
-
-/// An iterator over all occurrences of the needles in a haystack, in reverse.
-#[inline]
-pub fn memrchr3_iter(
- needle1: u8,
- needle2: u8,
- needle3: u8,
- haystack: &[u8],
-) -> Rev<Memchr3<'_>> {
- Memchr3::new(needle1, needle2, needle3, haystack).rev()
-}
-
-/// Search for the first occurrence of a byte in a slice.
-///
-/// This returns the index corresponding to the first occurrence of `needle` in
-/// `haystack`, or `None` if one is not found. If an index is returned, it is
-/// guaranteed to be less than `usize::MAX`.
-///
-/// While this is operationally the same as something like
-/// `haystack.iter().position(|&b| b == needle)`, `memchr` will use a highly
-/// optimized routine that can be up to an order of magnitude faster in some
-/// cases.
-///
-/// # Example
-///
-/// This shows how to find the first position of a byte in a byte string.
-///
-/// ```
-/// use memchr::memchr;
-///
-/// let haystack = b"the quick brown fox";
-/// assert_eq!(memchr(b'k', haystack), Some(8));
-/// ```
-#[inline]
-pub fn memchr(needle: u8, haystack: &[u8]) -> Option<usize> {
- #[cfg(miri)]
- #[inline(always)]
- fn imp(n1: u8, haystack: &[u8]) -> Option<usize> {
- naive::memchr(n1, haystack)
- }
-
- #[cfg(all(target_arch = "x86_64", memchr_runtime_simd, not(miri)))]
- #[inline(always)]
- fn imp(n1: u8, haystack: &[u8]) -> Option<usize> {
- x86::memchr(n1, haystack)
- }
-
- #[cfg(all(
- memchr_libc,
- not(all(target_arch = "x86_64", memchr_runtime_simd)),
- not(miri),
- ))]
- #[inline(always)]
- fn imp(n1: u8, haystack: &[u8]) -> Option<usize> {
- c::memchr(n1, haystack)
- }
-
- #[cfg(all(
- not(memchr_libc),
- not(all(target_arch = "x86_64", memchr_runtime_simd)),
- not(miri),
- ))]
- #[inline(always)]
- fn imp(n1: u8, haystack: &[u8]) -> Option<usize> {
- fallback::memchr(n1, haystack)
- }
-
- if haystack.is_empty() {
- None
- } else {
- imp(needle, haystack)
- }
-}
-
-/// Like `memchr`, but searches for either of two bytes instead of just one.
-///
-/// This returns the index corresponding to the first occurrence of `needle1`
-/// or the first occurrence of `needle2` in `haystack` (whichever occurs
-/// earlier), or `None` if neither one is found. If an index is returned, it is
-/// guaranteed to be less than `usize::MAX`.
-///
-/// While this is operationally the same as something like
-/// `haystack.iter().position(|&b| b == needle1 || b == needle2)`, `memchr2`
-/// will use a highly optimized routine that can be up to an order of magnitude
-/// faster in some cases.
-///
-/// # Example
-///
-/// This shows how to find the first position of either of two bytes in a byte
-/// string.
-///
-/// ```
-/// use memchr::memchr2;
-///
-/// let haystack = b"the quick brown fox";
-/// assert_eq!(memchr2(b'k', b'q', haystack), Some(4));
-/// ```
-#[inline]
-pub fn memchr2(needle1: u8, needle2: u8, haystack: &[u8]) -> Option<usize> {
- #[cfg(miri)]
- #[inline(always)]
- fn imp(n1: u8, n2: u8, haystack: &[u8]) -> Option<usize> {
- naive::memchr2(n1, n2, haystack)
- }
-
- #[cfg(all(target_arch = "x86_64", memchr_runtime_simd, not(miri)))]
- #[inline(always)]
- fn imp(n1: u8, n2: u8, haystack: &[u8]) -> Option<usize> {
- x86::memchr2(n1, n2, haystack)
- }
-
- #[cfg(all(
- not(all(target_arch = "x86_64", memchr_runtime_simd)),
- not(miri),
- ))]
- #[inline(always)]
- fn imp(n1: u8, n2: u8, haystack: &[u8]) -> Option<usize> {
- fallback::memchr2(n1, n2, haystack)
- }
-
- if haystack.is_empty() {
- None
- } else {
- imp(needle1, needle2, haystack)
- }
-}
-
-/// Like `memchr`, but searches for any of three bytes instead of just one.
-///
-/// This returns the index corresponding to the first occurrence of `needle1`,
-/// the first occurrence of `needle2`, or the first occurrence of `needle3` in
-/// `haystack` (whichever occurs earliest), or `None` if none are found. If an
-/// index is returned, it is guaranteed to be less than `usize::MAX`.
-///
-/// While this is operationally the same as something like
-/// `haystack.iter().position(|&b| b == needle1 || b == needle2 ||
-/// b == needle3)`, `memchr3` will use a highly optimized routine that can be
-/// up to an order of magnitude faster in some cases.
-///
-/// # Example
-///
-/// This shows how to find the first position of any of three bytes in a byte
-/// string.
-///
-/// ```
-/// use memchr::memchr3;
-///
-/// let haystack = b"the quick brown fox";
-/// assert_eq!(memchr3(b'k', b'q', b'e', haystack), Some(2));
-/// ```
-#[inline]
-pub fn memchr3(
- needle1: u8,
- needle2: u8,
- needle3: u8,
- haystack: &[u8],
-) -> Option<usize> {
- #[cfg(miri)]
- #[inline(always)]
- fn imp(n1: u8, n2: u8, n3: u8, haystack: &[u8]) -> Option<usize> {
- naive::memchr3(n1, n2, n3, haystack)
- }
-
- #[cfg(all(target_arch = "x86_64", memchr_runtime_simd, not(miri)))]
- #[inline(always)]
- fn imp(n1: u8, n2: u8, n3: u8, haystack: &[u8]) -> Option<usize> {
- x86::memchr3(n1, n2, n3, haystack)
- }
-
- #[cfg(all(
- not(all(target_arch = "x86_64", memchr_runtime_simd)),
- not(miri),
- ))]
- #[inline(always)]
- fn imp(n1: u8, n2: u8, n3: u8, haystack: &[u8]) -> Option<usize> {
- fallback::memchr3(n1, n2, n3, haystack)
- }
-
- if haystack.is_empty() {
- None
- } else {
- imp(needle1, needle2, needle3, haystack)
- }
-}
-
-/// Search for the last occurrence of a byte in a slice.
-///
-/// This returns the index corresponding to the last occurrence of `needle` in
-/// `haystack`, or `None` if one is not found. If an index is returned, it is
-/// guaranteed to be less than `usize::MAX`.
-///
-/// While this is operationally the same as something like
-/// `haystack.iter().rposition(|&b| b == needle)`, `memrchr` will use a highly
-/// optimized routine that can be up to an order of magnitude faster in some
-/// cases.
-///
-/// # Example
-///
-/// This shows how to find the last position of a byte in a byte string.
-///
-/// ```
-/// use memchr::memrchr;
-///
-/// let haystack = b"the quick brown fox";
-/// assert_eq!(memrchr(b'o', haystack), Some(17));
-/// ```
-#[inline]
-pub fn memrchr(needle: u8, haystack: &[u8]) -> Option<usize> {
- #[cfg(miri)]
- #[inline(always)]
- fn imp(n1: u8, haystack: &[u8]) -> Option<usize> {
- naive::memrchr(n1, haystack)
- }
-
- #[cfg(all(target_arch = "x86_64", memchr_runtime_simd, not(miri)))]
- #[inline(always)]
- fn imp(n1: u8, haystack: &[u8]) -> Option<usize> {
- x86::memrchr(n1, haystack)
- }
-
- #[cfg(all(
- memchr_libc,
- target_os = "linux",
- not(all(target_arch = "x86_64", memchr_runtime_simd)),
- not(miri)
- ))]
- #[inline(always)]
- fn imp(n1: u8, haystack: &[u8]) -> Option<usize> {
- c::memrchr(n1, haystack)
- }
-
- #[cfg(all(
- not(all(memchr_libc, target_os = "linux")),
- not(all(target_arch = "x86_64", memchr_runtime_simd)),
- not(miri),
- ))]
- #[inline(always)]
- fn imp(n1: u8, haystack: &[u8]) -> Option<usize> {
- fallback::memrchr(n1, haystack)
- }
-
- if haystack.is_empty() {
- None
- } else {
- imp(needle, haystack)
- }
-}
-
-/// Like `memrchr`, but searches for either of two bytes instead of just one.
-///
-/// This returns the index corresponding to the last occurrence of `needle1` or
-/// the last occurrence of `needle2` in `haystack` (whichever occurs later), or
-/// `None` if neither one is found. If an index is returned, it is guaranteed
-/// to be less than `usize::MAX`.
-///
-/// While this is operationally the same as something like
-/// `haystack.iter().rposition(|&b| b == needle1 || b == needle2)`, `memrchr2`
-/// will use a highly optimized routine that can be up to an order of magnitude
-/// faster in some cases.
-///
-/// # Example
-///
-/// This shows how to find the last position of either of two bytes in a byte
-/// string.
-///
-/// ```
-/// use memchr::memrchr2;
-///
-/// let haystack = b"the quick brown fox";
-/// assert_eq!(memrchr2(b'k', b'q', haystack), Some(8));
-/// ```
-#[inline]
-pub fn memrchr2(needle1: u8, needle2: u8, haystack: &[u8]) -> Option<usize> {
- #[cfg(miri)]
- #[inline(always)]
- fn imp(n1: u8, n2: u8, haystack: &[u8]) -> Option<usize> {
- naive::memrchr2(n1, n2, haystack)
- }
-
- #[cfg(all(target_arch = "x86_64", memchr_runtime_simd, not(miri)))]
- #[inline(always)]
- fn imp(n1: u8, n2: u8, haystack: &[u8]) -> Option<usize> {
- x86::memrchr2(n1, n2, haystack)
- }
-
- #[cfg(all(
- not(all(target_arch = "x86_64", memchr_runtime_simd)),
- not(miri),
- ))]
- #[inline(always)]
- fn imp(n1: u8, n2: u8, haystack: &[u8]) -> Option<usize> {
- fallback::memrchr2(n1, n2, haystack)
- }
-
- if haystack.is_empty() {
- None
- } else {
- imp(needle1, needle2, haystack)
- }
-}
-
-/// Like `memrchr`, but searches for any of three bytes instead of just one.
-///
-/// This returns the index corresponding to the last occurrence of `needle1`,
-/// the last occurrence of `needle2`, or the last occurrence of `needle3` in
- /// `haystack` (whichever occurs latest), or `None` if none are found. If an
-/// index is returned, it is guaranteed to be less than `usize::MAX`.
-///
-/// While this is operationally the same as something like
-/// `haystack.iter().rposition(|&b| b == needle1 || b == needle2 ||
-/// b == needle3)`, `memrchr3` will use a highly optimized routine that can be
-/// up to an order of magnitude faster in some cases.
-///
-/// # Example
-///
-/// This shows how to find the last position of any of three bytes in a byte
-/// string.
-///
-/// ```
-/// use memchr::memrchr3;
-///
-/// let haystack = b"the quick brown fox";
-/// assert_eq!(memrchr3(b'k', b'q', b'e', haystack), Some(8));
-/// ```
-#[inline]
-pub fn memrchr3(
- needle1: u8,
- needle2: u8,
- needle3: u8,
- haystack: &[u8],
-) -> Option<usize> {
- #[cfg(miri)]
- #[inline(always)]
- fn imp(n1: u8, n2: u8, n3: u8, haystack: &[u8]) -> Option<usize> {
- naive::memrchr3(n1, n2, n3, haystack)
- }
-
- #[cfg(all(target_arch = "x86_64", memchr_runtime_simd, not(miri)))]
- #[inline(always)]
- fn imp(n1: u8, n2: u8, n3: u8, haystack: &[u8]) -> Option<usize> {
- x86::memrchr3(n1, n2, n3, haystack)
- }
-
- #[cfg(all(
- not(all(target_arch = "x86_64", memchr_runtime_simd)),
- not(miri),
- ))]
- #[inline(always)]
- fn imp(n1: u8, n2: u8, n3: u8, haystack: &[u8]) -> Option<usize> {
- fallback::memrchr3(n1, n2, n3, haystack)
- }
-
- if haystack.is_empty() {
- None
- } else {
- imp(needle1, needle2, needle3, haystack)
- }
-}
diff --git a/vendor/memchr/src/memchr/naive.rs b/vendor/memchr/src/memchr/naive.rs
deleted file mode 100644
index 3f3053d48..000000000
--- a/vendor/memchr/src/memchr/naive.rs
+++ /dev/null
@@ -1,25 +0,0 @@
-#![allow(dead_code)]
-
-pub fn memchr(n1: u8, haystack: &[u8]) -> Option<usize> {
- haystack.iter().position(|&b| b == n1)
-}
-
-pub fn memchr2(n1: u8, n2: u8, haystack: &[u8]) -> Option<usize> {
- haystack.iter().position(|&b| b == n1 || b == n2)
-}
-
-pub fn memchr3(n1: u8, n2: u8, n3: u8, haystack: &[u8]) -> Option<usize> {
- haystack.iter().position(|&b| b == n1 || b == n2 || b == n3)
-}
-
-pub fn memrchr(n1: u8, haystack: &[u8]) -> Option<usize> {
- haystack.iter().rposition(|&b| b == n1)
-}
-
-pub fn memrchr2(n1: u8, n2: u8, haystack: &[u8]) -> Option<usize> {
- haystack.iter().rposition(|&b| b == n1 || b == n2)
-}
-
-pub fn memrchr3(n1: u8, n2: u8, n3: u8, haystack: &[u8]) -> Option<usize> {
- haystack.iter().rposition(|&b| b == n1 || b == n2 || b == n3)
-}
diff --git a/vendor/memchr/src/memchr/x86/avx.rs b/vendor/memchr/src/memchr/x86/avx.rs
deleted file mode 100644
index 535123097..000000000
--- a/vendor/memchr/src/memchr/x86/avx.rs
+++ /dev/null
@@ -1,755 +0,0 @@
-use core::{arch::x86_64::*, cmp, mem::size_of};
-
-use super::sse2;
-
-const VECTOR_SIZE: usize = size_of::<__m256i>();
-const VECTOR_ALIGN: usize = VECTOR_SIZE - 1;
-
-// The number of bytes to loop at in one iteration of memchr/memrchr.
-const LOOP_SIZE: usize = 4 * VECTOR_SIZE;
-
-// The number of bytes to loop at in one iteration of memchr2/memrchr2 and
-// memchr3/memrchr3. There was no observable difference between 128 and 64
-// bytes in benchmarks. memchr3 in particular only gets a very slight speed up
-// from the loop unrolling.
-const LOOP_SIZE2: usize = 2 * VECTOR_SIZE;
-
-#[target_feature(enable = "avx2")]
-pub unsafe fn memchr(n1: u8, haystack: &[u8]) -> Option<usize> {
- // For a high-level explanation of how this algorithm works, see the
- // sse2 implementation. The avx implementation here is the same, but with
- // 256-bit vectors instead of 128-bit vectors.
-
- // This routine is called whenever a match is detected. It is specifically
- // marked as uninlineable because it improves the codegen of the unrolled
- // loop below. Inlining this seems to cause codegen with some extra adds
- // and a load that aren't necessary. This seems to result in about a 10%
- // improvement for the memchr1/crate/huge/never benchmark.
- //
- // Interestingly, I couldn't observe a similar improvement for memrchr.
- #[cold]
- #[inline(never)]
- #[target_feature(enable = "avx2")]
- unsafe fn matched(
- start_ptr: *const u8,
- ptr: *const u8,
- eqa: __m256i,
- eqb: __m256i,
- eqc: __m256i,
- eqd: __m256i,
- ) -> usize {
- let mut at = sub(ptr, start_ptr);
- let mask = _mm256_movemask_epi8(eqa);
- if mask != 0 {
- return at + forward_pos(mask);
- }
-
- at += VECTOR_SIZE;
- let mask = _mm256_movemask_epi8(eqb);
- if mask != 0 {
- return at + forward_pos(mask);
- }
-
- at += VECTOR_SIZE;
- let mask = _mm256_movemask_epi8(eqc);
- if mask != 0 {
- return at + forward_pos(mask);
- }
-
- at += VECTOR_SIZE;
- let mask = _mm256_movemask_epi8(eqd);
- debug_assert!(mask != 0);
- at + forward_pos(mask)
- }
-
- let start_ptr = haystack.as_ptr();
- let end_ptr = start_ptr.add(haystack.len());
- let mut ptr = start_ptr;
-
- if haystack.len() < VECTOR_SIZE {
- // For small haystacks, defer to the SSE2 implementation. Codegen
- // suggests this completely avoids touching the AVX vectors.
- return sse2::memchr(n1, haystack);
- }
-
- let vn1 = _mm256_set1_epi8(n1 as i8);
- let loop_size = cmp::min(LOOP_SIZE, haystack.len());
- if let Some(i) = forward_search1(start_ptr, end_ptr, ptr, vn1) {
- return Some(i);
- }
-
- ptr = ptr.add(VECTOR_SIZE - (start_ptr as usize & VECTOR_ALIGN));
- debug_assert!(ptr > start_ptr && end_ptr.sub(VECTOR_SIZE) >= start_ptr);
- while loop_size == LOOP_SIZE && ptr <= end_ptr.sub(loop_size) {
- debug_assert_eq!(0, (ptr as usize) % VECTOR_SIZE);
-
- let a = _mm256_load_si256(ptr as *const __m256i);
- let b = _mm256_load_si256(ptr.add(VECTOR_SIZE) as *const __m256i);
- let c = _mm256_load_si256(ptr.add(2 * VECTOR_SIZE) as *const __m256i);
- let d = _mm256_load_si256(ptr.add(3 * VECTOR_SIZE) as *const __m256i);
- let eqa = _mm256_cmpeq_epi8(vn1, a);
- let eqb = _mm256_cmpeq_epi8(vn1, b);
- let eqc = _mm256_cmpeq_epi8(vn1, c);
- let eqd = _mm256_cmpeq_epi8(vn1, d);
- let or1 = _mm256_or_si256(eqa, eqb);
- let or2 = _mm256_or_si256(eqc, eqd);
- let or3 = _mm256_or_si256(or1, or2);
-
- if _mm256_movemask_epi8(or3) != 0 {
- return Some(matched(start_ptr, ptr, eqa, eqb, eqc, eqd));
- }
- ptr = ptr.add(loop_size);
- }
- while ptr <= end_ptr.sub(VECTOR_SIZE) {
- debug_assert!(sub(end_ptr, ptr) >= VECTOR_SIZE);
-
- if let Some(i) = forward_search1(start_ptr, end_ptr, ptr, vn1) {
- return Some(i);
- }
- ptr = ptr.add(VECTOR_SIZE);
- }
- if ptr < end_ptr {
- debug_assert!(sub(end_ptr, ptr) < VECTOR_SIZE);
- ptr = ptr.sub(VECTOR_SIZE - sub(end_ptr, ptr));
- debug_assert_eq!(sub(end_ptr, ptr), VECTOR_SIZE);
-
- return forward_search1(start_ptr, end_ptr, ptr, vn1);
- }
- None
-}
-
-#[target_feature(enable = "avx2")]
-pub unsafe fn memchr2(n1: u8, n2: u8, haystack: &[u8]) -> Option<usize> {
- #[cold]
- #[inline(never)]
- #[target_feature(enable = "avx2")]
- unsafe fn matched(
- start_ptr: *const u8,
- ptr: *const u8,
- eqa1: __m256i,
- eqa2: __m256i,
- eqb1: __m256i,
- eqb2: __m256i,
- ) -> usize {
- let mut at = sub(ptr, start_ptr);
- let mask1 = _mm256_movemask_epi8(eqa1);
- let mask2 = _mm256_movemask_epi8(eqa2);
- if mask1 != 0 || mask2 != 0 {
- return at + forward_pos2(mask1, mask2);
- }
-
- at += VECTOR_SIZE;
- let mask1 = _mm256_movemask_epi8(eqb1);
- let mask2 = _mm256_movemask_epi8(eqb2);
- at + forward_pos2(mask1, mask2)
- }
-
- let vn1 = _mm256_set1_epi8(n1 as i8);
- let vn2 = _mm256_set1_epi8(n2 as i8);
- let len = haystack.len();
- let loop_size = cmp::min(LOOP_SIZE2, len);
- let start_ptr = haystack.as_ptr();
- let end_ptr = start_ptr.add(haystack.len());
- let mut ptr = start_ptr;
-
- if haystack.len() < VECTOR_SIZE {
- while ptr < end_ptr {
- if *ptr == n1 || *ptr == n2 {
- return Some(sub(ptr, start_ptr));
- }
- ptr = ptr.offset(1);
- }
- return None;
- }
-
- if let Some(i) = forward_search2(start_ptr, end_ptr, ptr, vn1, vn2) {
- return Some(i);
- }
-
- ptr = ptr.add(VECTOR_SIZE - (start_ptr as usize & VECTOR_ALIGN));
- debug_assert!(ptr > start_ptr && end_ptr.sub(VECTOR_SIZE) >= start_ptr);
- while loop_size == LOOP_SIZE2 && ptr <= end_ptr.sub(loop_size) {
- debug_assert_eq!(0, (ptr as usize) % VECTOR_SIZE);
-
- let a = _mm256_load_si256(ptr as *const __m256i);
- let b = _mm256_load_si256(ptr.add(VECTOR_SIZE) as *const __m256i);
- let eqa1 = _mm256_cmpeq_epi8(vn1, a);
- let eqb1 = _mm256_cmpeq_epi8(vn1, b);
- let eqa2 = _mm256_cmpeq_epi8(vn2, a);
- let eqb2 = _mm256_cmpeq_epi8(vn2, b);
- let or1 = _mm256_or_si256(eqa1, eqb1);
- let or2 = _mm256_or_si256(eqa2, eqb2);
- let or3 = _mm256_or_si256(or1, or2);
- if _mm256_movemask_epi8(or3) != 0 {
- return Some(matched(start_ptr, ptr, eqa1, eqa2, eqb1, eqb2));
- }
- ptr = ptr.add(loop_size);
- }
- while ptr <= end_ptr.sub(VECTOR_SIZE) {
- if let Some(i) = forward_search2(start_ptr, end_ptr, ptr, vn1, vn2) {
- return Some(i);
- }
- ptr = ptr.add(VECTOR_SIZE);
- }
- if ptr < end_ptr {
- debug_assert!(sub(end_ptr, ptr) < VECTOR_SIZE);
- ptr = ptr.sub(VECTOR_SIZE - sub(end_ptr, ptr));
- debug_assert_eq!(sub(end_ptr, ptr), VECTOR_SIZE);
-
- return forward_search2(start_ptr, end_ptr, ptr, vn1, vn2);
- }
- None
-}
-
-#[target_feature(enable = "avx2")]
-pub unsafe fn memchr3(
- n1: u8,
- n2: u8,
- n3: u8,
- haystack: &[u8],
-) -> Option<usize> {
- #[cold]
- #[inline(never)]
- #[target_feature(enable = "avx2")]
- unsafe fn matched(
- start_ptr: *const u8,
- ptr: *const u8,
- eqa1: __m256i,
- eqa2: __m256i,
- eqa3: __m256i,
- eqb1: __m256i,
- eqb2: __m256i,
- eqb3: __m256i,
- ) -> usize {
- let mut at = sub(ptr, start_ptr);
- let mask1 = _mm256_movemask_epi8(eqa1);
- let mask2 = _mm256_movemask_epi8(eqa2);
- let mask3 = _mm256_movemask_epi8(eqa3);
- if mask1 != 0 || mask2 != 0 || mask3 != 0 {
- return at + forward_pos3(mask1, mask2, mask3);
- }
-
- at += VECTOR_SIZE;
- let mask1 = _mm256_movemask_epi8(eqb1);
- let mask2 = _mm256_movemask_epi8(eqb2);
- let mask3 = _mm256_movemask_epi8(eqb3);
- at + forward_pos3(mask1, mask2, mask3)
- }
-
- let vn1 = _mm256_set1_epi8(n1 as i8);
- let vn2 = _mm256_set1_epi8(n2 as i8);
- let vn3 = _mm256_set1_epi8(n3 as i8);
- let len = haystack.len();
- let loop_size = cmp::min(LOOP_SIZE2, len);
- let start_ptr = haystack.as_ptr();
- let end_ptr = start_ptr.add(haystack.len());
- let mut ptr = start_ptr;
-
- if haystack.len() < VECTOR_SIZE {
- while ptr < end_ptr {
- if *ptr == n1 || *ptr == n2 || *ptr == n3 {
- return Some(sub(ptr, start_ptr));
- }
- ptr = ptr.offset(1);
- }
- return None;
- }
-
- if let Some(i) = forward_search3(start_ptr, end_ptr, ptr, vn1, vn2, vn3) {
- return Some(i);
- }
-
- ptr = ptr.add(VECTOR_SIZE - (start_ptr as usize & VECTOR_ALIGN));
- debug_assert!(ptr > start_ptr && end_ptr.sub(VECTOR_SIZE) >= start_ptr);
- while loop_size == LOOP_SIZE2 && ptr <= end_ptr.sub(loop_size) {
- debug_assert_eq!(0, (ptr as usize) % VECTOR_SIZE);
-
- let a = _mm256_load_si256(ptr as *const __m256i);
- let b = _mm256_load_si256(ptr.add(VECTOR_SIZE) as *const __m256i);
- let eqa1 = _mm256_cmpeq_epi8(vn1, a);
- let eqb1 = _mm256_cmpeq_epi8(vn1, b);
- let eqa2 = _mm256_cmpeq_epi8(vn2, a);
- let eqb2 = _mm256_cmpeq_epi8(vn2, b);
- let eqa3 = _mm256_cmpeq_epi8(vn3, a);
- let eqb3 = _mm256_cmpeq_epi8(vn3, b);
- let or1 = _mm256_or_si256(eqa1, eqb1);
- let or2 = _mm256_or_si256(eqa2, eqb2);
- let or3 = _mm256_or_si256(eqa3, eqb3);
- let or4 = _mm256_or_si256(or1, or2);
- let or5 = _mm256_or_si256(or3, or4);
- if _mm256_movemask_epi8(or5) != 0 {
- return Some(matched(
- start_ptr, ptr, eqa1, eqa2, eqa3, eqb1, eqb2, eqb3,
- ));
- }
- ptr = ptr.add(loop_size);
- }
- while ptr <= end_ptr.sub(VECTOR_SIZE) {
- if let Some(i) =
- forward_search3(start_ptr, end_ptr, ptr, vn1, vn2, vn3)
- {
- return Some(i);
- }
- ptr = ptr.add(VECTOR_SIZE);
- }
- if ptr < end_ptr {
- debug_assert!(sub(end_ptr, ptr) < VECTOR_SIZE);
- ptr = ptr.sub(VECTOR_SIZE - sub(end_ptr, ptr));
- debug_assert_eq!(sub(end_ptr, ptr), VECTOR_SIZE);
-
- return forward_search3(start_ptr, end_ptr, ptr, vn1, vn2, vn3);
- }
- None
-}
-
-#[target_feature(enable = "avx2")]
-pub unsafe fn memrchr(n1: u8, haystack: &[u8]) -> Option<usize> {
- let vn1 = _mm256_set1_epi8(n1 as i8);
- let len = haystack.len();
- let loop_size = cmp::min(LOOP_SIZE, len);
- let start_ptr = haystack.as_ptr();
- let end_ptr = start_ptr.add(haystack.len());
- let mut ptr = end_ptr;
-
- if haystack.len() < VECTOR_SIZE {
- while ptr > start_ptr {
- ptr = ptr.offset(-1);
- if *ptr == n1 {
- return Some(sub(ptr, start_ptr));
- }
- }
- return None;
- }
-
- ptr = ptr.sub(VECTOR_SIZE);
- if let Some(i) = reverse_search1(start_ptr, end_ptr, ptr, vn1) {
- return Some(i);
- }
-
- ptr = (end_ptr as usize & !VECTOR_ALIGN) as *const u8;
- debug_assert!(start_ptr <= ptr && ptr <= end_ptr);
- while loop_size == LOOP_SIZE && ptr >= start_ptr.add(loop_size) {
- debug_assert_eq!(0, (ptr as usize) % VECTOR_SIZE);
-
- ptr = ptr.sub(loop_size);
- let a = _mm256_load_si256(ptr as *const __m256i);
- let b = _mm256_load_si256(ptr.add(VECTOR_SIZE) as *const __m256i);
- let c = _mm256_load_si256(ptr.add(2 * VECTOR_SIZE) as *const __m256i);
- let d = _mm256_load_si256(ptr.add(3 * VECTOR_SIZE) as *const __m256i);
- let eqa = _mm256_cmpeq_epi8(vn1, a);
- let eqb = _mm256_cmpeq_epi8(vn1, b);
- let eqc = _mm256_cmpeq_epi8(vn1, c);
- let eqd = _mm256_cmpeq_epi8(vn1, d);
- let or1 = _mm256_or_si256(eqa, eqb);
- let or2 = _mm256_or_si256(eqc, eqd);
- let or3 = _mm256_or_si256(or1, or2);
- if _mm256_movemask_epi8(or3) != 0 {
- let mut at = sub(ptr.add(3 * VECTOR_SIZE), start_ptr);
- let mask = _mm256_movemask_epi8(eqd);
- if mask != 0 {
- return Some(at + reverse_pos(mask));
- }
-
- at -= VECTOR_SIZE;
- let mask = _mm256_movemask_epi8(eqc);
- if mask != 0 {
- return Some(at + reverse_pos(mask));
- }
-
- at -= VECTOR_SIZE;
- let mask = _mm256_movemask_epi8(eqb);
- if mask != 0 {
- return Some(at + reverse_pos(mask));
- }
-
- at -= VECTOR_SIZE;
- let mask = _mm256_movemask_epi8(eqa);
- debug_assert!(mask != 0);
- return Some(at + reverse_pos(mask));
- }
- }
- while ptr >= start_ptr.add(VECTOR_SIZE) {
- ptr = ptr.sub(VECTOR_SIZE);
- if let Some(i) = reverse_search1(start_ptr, end_ptr, ptr, vn1) {
- return Some(i);
- }
- }
- if ptr > start_ptr {
- debug_assert!(sub(ptr, start_ptr) < VECTOR_SIZE);
- return reverse_search1(start_ptr, end_ptr, start_ptr, vn1);
- }
- None
-}
-
-#[target_feature(enable = "avx2")]
-pub unsafe fn memrchr2(n1: u8, n2: u8, haystack: &[u8]) -> Option<usize> {
- let vn1 = _mm256_set1_epi8(n1 as i8);
- let vn2 = _mm256_set1_epi8(n2 as i8);
- let len = haystack.len();
- let loop_size = cmp::min(LOOP_SIZE2, len);
- let start_ptr = haystack.as_ptr();
- let end_ptr = start_ptr.add(haystack.len());
- let mut ptr = end_ptr;
-
- if haystack.len() < VECTOR_SIZE {
- while ptr > start_ptr {
- ptr = ptr.offset(-1);
- if *ptr == n1 || *ptr == n2 {
- return Some(sub(ptr, start_ptr));
- }
- }
- return None;
- }
-
- ptr = ptr.sub(VECTOR_SIZE);
- if let Some(i) = reverse_search2(start_ptr, end_ptr, ptr, vn1, vn2) {
- return Some(i);
- }
-
- ptr = (end_ptr as usize & !VECTOR_ALIGN) as *const u8;
- debug_assert!(start_ptr <= ptr && ptr <= end_ptr);
- while loop_size == LOOP_SIZE2 && ptr >= start_ptr.add(loop_size) {
- debug_assert_eq!(0, (ptr as usize) % VECTOR_SIZE);
-
- ptr = ptr.sub(loop_size);
- let a = _mm256_load_si256(ptr as *const __m256i);
- let b = _mm256_load_si256(ptr.add(VECTOR_SIZE) as *const __m256i);
- let eqa1 = _mm256_cmpeq_epi8(vn1, a);
- let eqb1 = _mm256_cmpeq_epi8(vn1, b);
- let eqa2 = _mm256_cmpeq_epi8(vn2, a);
- let eqb2 = _mm256_cmpeq_epi8(vn2, b);
- let or1 = _mm256_or_si256(eqa1, eqb1);
- let or2 = _mm256_or_si256(eqa2, eqb2);
- let or3 = _mm256_or_si256(or1, or2);
- if _mm256_movemask_epi8(or3) != 0 {
- let mut at = sub(ptr.add(VECTOR_SIZE), start_ptr);
- let mask1 = _mm256_movemask_epi8(eqb1);
- let mask2 = _mm256_movemask_epi8(eqb2);
- if mask1 != 0 || mask2 != 0 {
- return Some(at + reverse_pos2(mask1, mask2));
- }
-
- at -= VECTOR_SIZE;
- let mask1 = _mm256_movemask_epi8(eqa1);
- let mask2 = _mm256_movemask_epi8(eqa2);
- return Some(at + reverse_pos2(mask1, mask2));
- }
- }
- while ptr >= start_ptr.add(VECTOR_SIZE) {
- ptr = ptr.sub(VECTOR_SIZE);
- if let Some(i) = reverse_search2(start_ptr, end_ptr, ptr, vn1, vn2) {
- return Some(i);
- }
- }
- if ptr > start_ptr {
- debug_assert!(sub(ptr, start_ptr) < VECTOR_SIZE);
- return reverse_search2(start_ptr, end_ptr, start_ptr, vn1, vn2);
- }
- None
-}
-
-#[target_feature(enable = "avx2")]
-pub unsafe fn memrchr3(
- n1: u8,
- n2: u8,
- n3: u8,
- haystack: &[u8],
-) -> Option<usize> {
- let vn1 = _mm256_set1_epi8(n1 as i8);
- let vn2 = _mm256_set1_epi8(n2 as i8);
- let vn3 = _mm256_set1_epi8(n3 as i8);
- let len = haystack.len();
- let loop_size = cmp::min(LOOP_SIZE2, len);
- let start_ptr = haystack.as_ptr();
- let end_ptr = start_ptr.add(haystack.len());
- let mut ptr = end_ptr;
-
- if haystack.len() < VECTOR_SIZE {
- while ptr > start_ptr {
- ptr = ptr.offset(-1);
- if *ptr == n1 || *ptr == n2 || *ptr == n3 {
- return Some(sub(ptr, start_ptr));
- }
- }
- return None;
- }
-
- ptr = ptr.sub(VECTOR_SIZE);
- if let Some(i) = reverse_search3(start_ptr, end_ptr, ptr, vn1, vn2, vn3) {
- return Some(i);
- }
-
- ptr = (end_ptr as usize & !VECTOR_ALIGN) as *const u8;
- debug_assert!(start_ptr <= ptr && ptr <= end_ptr);
- while loop_size == LOOP_SIZE2 && ptr >= start_ptr.add(loop_size) {
- debug_assert_eq!(0, (ptr as usize) % VECTOR_SIZE);
-
- ptr = ptr.sub(loop_size);
- let a = _mm256_load_si256(ptr as *const __m256i);
- let b = _mm256_load_si256(ptr.add(VECTOR_SIZE) as *const __m256i);
- let eqa1 = _mm256_cmpeq_epi8(vn1, a);
- let eqb1 = _mm256_cmpeq_epi8(vn1, b);
- let eqa2 = _mm256_cmpeq_epi8(vn2, a);
- let eqb2 = _mm256_cmpeq_epi8(vn2, b);
- let eqa3 = _mm256_cmpeq_epi8(vn3, a);
- let eqb3 = _mm256_cmpeq_epi8(vn3, b);
- let or1 = _mm256_or_si256(eqa1, eqb1);
- let or2 = _mm256_or_si256(eqa2, eqb2);
- let or3 = _mm256_or_si256(eqa3, eqb3);
- let or4 = _mm256_or_si256(or1, or2);
- let or5 = _mm256_or_si256(or3, or4);
- if _mm256_movemask_epi8(or5) != 0 {
- let mut at = sub(ptr.add(VECTOR_SIZE), start_ptr);
- let mask1 = _mm256_movemask_epi8(eqb1);
- let mask2 = _mm256_movemask_epi8(eqb2);
- let mask3 = _mm256_movemask_epi8(eqb3);
- if mask1 != 0 || mask2 != 0 || mask3 != 0 {
- return Some(at + reverse_pos3(mask1, mask2, mask3));
- }
-
- at -= VECTOR_SIZE;
- let mask1 = _mm256_movemask_epi8(eqa1);
- let mask2 = _mm256_movemask_epi8(eqa2);
- let mask3 = _mm256_movemask_epi8(eqa3);
- return Some(at + reverse_pos3(mask1, mask2, mask3));
- }
- }
- while ptr >= start_ptr.add(VECTOR_SIZE) {
- ptr = ptr.sub(VECTOR_SIZE);
- if let Some(i) =
- reverse_search3(start_ptr, end_ptr, ptr, vn1, vn2, vn3)
- {
- return Some(i);
- }
- }
- if ptr > start_ptr {
- debug_assert!(sub(ptr, start_ptr) < VECTOR_SIZE);
- return reverse_search3(start_ptr, end_ptr, start_ptr, vn1, vn2, vn3);
- }
- None
-}
-
-#[target_feature(enable = "avx2")]
-unsafe fn forward_search1(
- start_ptr: *const u8,
- end_ptr: *const u8,
- ptr: *const u8,
- vn1: __m256i,
-) -> Option<usize> {
- debug_assert!(sub(end_ptr, start_ptr) >= VECTOR_SIZE);
- debug_assert!(start_ptr <= ptr);
- debug_assert!(ptr <= end_ptr.sub(VECTOR_SIZE));
-
- let chunk = _mm256_loadu_si256(ptr as *const __m256i);
- let mask = _mm256_movemask_epi8(_mm256_cmpeq_epi8(chunk, vn1));
- if mask != 0 {
- Some(sub(ptr, start_ptr) + forward_pos(mask))
- } else {
- None
- }
-}
-
-#[target_feature(enable = "avx2")]
-unsafe fn forward_search2(
- start_ptr: *const u8,
- end_ptr: *const u8,
- ptr: *const u8,
- vn1: __m256i,
- vn2: __m256i,
-) -> Option<usize> {
- debug_assert!(sub(end_ptr, start_ptr) >= VECTOR_SIZE);
- debug_assert!(start_ptr <= ptr);
- debug_assert!(ptr <= end_ptr.sub(VECTOR_SIZE));
-
- let chunk = _mm256_loadu_si256(ptr as *const __m256i);
- let eq1 = _mm256_cmpeq_epi8(chunk, vn1);
- let eq2 = _mm256_cmpeq_epi8(chunk, vn2);
- if _mm256_movemask_epi8(_mm256_or_si256(eq1, eq2)) != 0 {
- let mask1 = _mm256_movemask_epi8(eq1);
- let mask2 = _mm256_movemask_epi8(eq2);
- Some(sub(ptr, start_ptr) + forward_pos2(mask1, mask2))
- } else {
- None
- }
-}
-
-#[target_feature(enable = "avx2")]
-unsafe fn forward_search3(
- start_ptr: *const u8,
- end_ptr: *const u8,
- ptr: *const u8,
- vn1: __m256i,
- vn2: __m256i,
- vn3: __m256i,
-) -> Option<usize> {
- debug_assert!(sub(end_ptr, start_ptr) >= VECTOR_SIZE);
- debug_assert!(start_ptr <= ptr);
- debug_assert!(ptr <= end_ptr.sub(VECTOR_SIZE));
-
- let chunk = _mm256_loadu_si256(ptr as *const __m256i);
- let eq1 = _mm256_cmpeq_epi8(chunk, vn1);
- let eq2 = _mm256_cmpeq_epi8(chunk, vn2);
- let eq3 = _mm256_cmpeq_epi8(chunk, vn3);
- let or = _mm256_or_si256(eq1, eq2);
- if _mm256_movemask_epi8(_mm256_or_si256(or, eq3)) != 0 {
- let mask1 = _mm256_movemask_epi8(eq1);
- let mask2 = _mm256_movemask_epi8(eq2);
- let mask3 = _mm256_movemask_epi8(eq3);
- Some(sub(ptr, start_ptr) + forward_pos3(mask1, mask2, mask3))
- } else {
- None
- }
-}
-
-#[target_feature(enable = "avx2")]
-unsafe fn reverse_search1(
- start_ptr: *const u8,
- end_ptr: *const u8,
- ptr: *const u8,
- vn1: __m256i,
-) -> Option<usize> {
- debug_assert!(sub(end_ptr, start_ptr) >= VECTOR_SIZE);
- debug_assert!(start_ptr <= ptr);
- debug_assert!(ptr <= end_ptr.sub(VECTOR_SIZE));
-
- let chunk = _mm256_loadu_si256(ptr as *const __m256i);
- let mask = _mm256_movemask_epi8(_mm256_cmpeq_epi8(vn1, chunk));
- if mask != 0 {
- Some(sub(ptr, start_ptr) + reverse_pos(mask))
- } else {
- None
- }
-}
-
-#[target_feature(enable = "avx2")]
-unsafe fn reverse_search2(
- start_ptr: *const u8,
- end_ptr: *const u8,
- ptr: *const u8,
- vn1: __m256i,
- vn2: __m256i,
-) -> Option<usize> {
- debug_assert!(sub(end_ptr, start_ptr) >= VECTOR_SIZE);
- debug_assert!(start_ptr <= ptr);
- debug_assert!(ptr <= end_ptr.sub(VECTOR_SIZE));
-
- let chunk = _mm256_loadu_si256(ptr as *const __m256i);
- let eq1 = _mm256_cmpeq_epi8(chunk, vn1);
- let eq2 = _mm256_cmpeq_epi8(chunk, vn2);
- if _mm256_movemask_epi8(_mm256_or_si256(eq1, eq2)) != 0 {
- let mask1 = _mm256_movemask_epi8(eq1);
- let mask2 = _mm256_movemask_epi8(eq2);
- Some(sub(ptr, start_ptr) + reverse_pos2(mask1, mask2))
- } else {
- None
- }
-}
-
-#[target_feature(enable = "avx2")]
-unsafe fn reverse_search3(
- start_ptr: *const u8,
- end_ptr: *const u8,
- ptr: *const u8,
- vn1: __m256i,
- vn2: __m256i,
- vn3: __m256i,
-) -> Option<usize> {
- debug_assert!(sub(end_ptr, start_ptr) >= VECTOR_SIZE);
- debug_assert!(start_ptr <= ptr);
- debug_assert!(ptr <= end_ptr.sub(VECTOR_SIZE));
-
- let chunk = _mm256_loadu_si256(ptr as *const __m256i);
- let eq1 = _mm256_cmpeq_epi8(chunk, vn1);
- let eq2 = _mm256_cmpeq_epi8(chunk, vn2);
- let eq3 = _mm256_cmpeq_epi8(chunk, vn3);
- let or = _mm256_or_si256(eq1, eq2);
- if _mm256_movemask_epi8(_mm256_or_si256(or, eq3)) != 0 {
- let mask1 = _mm256_movemask_epi8(eq1);
- let mask2 = _mm256_movemask_epi8(eq2);
- let mask3 = _mm256_movemask_epi8(eq3);
- Some(sub(ptr, start_ptr) + reverse_pos3(mask1, mask2, mask3))
- } else {
- None
- }
-}
-
-/// Compute the position of the first matching byte from the given mask. The
-/// position returned is always in the range [0, 31].
-///
-/// The mask given is expected to be the result of _mm256_movemask_epi8.
-fn forward_pos(mask: i32) -> usize {
-    // We are dealing with little endian here, where the most significant byte
-    // is at a higher address. That means the least significant bit that is
-    // set corresponds to the position of our first matching byte, and that
-    // position equals the number of trailing zeros in the mask.
- mask.trailing_zeros() as usize
-}
-
-/// Compute the position of the first matching byte from the given masks. The
-/// position returned is always in the range [0, 31]. Each mask corresponds to
-/// the equality comparison of a single byte.
-///
-/// The masks given are expected to be the result of _mm256_movemask_epi8,
-/// where at least one of the masks is non-zero (i.e., indicates a match).
-fn forward_pos2(mask1: i32, mask2: i32) -> usize {
- debug_assert!(mask1 != 0 || mask2 != 0);
-
- forward_pos(mask1 | mask2)
-}
-
-/// Compute the position of the first matching byte from the given masks. The
-/// position returned is always in the range [0, 31]. Each mask corresponds to
-/// the equality comparison of a single byte.
-///
-/// The masks given are expected to be the result of _mm256_movemask_epi8,
-/// where at least one of the masks is non-zero (i.e., indicates a match).
-fn forward_pos3(mask1: i32, mask2: i32, mask3: i32) -> usize {
- debug_assert!(mask1 != 0 || mask2 != 0 || mask3 != 0);
-
- forward_pos(mask1 | mask2 | mask3)
-}
-
-/// Compute the position of the last matching byte from the given mask. The
-/// position returned is always in the range [0, 31].
-///
-/// The mask given is expected to be the result of _mm256_movemask_epi8.
-fn reverse_pos(mask: i32) -> usize {
- // We are dealing with little endian here, where the most significant byte
- // is at a higher address. That means the most significant bit that is set
- // corresponds to the position of our last matching byte. The position from
- // the end of the mask is therefore the number of leading zeros in a 32
- // bit integer, and the position from the start of the mask is therefore
- // 32 - (leading zeros) - 1.
- VECTOR_SIZE - (mask as u32).leading_zeros() as usize - 1
-}
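
As a quick sanity check of the movemask bit arithmetic above, the two position
helpers can be exercised in isolation. A minimal standalone sketch (not part of
the crate), with a match mask constructed by hand for a 32-lane vector:

    fn forward_pos(mask: i32) -> usize {
        // Lowest set bit = first matching byte.
        mask.trailing_zeros() as usize
    }

    fn reverse_pos(mask: i32) -> usize {
        // Highest set bit = last matching byte; for a 32-lane vector the
        // position is 32 - leading_zeros - 1.
        32 - (mask as u32).leading_zeros() as usize - 1
    }

    fn main() {
        // Suppose bytes 3 and 17 of a 32-byte chunk matched the needle.
        let mask = (1i32 << 3) | (1i32 << 17);
        assert_eq!(3, forward_pos(mask));
        assert_eq!(17, reverse_pos(mask));
    }
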
-
-/// Compute the position of the last matching byte from the given masks. The
-/// position returned is always in the range [0, 31]. Each mask corresponds to
-/// the equality comparison of a single byte.
-///
-/// The masks given are expected to be the result of _mm256_movemask_epi8,
-/// where at least one of the masks is non-zero (i.e., indicates a match).
-fn reverse_pos2(mask1: i32, mask2: i32) -> usize {
- debug_assert!(mask1 != 0 || mask2 != 0);
-
- reverse_pos(mask1 | mask2)
-}
-
-/// Compute the position of the last matching byte from the given masks. The
-/// position returned is always in the range [0, 31]. Each mask corresponds to
-/// the equality comparison of a single byte.
-///
-/// The masks given are expected to be the result of _mm256_movemask_epi8,
-/// where at least one of the masks is non-zero (i.e., indicates a match).
-fn reverse_pos3(mask1: i32, mask2: i32, mask3: i32) -> usize {
- debug_assert!(mask1 != 0 || mask2 != 0 || mask3 != 0);
-
- reverse_pos(mask1 | mask2 | mask3)
-}
-
-/// Subtract `b` from `a` and return the difference. `a` should be greater than
-/// or equal to `b`.
-fn sub(a: *const u8, b: *const u8) -> usize {
- debug_assert!(a >= b);
- (a as usize) - (b as usize)
-}
diff --git a/vendor/memchr/src/memchr/x86/mod.rs b/vendor/memchr/src/memchr/x86/mod.rs
deleted file mode 100644
index aec35dbff..000000000
--- a/vendor/memchr/src/memchr/x86/mod.rs
+++ /dev/null
@@ -1,148 +0,0 @@
-use super::fallback;
-
-// We only use AVX when we can detect at runtime whether it's available, which
-// requires std.
-#[cfg(feature = "std")]
-mod avx;
-mod sse2;
-
-/// This macro employs a gcc-like "ifunc" trick whereby, upon first calling
-/// `memchr` (for example), CPU feature detection will be performed at runtime
-/// to determine the best implementation to use. After CPU feature detection
-/// is done, we replace `memchr`'s function pointer with the selection. Upon
-/// subsequent invocations, the CPU-specific routine is invoked directly, which
-/// skips the CPU feature detection and subsequent branch that's required.
-///
-/// While this typically doesn't matter for rare occurrences or when used on
-/// larger haystacks, `memchr` can be called in tight loops where the overhead
-/// of this branch can actually add up *and is measurable*. This trick was
-/// necessary to bring this implementation up to glibc's speeds for the 'tiny'
-/// benchmarks, for example.
-///
-/// At some point, I expect the Rust ecosystem will get a nice macro for doing
-/// exactly this, at which point, we can replace our hand-jammed version of it.
-///
-/// N.B. The ifunc strategy does prevent function inlining of course, but
-/// on modern CPUs, you'll probably end up with the AVX2 implementation,
-/// which probably can't be inlined anyway---unless you've compiled your
-/// entire program with AVX2 enabled. However, even then, the various memchr
-/// implementations aren't exactly small, so inlining might not help anyway!
-///
-/// # Safety
-///
-/// Callers must ensure that $fnty is a function pointer type.
-#[cfg(feature = "std")]
-macro_rules! unsafe_ifunc {
- ($fnty:ty, $name:ident, $haystack:ident, $($needle:ident),+) => {{
- use std::{mem, sync::atomic::{AtomicPtr, Ordering}};
-
- type FnRaw = *mut ();
-
- static FN: AtomicPtr<()> = AtomicPtr::new(detect as FnRaw);
-
- fn detect($($needle: u8),+, haystack: &[u8]) -> Option<usize> {
- let fun =
- if cfg!(memchr_runtime_avx) && is_x86_feature_detected!("avx2") {
- avx::$name as FnRaw
- } else if cfg!(memchr_runtime_sse2) {
- sse2::$name as FnRaw
- } else {
- fallback::$name as FnRaw
- };
- FN.store(fun as FnRaw, Ordering::Relaxed);
- // SAFETY: By virtue of the caller contract, $fnty is a function
- // pointer, which is always safe to transmute with a *mut ().
-            // Also, if `fun` is the AVX routine, then it is guaranteed to be
- // supported since we checked the avx2 feature.
- unsafe {
- mem::transmute::<FnRaw, $fnty>(fun)($($needle),+, haystack)
- }
- }
-
- // SAFETY: By virtue of the caller contract, $fnty is a function
- // pointer, which is always safe to transmute with a *mut (). Also, if
-        // `fun` is the AVX routine, then it is guaranteed to be supported since
- // we checked the avx2 feature.
- unsafe {
- let fun = FN.load(Ordering::Relaxed);
- mem::transmute::<FnRaw, $fnty>(fun)($($needle),+, $haystack)
- }
- }}
-}
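
Stripped of the macro plumbing, the ifunc pattern described above reduces to an
AtomicPtr that initially points at a detection shim and is overwritten with the
chosen routine on first use. A minimal sketch of that shape (illustrative names,
with the dispatch collapsed to a single fallback routine):

    use std::mem;
    use std::sync::atomic::{AtomicPtr, Ordering};

    type Find = fn(u8, &[u8]) -> Option<usize>;

    fn find_fallback(n1: u8, haystack: &[u8]) -> Option<usize> {
        haystack.iter().position(|&b| b == n1)
    }

    // The first call lands in `detect`, which picks an implementation and
    // replaces the function pointer so later calls skip detection entirely.
    static FIND: AtomicPtr<()> = AtomicPtr::new(detect as *mut ());

    fn detect(n1: u8, haystack: &[u8]) -> Option<usize> {
        // A real selector would consult is_x86_feature_detected! here.
        let chosen: Find = find_fallback;
        FIND.store(chosen as *mut (), Ordering::Relaxed);
        chosen(n1, haystack)
    }

    pub fn find(n1: u8, haystack: &[u8]) -> Option<usize> {
        let f = FIND.load(Ordering::Relaxed);
        // SAFETY: FIND only ever holds a `Find` function pointer.
        unsafe { mem::transmute::<*mut (), Find>(f)(n1, haystack) }
    }
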
-
-/// When std isn't available to provide runtime CPU feature detection, or if
-/// runtime CPU feature detection has been explicitly disabled, then just
-/// call our optimized SSE2 routine directly. SSE2 is available on all x86_64
-/// targets, so no CPU feature detection is necessary.
-///
-/// # Safety
-///
-/// There are no safety requirements for this definition of the macro. It is
-/// safe for all inputs since it is restricted to either the fallback routine
-/// or the SSE routine, which is always safe to call on x86_64.
-#[cfg(not(feature = "std"))]
-macro_rules! unsafe_ifunc {
- ($fnty:ty, $name:ident, $haystack:ident, $($needle:ident),+) => {{
- if cfg!(memchr_runtime_sse2) {
- unsafe { sse2::$name($($needle),+, $haystack) }
- } else {
- fallback::$name($($needle),+, $haystack)
- }
- }}
-}
-
-#[inline(always)]
-pub fn memchr(n1: u8, haystack: &[u8]) -> Option<usize> {
- unsafe_ifunc!(fn(u8, &[u8]) -> Option<usize>, memchr, haystack, n1)
-}
-
-#[inline(always)]
-pub fn memchr2(n1: u8, n2: u8, haystack: &[u8]) -> Option<usize> {
- unsafe_ifunc!(
- fn(u8, u8, &[u8]) -> Option<usize>,
- memchr2,
- haystack,
- n1,
- n2
- )
-}
-
-#[inline(always)]
-pub fn memchr3(n1: u8, n2: u8, n3: u8, haystack: &[u8]) -> Option<usize> {
- unsafe_ifunc!(
- fn(u8, u8, u8, &[u8]) -> Option<usize>,
- memchr3,
- haystack,
- n1,
- n2,
- n3
- )
-}
-
-#[inline(always)]
-pub fn memrchr(n1: u8, haystack: &[u8]) -> Option<usize> {
- unsafe_ifunc!(fn(u8, &[u8]) -> Option<usize>, memrchr, haystack, n1)
-}
-
-#[inline(always)]
-pub fn memrchr2(n1: u8, n2: u8, haystack: &[u8]) -> Option<usize> {
- unsafe_ifunc!(
- fn(u8, u8, &[u8]) -> Option<usize>,
- memrchr2,
- haystack,
- n1,
- n2
- )
-}
-
-#[inline(always)]
-pub fn memrchr3(n1: u8, n2: u8, n3: u8, haystack: &[u8]) -> Option<usize> {
- unsafe_ifunc!(
- fn(u8, u8, u8, &[u8]) -> Option<usize>,
- memrchr3,
- haystack,
- n1,
- n2,
- n3
- )
-}
diff --git a/vendor/memchr/src/memchr/x86/sse2.rs b/vendor/memchr/src/memchr/x86/sse2.rs
deleted file mode 100644
index b7b3a9328..000000000
--- a/vendor/memchr/src/memchr/x86/sse2.rs
+++ /dev/null
@@ -1,791 +0,0 @@
-use core::{arch::x86_64::*, cmp, mem::size_of};
-
-const VECTOR_SIZE: usize = size_of::<__m128i>();
-const VECTOR_ALIGN: usize = VECTOR_SIZE - 1;
-
-// The number of bytes to loop at in one iteration of memchr/memrchr.
-const LOOP_SIZE: usize = 4 * VECTOR_SIZE;
-
-// The number of bytes to loop at in one iteration of memchr2/memrchr2 and
-// memchr3/memrchr3. There was no observable difference between 64 and 32 bytes
-// in benchmarks. memchr3 in particular only gets a very slight speed up from
-// the loop unrolling.
-const LOOP_SIZE2: usize = 2 * VECTOR_SIZE;
-
-#[target_feature(enable = "sse2")]
-pub unsafe fn memchr(n1: u8, haystack: &[u8]) -> Option<usize> {
- // What follows is a fast SSE2-only algorithm to detect the position of
- // `n1` in `haystack` if it exists. From what I know, this is the "classic"
- // algorithm. I believe it can be found in places like glibc and Go's
- // standard library. It appears to be well known and is elaborated on in
- // more detail here: https://gms.tf/stdfind-and-memchr-optimizations.html
- //
- // While this routine is very long, the basic idea is actually very simple
- // and can be expressed straight-forwardly in pseudo code:
- //
- // needle = (n1 << 15) | (n1 << 14) | ... | (n1 << 1) | n1
- // // Note: shift amount in bytes
- //
- // while i <= haystack.len() - 16:
- // // A 16 byte vector. Each byte in chunk corresponds to a byte in
- // // the haystack.
- // chunk = haystack[i:i+16]
- // // Compare bytes in needle with bytes in chunk. The result is a 16
- // // byte chunk where each byte is 0xFF if the corresponding bytes
- // // in needle and chunk were equal, or 0x00 otherwise.
- // eqs = cmpeq(needle, chunk)
- // // Return a 32 bit integer where the most significant 16 bits
- // // are always 0 and the lower 16 bits correspond to whether the
-    //        // most significant bit in the corresponding byte in `eqs` is set.
- // // In other words, `mask as u16` has bit i set if and only if
- // // needle[i] == chunk[i].
- // mask = movemask(eqs)
- //
- // // Mask is 0 if there is no match, and non-zero otherwise.
- // if mask != 0:
- // // trailing_zeros tells us the position of the least significant
- // // bit that is set.
- // return i + trailing_zeros(mask)
- //
- // // haystack length may not be a multiple of 16, so search the rest.
- // while i < haystack.len():
- // if haystack[i] == n1:
- // return i
- //
- // // No match found.
- // return NULL
- //
- // In fact, we could loosely translate the above code to Rust line-for-line
- // and it would be a pretty fast algorithm. But, we pull out all the stops
- // to go as fast as possible:
- //
- // 1. We use aligned loads. That is, we do some finagling to make sure our
- // primary loop not only proceeds in increments of 16 bytes, but that
- // the address of haystack's pointer that we dereference is aligned to
-    //    16 bytes. 16 is a magic number here because it is the size of an
-    //    SSE2 128-bit vector. (For the AVX2 algorithm, 32 is the magic number.)
- // Therefore, to get aligned loads, our pointer's address must be evenly
- // divisible by 16.
- // 2. Our primary loop proceeds 64 bytes at a time instead of 16. It's
- // kind of like loop unrolling, but we combine the equality comparisons
- // using a vector OR such that we only need to extract a single mask to
- // determine whether a match exists or not. If so, then we do some
- // book-keeping to determine the precise location but otherwise mush on.
- // 3. We use our "chunk" comparison routine in as many places as possible,
- // even if it means using unaligned loads. In particular, if haystack
- // starts with an unaligned address, then we do an unaligned load to
- // search the first 16 bytes. We then start our primary loop at the
- // smallest subsequent aligned address, which will actually overlap with
- // previously searched bytes. But we're OK with that. We do a similar
- // dance at the end of our primary loop. Finally, to avoid a
- // byte-at-a-time loop at the end, we do a final 16 byte unaligned load
- // that may overlap with a previous load. This is OK because it converts
- // a loop into a small number of very fast vector instructions.
- //
- // The primary downside of this algorithm is that it's effectively
- // completely unsafe. Therefore, we have to be super careful to avoid
- // undefined behavior:
- //
- // 1. We use raw pointers everywhere. Not only does dereferencing a pointer
- // require the pointer to be valid, but we actually can't even store the
- // address of an invalid pointer (unless it's 1 past the end of
- // haystack) without sacrificing performance.
- // 2. _mm_loadu_si128 is used when you don't care about alignment, and
- // _mm_load_si128 is used when you do care. You cannot use the latter
- // on unaligned pointers.
- // 3. We make liberal use of debug_assert! to check assumptions.
- // 4. We make a concerted effort to stick with pointers instead of indices.
- // Indices are nicer because there's less to worry about with them (see
- // above about pointer offsets), but I could not get the compiler to
- // produce as good of code as what the below produces. In any case,
- // pointers are what we really care about here, and alignment is
- // expressed a bit more naturally with them.
- //
- // In general, most of the algorithms in this crate have a similar
- // structure to what you see below, so this comment applies fairly well to
- // all of them.
-
- let vn1 = _mm_set1_epi8(n1 as i8);
- let len = haystack.len();
- let loop_size = cmp::min(LOOP_SIZE, len);
- let start_ptr = haystack.as_ptr();
- let end_ptr = start_ptr.add(haystack.len());
- let mut ptr = start_ptr;
-
- if haystack.len() < VECTOR_SIZE {
- while ptr < end_ptr {
- if *ptr == n1 {
- return Some(sub(ptr, start_ptr));
- }
- ptr = ptr.offset(1);
- }
- return None;
- }
-
- if let Some(i) = forward_search1(start_ptr, end_ptr, ptr, vn1) {
- return Some(i);
- }
-
- ptr = ptr.add(VECTOR_SIZE - (start_ptr as usize & VECTOR_ALIGN));
- debug_assert!(ptr > start_ptr && end_ptr.sub(VECTOR_SIZE) >= start_ptr);
- while loop_size == LOOP_SIZE && ptr <= end_ptr.sub(loop_size) {
- debug_assert_eq!(0, (ptr as usize) % VECTOR_SIZE);
-
- let a = _mm_load_si128(ptr as *const __m128i);
- let b = _mm_load_si128(ptr.add(VECTOR_SIZE) as *const __m128i);
- let c = _mm_load_si128(ptr.add(2 * VECTOR_SIZE) as *const __m128i);
- let d = _mm_load_si128(ptr.add(3 * VECTOR_SIZE) as *const __m128i);
- let eqa = _mm_cmpeq_epi8(vn1, a);
- let eqb = _mm_cmpeq_epi8(vn1, b);
- let eqc = _mm_cmpeq_epi8(vn1, c);
- let eqd = _mm_cmpeq_epi8(vn1, d);
- let or1 = _mm_or_si128(eqa, eqb);
- let or2 = _mm_or_si128(eqc, eqd);
- let or3 = _mm_or_si128(or1, or2);
- if _mm_movemask_epi8(or3) != 0 {
- let mut at = sub(ptr, start_ptr);
- let mask = _mm_movemask_epi8(eqa);
- if mask != 0 {
- return Some(at + forward_pos(mask));
- }
-
- at += VECTOR_SIZE;
- let mask = _mm_movemask_epi8(eqb);
- if mask != 0 {
- return Some(at + forward_pos(mask));
- }
-
- at += VECTOR_SIZE;
- let mask = _mm_movemask_epi8(eqc);
- if mask != 0 {
- return Some(at + forward_pos(mask));
- }
-
- at += VECTOR_SIZE;
- let mask = _mm_movemask_epi8(eqd);
- debug_assert!(mask != 0);
- return Some(at + forward_pos(mask));
- }
- ptr = ptr.add(loop_size);
- }
- while ptr <= end_ptr.sub(VECTOR_SIZE) {
- debug_assert!(sub(end_ptr, ptr) >= VECTOR_SIZE);
-
- if let Some(i) = forward_search1(start_ptr, end_ptr, ptr, vn1) {
- return Some(i);
- }
- ptr = ptr.add(VECTOR_SIZE);
- }
- if ptr < end_ptr {
- debug_assert!(sub(end_ptr, ptr) < VECTOR_SIZE);
- ptr = ptr.sub(VECTOR_SIZE - sub(end_ptr, ptr));
- debug_assert_eq!(sub(end_ptr, ptr), VECTOR_SIZE);
-
- return forward_search1(start_ptr, end_ptr, ptr, vn1);
- }
- None
-}
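
The pseudocode in the long comment above translates almost line-for-line into
safe Rust. A rough scalar rendering for illustration (the name and structure
are ours; it builds the movemask by hand over 16-byte chunks and skips the
alignment and unrolling tricks):

    fn memchr_chunked(n1: u8, haystack: &[u8]) -> Option<usize> {
        const CHUNK: usize = 16;
        let mut i = 0;
        // Process 16-byte chunks, building a movemask-style bitmask by hand.
        while i + CHUNK <= haystack.len() {
            let mut mask: u32 = 0;
            for (j, &b) in haystack[i..i + CHUNK].iter().enumerate() {
                if b == n1 {
                    mask |= 1 << j;
                }
            }
            if mask != 0 {
                return Some(i + mask.trailing_zeros() as usize);
            }
            i += CHUNK;
        }
        // Haystack length may not be a multiple of 16; finish byte-wise.
        haystack[i..].iter().position(|&b| b == n1).map(|j| i + j)
    }

    fn main() {
        let haystack = b"aaaaaaaaaaaaaaaaaaaaz"; // twenty a's, then z
        assert_eq!(Some(20), memchr_chunked(b'z', haystack));
        assert_eq!(None, memchr_chunked(b'q', haystack));
    }
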
-
-#[target_feature(enable = "sse2")]
-pub unsafe fn memchr2(n1: u8, n2: u8, haystack: &[u8]) -> Option<usize> {
- let vn1 = _mm_set1_epi8(n1 as i8);
- let vn2 = _mm_set1_epi8(n2 as i8);
- let len = haystack.len();
- let loop_size = cmp::min(LOOP_SIZE2, len);
- let start_ptr = haystack.as_ptr();
- let end_ptr = start_ptr.add(haystack.len());
- let mut ptr = start_ptr;
-
- if haystack.len() < VECTOR_SIZE {
- while ptr < end_ptr {
- if *ptr == n1 || *ptr == n2 {
- return Some(sub(ptr, start_ptr));
- }
- ptr = ptr.offset(1);
- }
- return None;
- }
-
- if let Some(i) = forward_search2(start_ptr, end_ptr, ptr, vn1, vn2) {
- return Some(i);
- }
-
- ptr = ptr.add(VECTOR_SIZE - (start_ptr as usize & VECTOR_ALIGN));
- debug_assert!(ptr > start_ptr && end_ptr.sub(VECTOR_SIZE) >= start_ptr);
- while loop_size == LOOP_SIZE2 && ptr <= end_ptr.sub(loop_size) {
- debug_assert_eq!(0, (ptr as usize) % VECTOR_SIZE);
-
- let a = _mm_load_si128(ptr as *const __m128i);
- let b = _mm_load_si128(ptr.add(VECTOR_SIZE) as *const __m128i);
- let eqa1 = _mm_cmpeq_epi8(vn1, a);
- let eqb1 = _mm_cmpeq_epi8(vn1, b);
- let eqa2 = _mm_cmpeq_epi8(vn2, a);
- let eqb2 = _mm_cmpeq_epi8(vn2, b);
- let or1 = _mm_or_si128(eqa1, eqb1);
- let or2 = _mm_or_si128(eqa2, eqb2);
- let or3 = _mm_or_si128(or1, or2);
- if _mm_movemask_epi8(or3) != 0 {
- let mut at = sub(ptr, start_ptr);
- let mask1 = _mm_movemask_epi8(eqa1);
- let mask2 = _mm_movemask_epi8(eqa2);
- if mask1 != 0 || mask2 != 0 {
- return Some(at + forward_pos2(mask1, mask2));
- }
-
- at += VECTOR_SIZE;
- let mask1 = _mm_movemask_epi8(eqb1);
- let mask2 = _mm_movemask_epi8(eqb2);
- return Some(at + forward_pos2(mask1, mask2));
- }
- ptr = ptr.add(loop_size);
- }
- while ptr <= end_ptr.sub(VECTOR_SIZE) {
- if let Some(i) = forward_search2(start_ptr, end_ptr, ptr, vn1, vn2) {
- return Some(i);
- }
- ptr = ptr.add(VECTOR_SIZE);
- }
- if ptr < end_ptr {
- debug_assert!(sub(end_ptr, ptr) < VECTOR_SIZE);
- ptr = ptr.sub(VECTOR_SIZE - sub(end_ptr, ptr));
- debug_assert_eq!(sub(end_ptr, ptr), VECTOR_SIZE);
-
- return forward_search2(start_ptr, end_ptr, ptr, vn1, vn2);
- }
- None
-}
-
-#[target_feature(enable = "sse2")]
-pub unsafe fn memchr3(
- n1: u8,
- n2: u8,
- n3: u8,
- haystack: &[u8],
-) -> Option<usize> {
- let vn1 = _mm_set1_epi8(n1 as i8);
- let vn2 = _mm_set1_epi8(n2 as i8);
- let vn3 = _mm_set1_epi8(n3 as i8);
- let len = haystack.len();
- let loop_size = cmp::min(LOOP_SIZE2, len);
- let start_ptr = haystack.as_ptr();
- let end_ptr = start_ptr.add(haystack.len());
- let mut ptr = start_ptr;
-
- if haystack.len() < VECTOR_SIZE {
- while ptr < end_ptr {
- if *ptr == n1 || *ptr == n2 || *ptr == n3 {
- return Some(sub(ptr, start_ptr));
- }
- ptr = ptr.offset(1);
- }
- return None;
- }
-
- if let Some(i) = forward_search3(start_ptr, end_ptr, ptr, vn1, vn2, vn3) {
- return Some(i);
- }
-
- ptr = ptr.add(VECTOR_SIZE - (start_ptr as usize & VECTOR_ALIGN));
- debug_assert!(ptr > start_ptr && end_ptr.sub(VECTOR_SIZE) >= start_ptr);
- while loop_size == LOOP_SIZE2 && ptr <= end_ptr.sub(loop_size) {
- debug_assert_eq!(0, (ptr as usize) % VECTOR_SIZE);
-
- let a = _mm_load_si128(ptr as *const __m128i);
- let b = _mm_load_si128(ptr.add(VECTOR_SIZE) as *const __m128i);
- let eqa1 = _mm_cmpeq_epi8(vn1, a);
- let eqb1 = _mm_cmpeq_epi8(vn1, b);
- let eqa2 = _mm_cmpeq_epi8(vn2, a);
- let eqb2 = _mm_cmpeq_epi8(vn2, b);
- let eqa3 = _mm_cmpeq_epi8(vn3, a);
- let eqb3 = _mm_cmpeq_epi8(vn3, b);
- let or1 = _mm_or_si128(eqa1, eqb1);
- let or2 = _mm_or_si128(eqa2, eqb2);
- let or3 = _mm_or_si128(eqa3, eqb3);
- let or4 = _mm_or_si128(or1, or2);
- let or5 = _mm_or_si128(or3, or4);
- if _mm_movemask_epi8(or5) != 0 {
- let mut at = sub(ptr, start_ptr);
- let mask1 = _mm_movemask_epi8(eqa1);
- let mask2 = _mm_movemask_epi8(eqa2);
- let mask3 = _mm_movemask_epi8(eqa3);
- if mask1 != 0 || mask2 != 0 || mask3 != 0 {
- return Some(at + forward_pos3(mask1, mask2, mask3));
- }
-
- at += VECTOR_SIZE;
- let mask1 = _mm_movemask_epi8(eqb1);
- let mask2 = _mm_movemask_epi8(eqb2);
- let mask3 = _mm_movemask_epi8(eqb3);
- return Some(at + forward_pos3(mask1, mask2, mask3));
- }
- ptr = ptr.add(loop_size);
- }
- while ptr <= end_ptr.sub(VECTOR_SIZE) {
- if let Some(i) =
- forward_search3(start_ptr, end_ptr, ptr, vn1, vn2, vn3)
- {
- return Some(i);
- }
- ptr = ptr.add(VECTOR_SIZE);
- }
- if ptr < end_ptr {
- debug_assert!(sub(end_ptr, ptr) < VECTOR_SIZE);
- ptr = ptr.sub(VECTOR_SIZE - sub(end_ptr, ptr));
- debug_assert_eq!(sub(end_ptr, ptr), VECTOR_SIZE);
-
- return forward_search3(start_ptr, end_ptr, ptr, vn1, vn2, vn3);
- }
- None
-}
-
-#[target_feature(enable = "sse2")]
-pub unsafe fn memrchr(n1: u8, haystack: &[u8]) -> Option<usize> {
- let vn1 = _mm_set1_epi8(n1 as i8);
- let len = haystack.len();
- let loop_size = cmp::min(LOOP_SIZE, len);
- let start_ptr = haystack.as_ptr();
- let end_ptr = start_ptr.add(haystack.len());
- let mut ptr = end_ptr;
-
- if haystack.len() < VECTOR_SIZE {
- while ptr > start_ptr {
- ptr = ptr.offset(-1);
- if *ptr == n1 {
- return Some(sub(ptr, start_ptr));
- }
- }
- return None;
- }
-
- ptr = ptr.sub(VECTOR_SIZE);
- if let Some(i) = reverse_search1(start_ptr, end_ptr, ptr, vn1) {
- return Some(i);
- }
-
- ptr = (end_ptr as usize & !VECTOR_ALIGN) as *const u8;
- debug_assert!(start_ptr <= ptr && ptr <= end_ptr);
- while loop_size == LOOP_SIZE && ptr >= start_ptr.add(loop_size) {
- debug_assert_eq!(0, (ptr as usize) % VECTOR_SIZE);
-
- ptr = ptr.sub(loop_size);
- let a = _mm_load_si128(ptr as *const __m128i);
- let b = _mm_load_si128(ptr.add(VECTOR_SIZE) as *const __m128i);
- let c = _mm_load_si128(ptr.add(2 * VECTOR_SIZE) as *const __m128i);
- let d = _mm_load_si128(ptr.add(3 * VECTOR_SIZE) as *const __m128i);
- let eqa = _mm_cmpeq_epi8(vn1, a);
- let eqb = _mm_cmpeq_epi8(vn1, b);
- let eqc = _mm_cmpeq_epi8(vn1, c);
- let eqd = _mm_cmpeq_epi8(vn1, d);
- let or1 = _mm_or_si128(eqa, eqb);
- let or2 = _mm_or_si128(eqc, eqd);
- let or3 = _mm_or_si128(or1, or2);
- if _mm_movemask_epi8(or3) != 0 {
- let mut at = sub(ptr.add(3 * VECTOR_SIZE), start_ptr);
- let mask = _mm_movemask_epi8(eqd);
- if mask != 0 {
- return Some(at + reverse_pos(mask));
- }
-
- at -= VECTOR_SIZE;
- let mask = _mm_movemask_epi8(eqc);
- if mask != 0 {
- return Some(at + reverse_pos(mask));
- }
-
- at -= VECTOR_SIZE;
- let mask = _mm_movemask_epi8(eqb);
- if mask != 0 {
- return Some(at + reverse_pos(mask));
- }
-
- at -= VECTOR_SIZE;
- let mask = _mm_movemask_epi8(eqa);
- debug_assert!(mask != 0);
- return Some(at + reverse_pos(mask));
- }
- }
- while ptr >= start_ptr.add(VECTOR_SIZE) {
- ptr = ptr.sub(VECTOR_SIZE);
- if let Some(i) = reverse_search1(start_ptr, end_ptr, ptr, vn1) {
- return Some(i);
- }
- }
- if ptr > start_ptr {
- debug_assert!(sub(ptr, start_ptr) < VECTOR_SIZE);
- return reverse_search1(start_ptr, end_ptr, start_ptr, vn1);
- }
- None
-}
-
-#[target_feature(enable = "sse2")]
-pub unsafe fn memrchr2(n1: u8, n2: u8, haystack: &[u8]) -> Option<usize> {
- let vn1 = _mm_set1_epi8(n1 as i8);
- let vn2 = _mm_set1_epi8(n2 as i8);
- let len = haystack.len();
- let loop_size = cmp::min(LOOP_SIZE2, len);
- let start_ptr = haystack.as_ptr();
- let end_ptr = start_ptr.add(haystack.len());
- let mut ptr = end_ptr;
-
- if haystack.len() < VECTOR_SIZE {
- while ptr > start_ptr {
- ptr = ptr.offset(-1);
- if *ptr == n1 || *ptr == n2 {
- return Some(sub(ptr, start_ptr));
- }
- }
- return None;
- }
-
- ptr = ptr.sub(VECTOR_SIZE);
- if let Some(i) = reverse_search2(start_ptr, end_ptr, ptr, vn1, vn2) {
- return Some(i);
- }
-
- ptr = (end_ptr as usize & !VECTOR_ALIGN) as *const u8;
- debug_assert!(start_ptr <= ptr && ptr <= end_ptr);
- while loop_size == LOOP_SIZE2 && ptr >= start_ptr.add(loop_size) {
- debug_assert_eq!(0, (ptr as usize) % VECTOR_SIZE);
-
- ptr = ptr.sub(loop_size);
- let a = _mm_load_si128(ptr as *const __m128i);
- let b = _mm_load_si128(ptr.add(VECTOR_SIZE) as *const __m128i);
- let eqa1 = _mm_cmpeq_epi8(vn1, a);
- let eqb1 = _mm_cmpeq_epi8(vn1, b);
- let eqa2 = _mm_cmpeq_epi8(vn2, a);
- let eqb2 = _mm_cmpeq_epi8(vn2, b);
- let or1 = _mm_or_si128(eqa1, eqb1);
- let or2 = _mm_or_si128(eqa2, eqb2);
- let or3 = _mm_or_si128(or1, or2);
- if _mm_movemask_epi8(or3) != 0 {
- let mut at = sub(ptr.add(VECTOR_SIZE), start_ptr);
- let mask1 = _mm_movemask_epi8(eqb1);
- let mask2 = _mm_movemask_epi8(eqb2);
- if mask1 != 0 || mask2 != 0 {
- return Some(at + reverse_pos2(mask1, mask2));
- }
-
- at -= VECTOR_SIZE;
- let mask1 = _mm_movemask_epi8(eqa1);
- let mask2 = _mm_movemask_epi8(eqa2);
- return Some(at + reverse_pos2(mask1, mask2));
- }
- }
- while ptr >= start_ptr.add(VECTOR_SIZE) {
- ptr = ptr.sub(VECTOR_SIZE);
- if let Some(i) = reverse_search2(start_ptr, end_ptr, ptr, vn1, vn2) {
- return Some(i);
- }
- }
- if ptr > start_ptr {
- debug_assert!(sub(ptr, start_ptr) < VECTOR_SIZE);
- return reverse_search2(start_ptr, end_ptr, start_ptr, vn1, vn2);
- }
- None
-}
-
-#[target_feature(enable = "sse2")]
-pub unsafe fn memrchr3(
- n1: u8,
- n2: u8,
- n3: u8,
- haystack: &[u8],
-) -> Option<usize> {
- let vn1 = _mm_set1_epi8(n1 as i8);
- let vn2 = _mm_set1_epi8(n2 as i8);
- let vn3 = _mm_set1_epi8(n3 as i8);
- let len = haystack.len();
- let loop_size = cmp::min(LOOP_SIZE2, len);
- let start_ptr = haystack.as_ptr();
- let end_ptr = start_ptr.add(haystack.len());
- let mut ptr = end_ptr;
-
- if haystack.len() < VECTOR_SIZE {
- while ptr > start_ptr {
- ptr = ptr.offset(-1);
- if *ptr == n1 || *ptr == n2 || *ptr == n3 {
- return Some(sub(ptr, start_ptr));
- }
- }
- return None;
- }
-
- ptr = ptr.sub(VECTOR_SIZE);
- if let Some(i) = reverse_search3(start_ptr, end_ptr, ptr, vn1, vn2, vn3) {
- return Some(i);
- }
-
- ptr = (end_ptr as usize & !VECTOR_ALIGN) as *const u8;
- debug_assert!(start_ptr <= ptr && ptr <= end_ptr);
- while loop_size == LOOP_SIZE2 && ptr >= start_ptr.add(loop_size) {
- debug_assert_eq!(0, (ptr as usize) % VECTOR_SIZE);
-
- ptr = ptr.sub(loop_size);
- let a = _mm_load_si128(ptr as *const __m128i);
- let b = _mm_load_si128(ptr.add(VECTOR_SIZE) as *const __m128i);
- let eqa1 = _mm_cmpeq_epi8(vn1, a);
- let eqb1 = _mm_cmpeq_epi8(vn1, b);
- let eqa2 = _mm_cmpeq_epi8(vn2, a);
- let eqb2 = _mm_cmpeq_epi8(vn2, b);
- let eqa3 = _mm_cmpeq_epi8(vn3, a);
- let eqb3 = _mm_cmpeq_epi8(vn3, b);
- let or1 = _mm_or_si128(eqa1, eqb1);
- let or2 = _mm_or_si128(eqa2, eqb2);
- let or3 = _mm_or_si128(eqa3, eqb3);
- let or4 = _mm_or_si128(or1, or2);
- let or5 = _mm_or_si128(or3, or4);
- if _mm_movemask_epi8(or5) != 0 {
- let mut at = sub(ptr.add(VECTOR_SIZE), start_ptr);
- let mask1 = _mm_movemask_epi8(eqb1);
- let mask2 = _mm_movemask_epi8(eqb2);
- let mask3 = _mm_movemask_epi8(eqb3);
- if mask1 != 0 || mask2 != 0 || mask3 != 0 {
- return Some(at + reverse_pos3(mask1, mask2, mask3));
- }
-
- at -= VECTOR_SIZE;
- let mask1 = _mm_movemask_epi8(eqa1);
- let mask2 = _mm_movemask_epi8(eqa2);
- let mask3 = _mm_movemask_epi8(eqa3);
- return Some(at + reverse_pos3(mask1, mask2, mask3));
- }
- }
- while ptr >= start_ptr.add(VECTOR_SIZE) {
- ptr = ptr.sub(VECTOR_SIZE);
- if let Some(i) =
- reverse_search3(start_ptr, end_ptr, ptr, vn1, vn2, vn3)
- {
- return Some(i);
- }
- }
- if ptr > start_ptr {
- debug_assert!(sub(ptr, start_ptr) < VECTOR_SIZE);
- return reverse_search3(start_ptr, end_ptr, start_ptr, vn1, vn2, vn3);
- }
- None
-}
-
-#[target_feature(enable = "sse2")]
-pub unsafe fn forward_search1(
- start_ptr: *const u8,
- end_ptr: *const u8,
- ptr: *const u8,
- vn1: __m128i,
-) -> Option<usize> {
- debug_assert!(sub(end_ptr, start_ptr) >= VECTOR_SIZE);
- debug_assert!(start_ptr <= ptr);
- debug_assert!(ptr <= end_ptr.sub(VECTOR_SIZE));
-
- let chunk = _mm_loadu_si128(ptr as *const __m128i);
- let mask = _mm_movemask_epi8(_mm_cmpeq_epi8(chunk, vn1));
- if mask != 0 {
- Some(sub(ptr, start_ptr) + forward_pos(mask))
- } else {
- None
- }
-}
-
-#[target_feature(enable = "sse2")]
-unsafe fn forward_search2(
- start_ptr: *const u8,
- end_ptr: *const u8,
- ptr: *const u8,
- vn1: __m128i,
- vn2: __m128i,
-) -> Option<usize> {
- debug_assert!(sub(end_ptr, start_ptr) >= VECTOR_SIZE);
- debug_assert!(start_ptr <= ptr);
- debug_assert!(ptr <= end_ptr.sub(VECTOR_SIZE));
-
- let chunk = _mm_loadu_si128(ptr as *const __m128i);
- let eq1 = _mm_cmpeq_epi8(chunk, vn1);
- let eq2 = _mm_cmpeq_epi8(chunk, vn2);
- if _mm_movemask_epi8(_mm_or_si128(eq1, eq2)) != 0 {
- let mask1 = _mm_movemask_epi8(eq1);
- let mask2 = _mm_movemask_epi8(eq2);
- Some(sub(ptr, start_ptr) + forward_pos2(mask1, mask2))
- } else {
- None
- }
-}
-
-#[target_feature(enable = "sse2")]
-pub unsafe fn forward_search3(
- start_ptr: *const u8,
- end_ptr: *const u8,
- ptr: *const u8,
- vn1: __m128i,
- vn2: __m128i,
- vn3: __m128i,
-) -> Option<usize> {
- debug_assert!(sub(end_ptr, start_ptr) >= VECTOR_SIZE);
- debug_assert!(start_ptr <= ptr);
- debug_assert!(ptr <= end_ptr.sub(VECTOR_SIZE));
-
- let chunk = _mm_loadu_si128(ptr as *const __m128i);
- let eq1 = _mm_cmpeq_epi8(chunk, vn1);
- let eq2 = _mm_cmpeq_epi8(chunk, vn2);
- let eq3 = _mm_cmpeq_epi8(chunk, vn3);
- let or = _mm_or_si128(eq1, eq2);
- if _mm_movemask_epi8(_mm_or_si128(or, eq3)) != 0 {
- let mask1 = _mm_movemask_epi8(eq1);
- let mask2 = _mm_movemask_epi8(eq2);
- let mask3 = _mm_movemask_epi8(eq3);
- Some(sub(ptr, start_ptr) + forward_pos3(mask1, mask2, mask3))
- } else {
- None
- }
-}
-
-#[target_feature(enable = "sse2")]
-unsafe fn reverse_search1(
- start_ptr: *const u8,
- end_ptr: *const u8,
- ptr: *const u8,
- vn1: __m128i,
-) -> Option<usize> {
- debug_assert!(sub(end_ptr, start_ptr) >= VECTOR_SIZE);
- debug_assert!(start_ptr <= ptr);
- debug_assert!(ptr <= end_ptr.sub(VECTOR_SIZE));
-
- let chunk = _mm_loadu_si128(ptr as *const __m128i);
- let mask = _mm_movemask_epi8(_mm_cmpeq_epi8(vn1, chunk));
- if mask != 0 {
- Some(sub(ptr, start_ptr) + reverse_pos(mask))
- } else {
- None
- }
-}
-
-#[target_feature(enable = "sse2")]
-unsafe fn reverse_search2(
- start_ptr: *const u8,
- end_ptr: *const u8,
- ptr: *const u8,
- vn1: __m128i,
- vn2: __m128i,
-) -> Option<usize> {
- debug_assert!(sub(end_ptr, start_ptr) >= VECTOR_SIZE);
- debug_assert!(start_ptr <= ptr);
- debug_assert!(ptr <= end_ptr.sub(VECTOR_SIZE));
-
- let chunk = _mm_loadu_si128(ptr as *const __m128i);
- let eq1 = _mm_cmpeq_epi8(chunk, vn1);
- let eq2 = _mm_cmpeq_epi8(chunk, vn2);
- if _mm_movemask_epi8(_mm_or_si128(eq1, eq2)) != 0 {
- let mask1 = _mm_movemask_epi8(eq1);
- let mask2 = _mm_movemask_epi8(eq2);
- Some(sub(ptr, start_ptr) + reverse_pos2(mask1, mask2))
- } else {
- None
- }
-}
-
-#[target_feature(enable = "sse2")]
-unsafe fn reverse_search3(
- start_ptr: *const u8,
- end_ptr: *const u8,
- ptr: *const u8,
- vn1: __m128i,
- vn2: __m128i,
- vn3: __m128i,
-) -> Option<usize> {
- debug_assert!(sub(end_ptr, start_ptr) >= VECTOR_SIZE);
- debug_assert!(start_ptr <= ptr);
- debug_assert!(ptr <= end_ptr.sub(VECTOR_SIZE));
-
- let chunk = _mm_loadu_si128(ptr as *const __m128i);
- let eq1 = _mm_cmpeq_epi8(chunk, vn1);
- let eq2 = _mm_cmpeq_epi8(chunk, vn2);
- let eq3 = _mm_cmpeq_epi8(chunk, vn3);
- let or = _mm_or_si128(eq1, eq2);
- if _mm_movemask_epi8(_mm_or_si128(or, eq3)) != 0 {
- let mask1 = _mm_movemask_epi8(eq1);
- let mask2 = _mm_movemask_epi8(eq2);
- let mask3 = _mm_movemask_epi8(eq3);
- Some(sub(ptr, start_ptr) + reverse_pos3(mask1, mask2, mask3))
- } else {
- None
- }
-}
-
-/// Compute the position of the first matching byte from the given mask. The
-/// position returned is always in the range [0, 15].
-///
-/// The mask given is expected to be the result of _mm_movemask_epi8.
-fn forward_pos(mask: i32) -> usize {
-    // We are dealing with little endian here, where the most significant byte
-    // is at a higher address. That means the least significant bit that is
-    // set corresponds to the position of our first matching byte, and that
-    // position equals the number of trailing zeros in the mask.
- mask.trailing_zeros() as usize
-}
-
-/// Compute the position of the first matching byte from the given masks. The
-/// position returned is always in the range [0, 15]. Each mask corresponds to
-/// the equality comparison of a single byte.
-///
-/// The masks given are expected to be the result of _mm_movemask_epi8, where
-/// at least one of the masks is non-zero (i.e., indicates a match).
-fn forward_pos2(mask1: i32, mask2: i32) -> usize {
- debug_assert!(mask1 != 0 || mask2 != 0);
-
- forward_pos(mask1 | mask2)
-}
-
-/// Compute the position of the first matching byte from the given masks. The
-/// position returned is always in the range [0, 15]. Each mask corresponds to
-/// the equality comparison of a single byte.
-///
-/// The masks given are expected to be the result of _mm_movemask_epi8, where
-/// at least one of the masks is non-zero (i.e., indicates a match).
-fn forward_pos3(mask1: i32, mask2: i32, mask3: i32) -> usize {
- debug_assert!(mask1 != 0 || mask2 != 0 || mask3 != 0);
-
- forward_pos(mask1 | mask2 | mask3)
-}
-
-/// Compute the position of the last matching byte from the given mask. The
-/// position returned is always in the range [0, 15].
-///
-/// The mask given is expected to be the result of _mm_movemask_epi8.
-fn reverse_pos(mask: i32) -> usize {
- // We are dealing with little endian here, where the most significant byte
- // is at a higher address. That means the most significant bit that is set
- // corresponds to the position of our last matching byte. The position from
- // the end of the mask is therefore the number of leading zeros in a 16
- // bit integer, and the position from the start of the mask is therefore
- // 16 - (leading zeros) - 1.
- VECTOR_SIZE - (mask as u16).leading_zeros() as usize - 1
-}
-
-/// Compute the position of the last matching byte from the given masks. The
-/// position returned is always in the range [0, 15]. Each mask corresponds to
-/// the equality comparison of a single byte.
-///
-/// The masks given are expected to be the result of _mm_movemask_epi8, where
-/// at least one of the masks is non-zero (i.e., indicates a match).
-fn reverse_pos2(mask1: i32, mask2: i32) -> usize {
- debug_assert!(mask1 != 0 || mask2 != 0);
-
- reverse_pos(mask1 | mask2)
-}
-
-/// Compute the position of the last matching byte from the given masks. The
-/// position returned is always in the range [0, 15]. Each mask corresponds to
-/// the equality comparison of a single byte.
-///
-/// The masks given are expected to be the result of _mm_movemask_epi8, where
-/// at least one of the masks is non-zero (i.e., indicates a match).
-fn reverse_pos3(mask1: i32, mask2: i32, mask3: i32) -> usize {
- debug_assert!(mask1 != 0 || mask2 != 0 || mask3 != 0);
-
- reverse_pos(mask1 | mask2 | mask3)
-}
-
-/// Subtract `b` from `a` and return the difference. `a` should be greater than
-/// or equal to `b`.
-fn sub(a: *const u8, b: *const u8) -> usize {
- debug_assert!(a >= b);
- (a as usize) - (b as usize)
-}
diff --git a/vendor/memchr/src/memchr/x86/sse42.rs b/vendor/memchr/src/memchr/x86/sse42.rs
deleted file mode 100644
index da38e50c2..000000000
--- a/vendor/memchr/src/memchr/x86/sse42.rs
+++ /dev/null
@@ -1,72 +0,0 @@
-// This code is unused. PCMPESTRI is gratuitously slow. I imagine it might
-// start winning with a hypothetical memchr4 (or greater). This technique might
-// also be good for exposing searches over ranges of bytes, but that departs
-// from the standard memchr API, so it's not clear whether we actually want
-// that or not.
-//
-// N.B. PCMPISTRI appears to be about twice as fast as PCMPESTRI, which is kind
-// of neat. Unfortunately, UTF-8 strings can contain NUL bytes, which means
-// I don't see a way of effectively using PCMPISTRI unless there's some fast
-// way to replace zero bytes with a byte that is not a needle byte.
-
-use core::{arch::x86_64::*, mem::size_of};
-
-use x86::sse2;
-
-const VECTOR_SIZE: usize = size_of::<__m128i>();
-const CONTROL_ANY: i32 = _SIDD_UBYTE_OPS
- | _SIDD_CMP_EQUAL_ANY
- | _SIDD_POSITIVE_POLARITY
- | _SIDD_LEAST_SIGNIFICANT;
-
-#[target_feature(enable = "sse4.2")]
-pub unsafe fn memchr3(
- n1: u8,
- n2: u8,
- n3: u8,
- haystack: &[u8],
-) -> Option<usize> {
- let vn1 = _mm_set1_epi8(n1 as i8);
- let vn2 = _mm_set1_epi8(n2 as i8);
- let vn3 = _mm_set1_epi8(n3 as i8);
- let vn = _mm_setr_epi8(
- n1 as i8, n2 as i8, n3 as i8, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- );
- let len = haystack.len();
- let start_ptr = haystack.as_ptr();
- let end_ptr = haystack[haystack.len()..].as_ptr();
- let mut ptr = start_ptr;
-
- if haystack.len() < VECTOR_SIZE {
- while ptr < end_ptr {
- if *ptr == n1 || *ptr == n2 || *ptr == n3 {
- return Some(sub(ptr, start_ptr));
- }
- ptr = ptr.offset(1);
- }
- return None;
- }
- while ptr <= end_ptr.sub(VECTOR_SIZE) {
- let chunk = _mm_loadu_si128(ptr as *const __m128i);
- let res = _mm_cmpestri(vn, 3, chunk, 16, CONTROL_ANY);
- if res < 16 {
- return Some(sub(ptr, start_ptr) + res as usize);
- }
- ptr = ptr.add(VECTOR_SIZE);
- }
- if ptr < end_ptr {
- debug_assert!(sub(end_ptr, ptr) < VECTOR_SIZE);
- ptr = ptr.sub(VECTOR_SIZE - sub(end_ptr, ptr));
- debug_assert_eq!(sub(end_ptr, ptr), VECTOR_SIZE);
-
- return sse2::forward_search3(start_ptr, end_ptr, ptr, vn1, vn2, vn3);
- }
- None
-}
-
-/// Subtract `b` from `a` and return the difference. `a` should be greater than
-/// or equal to `b`.
-fn sub(a: *const u8, b: *const u8) -> usize {
- debug_assert!(a >= b);
- (a as usize) - (b as usize)
-}
diff --git a/vendor/memchr/src/memmem/genericsimd.rs b/vendor/memchr/src/memmem/genericsimd.rs
deleted file mode 100644
index 28bfdab88..000000000
--- a/vendor/memchr/src/memmem/genericsimd.rs
+++ /dev/null
@@ -1,266 +0,0 @@
-use core::mem::size_of;
-
-use crate::memmem::{util::memcmp, vector::Vector, NeedleInfo};
-
-/// The minimum length of a needle required for this algorithm. The minimum
-/// is 2 since a length of 1 should just use memchr and a length of 0 isn't
-/// a case handled by this searcher.
-pub(crate) const MIN_NEEDLE_LEN: usize = 2;
-
-/// The maximum length of a needle supported by this algorithm.
-///
-/// In reality, there is no hard max here. The code below can handle any
-/// length needle. (Perhaps that suggests there are missing optimizations.)
-/// Instead, this is a heuristic and a bound guaranteeing our linear time
-/// complexity.
-///
-/// It is a heuristic because when a candidate match is found, memcmp is run.
-/// For very large needles with lots of false positives, memcmp can make the
-/// code run quite slow.
-///
-/// It is a bound because the worst case behavior with memcmp is multiplicative
-/// in the size of the needle and haystack, and we want to keep that additive.
-/// This cap preserves the additive bound in theory, since the needle length
-/// is then just a constant. We aren't acting in bad faith here: memcmp on
-/// tiny needles
-/// is so fast that even in pathological cases (see pathological vector
-/// benchmarks), this is still just as fast or faster in practice.
-///
-/// This specific number was chosen by tweaking a bit and running benchmarks.
-/// The rare-medium-needle, for example, gets about 5% faster by using this
-/// algorithm instead of a prefilter-accelerated Two-Way. There's also a
-/// theoretical desire to keep this number reasonably low, to mitigate the
-/// impact of pathological cases. I did try 64, and some benchmarks got a
-/// little better, and others (particularly the pathological ones), got a lot
-/// worse. So... 32 it is?
-pub(crate) const MAX_NEEDLE_LEN: usize = 32;
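
To spell the bound out: each candidate position triggers at most one memcmp of
at most MAX_NEEDLE_LEN = 32 bytes, so even if every position in the haystack is
a candidate, total work is bounded by 32 * haystack.len() byte comparisons.
That is linear in the haystack, rather than the needle.len() * haystack.len()
worst case that an uncapped needle length would permit.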
-
-/// The implementation of the forward vector accelerated substring search.
-///
-/// This is extremely similar to the prefilter vector module by the same name.
-/// The key difference is that this is not a prefilter. Instead, it handles
-/// confirming its own matches. The trade off is that this only works with
-/// smaller needles. The speed up here is that an inlined memcmp on a tiny
-/// needle is very quick, even on pathological inputs. This is much better than
-/// combining a prefilter with Two-Way, where using Two-Way to confirm the
-/// match has higher latency.
-///
-/// So why not use this for all needles? We could, and it would probably work
-/// really well on most inputs. But its worst case is multiplicative and we
-/// want to guarantee worst case additive time. Some of the benchmarks try to
-/// justify this (see the pathological ones).
-///
-/// The prefilter variant of this has more comments. Also note that we only
-/// implement this for forward searches for now. If you have a compelling use
-/// case for accelerated reverse search, please file an issue.
-#[derive(Clone, Copy, Debug)]
-pub(crate) struct Forward {
- rare1i: u8,
- rare2i: u8,
-}
-
-impl Forward {
- /// Create a new "generic simd" forward searcher. If one could not be
- /// created from the given inputs, then None is returned.
- pub(crate) fn new(ninfo: &NeedleInfo, needle: &[u8]) -> Option<Forward> {
- let (rare1i, rare2i) = ninfo.rarebytes.as_rare_ordered_u8();
- // If the needle is too short or too long, give up. Also, give up
- // if the rare bytes detected are at the same position. (It likely
- // suggests a degenerate case, although it should technically not be
- // possible.)
- if needle.len() < MIN_NEEDLE_LEN
- || needle.len() > MAX_NEEDLE_LEN
- || rare1i == rare2i
- {
- return None;
- }
- Some(Forward { rare1i, rare2i })
- }
-
- /// Returns the minimum length of haystack that is needed for this searcher
- /// to work for a particular vector. Passing a haystack with a length
- /// smaller than this will cause `fwd_find` to panic.
- #[inline(always)]
- pub(crate) fn min_haystack_len<V: Vector>(&self) -> usize {
- self.rare2i as usize + size_of::<V>()
- }
-}
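
For a concrete instance of min_haystack_len: with a 16-byte vector and, say,
rare2i = 5 (illustrative values), the minimum is 5 + 16 = 21 bytes, since the
unaligned load at ptr + rare2i must be able to read bytes 5 through 20 of the
haystack without running past the end.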
-
-/// Searches the given haystack for the given needle. The needle given should
-/// be the same as the needle that this searcher was initialized with.
-///
-/// # Panics
-///
-/// When the given haystack has a length smaller than `min_haystack_len`.
-///
-/// # Safety
-///
-/// Since this is meant to be used with vector functions, callers need to
-/// specialize this inside of a function with a `target_feature` attribute.
-/// Therefore, callers must ensure that whatever target feature is being used
-/// supports the vector functions that this function is specialized for. (For
-/// the specific vector functions used, see the Vector trait implementations.)
-#[inline(always)]
-pub(crate) unsafe fn fwd_find<V: Vector>(
- fwd: &Forward,
- haystack: &[u8],
- needle: &[u8],
-) -> Option<usize> {
- // It would be nice if we didn't have this check here, since the meta
- // searcher should handle it for us. But without this, I don't think we
- // guarantee that end_ptr.sub(needle.len()) won't result in UB. We could
- // put it as part of the safety contract, but it makes it more complicated
- // than necessary.
- if haystack.len() < needle.len() {
- return None;
- }
- let min_haystack_len = fwd.min_haystack_len::<V>();
- assert!(haystack.len() >= min_haystack_len, "haystack too small");
- debug_assert!(needle.len() <= haystack.len());
- debug_assert!(
- needle.len() >= MIN_NEEDLE_LEN,
- "needle must be at least {} bytes",
- MIN_NEEDLE_LEN,
- );
- debug_assert!(
- needle.len() <= MAX_NEEDLE_LEN,
- "needle must be at most {} bytes",
- MAX_NEEDLE_LEN,
- );
-
- let (rare1i, rare2i) = (fwd.rare1i as usize, fwd.rare2i as usize);
- let rare1chunk = V::splat(needle[rare1i]);
- let rare2chunk = V::splat(needle[rare2i]);
-
- let start_ptr = haystack.as_ptr();
- let end_ptr = start_ptr.add(haystack.len());
- let max_ptr = end_ptr.sub(min_haystack_len);
- let mut ptr = start_ptr;
-
- // N.B. I did experiment with unrolling the loop to deal with size(V)
- // bytes at a time and 2*size(V) bytes at a time. The double unroll was
- // marginally faster while the quadruple unroll was unambiguously slower.
- // In the end, I decided the complexity from unrolling wasn't worth it. I
- // used the memmem/krate/prebuilt/huge-en/ benchmarks to compare.
- while ptr <= max_ptr {
- let m = fwd_find_in_chunk(
- fwd, needle, ptr, end_ptr, rare1chunk, rare2chunk, !0,
- );
- if let Some(chunki) = m {
- return Some(matched(start_ptr, ptr, chunki));
- }
- ptr = ptr.add(size_of::<V>());
- }
- if ptr < end_ptr {
- let remaining = diff(end_ptr, ptr);
- debug_assert!(
- remaining < min_haystack_len,
- "remaining bytes should be smaller than the minimum haystack \
- length of {}, but there are {} bytes remaining",
- min_haystack_len,
- remaining,
- );
- if remaining < needle.len() {
- return None;
- }
- debug_assert!(
- max_ptr < ptr,
- "after main loop, ptr should have exceeded max_ptr",
- );
- let overlap = diff(ptr, max_ptr);
- debug_assert!(
- overlap > 0,
- "overlap ({}) must always be non-zero",
- overlap,
- );
- debug_assert!(
- overlap < size_of::<V>(),
- "overlap ({}) cannot possibly be >= than a vector ({})",
- overlap,
- size_of::<V>(),
- );
- // The mask has all of its bits set except for the first N least
- // significant bits, where N=overlap. This way, any matches that
- // occur in find_in_chunk within the overlap are automatically
- // ignored.
- let mask = !((1 << overlap) - 1);
- ptr = max_ptr;
- let m = fwd_find_in_chunk(
- fwd, needle, ptr, end_ptr, rare1chunk, rare2chunk, mask,
- );
- if let Some(chunki) = m {
- return Some(matched(start_ptr, ptr, chunki));
- }
- }
- None
-}
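
The overlap mask built at the end of the function is easy to sanity-check in
isolation: with N = overlap, `!((1 << overlap) - 1)` clears exactly the N least
significant lanes. A tiny illustrative check:

    fn main() {
        // With overlap = 3, the three least significant lanes are cleared,
        // so positions rescanned by the final overlapping load cannot match
        // a second time.
        let overlap = 3u32;
        let mask: u32 = !((1u32 << overlap) - 1);
        assert_eq!(0xFFFF_FFF8, mask);
        assert_eq!(0, mask & 0b111);
    }
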
-
-/// Search for an occurrence of two rare bytes from the needle in the chunk
-/// pointed to by ptr, with the end of the haystack pointed to by end_ptr. When
-/// an occurrence is found, memcmp is run to check if a match occurs at the
-/// corresponding position.
-///
-/// rare1chunk and rare2chunk correspond to vectors with the rare1 and rare2
-/// bytes repeated in each 8-bit lane, respectively.
-///
-/// mask should have bits set corresponding to the positions in the chunk in which
-/// matches are considered. This is only used for the last vector load where
-/// the beginning of the vector might have overlapped with the last load in
-/// the main loop. The mask lets us avoid visiting positions that have already
-/// been discarded as matches.
-///
-/// # Safety
-///
-/// It must be safe to do an unaligned read of size(V) bytes starting at both
-/// (ptr + rare1i) and (ptr + rare2i). It must also be safe to do unaligned
-/// loads on ptr up to (end_ptr - needle.len()).
-#[inline(always)]
-unsafe fn fwd_find_in_chunk<V: Vector>(
- fwd: &Forward,
- needle: &[u8],
- ptr: *const u8,
- end_ptr: *const u8,
- rare1chunk: V,
- rare2chunk: V,
- mask: u32,
-) -> Option<usize> {
- let chunk0 = V::load_unaligned(ptr.add(fwd.rare1i as usize));
- let chunk1 = V::load_unaligned(ptr.add(fwd.rare2i as usize));
-
- let eq0 = chunk0.cmpeq(rare1chunk);
- let eq1 = chunk1.cmpeq(rare2chunk);
-
- let mut match_offsets = eq0.and(eq1).movemask() & mask;
- while match_offsets != 0 {
- let offset = match_offsets.trailing_zeros() as usize;
- let ptr = ptr.add(offset);
- if end_ptr.sub(needle.len()) < ptr {
- return None;
- }
- let chunk = core::slice::from_raw_parts(ptr, needle.len());
- if memcmp(needle, chunk) {
- return Some(offset);
- }
- match_offsets &= match_offsets - 1;
- }
- None
-}
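
In scalar terms, the candidate check above amounts to: find every offset in the
chunk where both rare bytes line up, then confirm the whole needle. A sketch
under the assumption of 16-byte chunks (the function name is ours, and the
slice comparison stands in for the memcmp step):

    fn find_in_chunk_scalar(
        haystack: &[u8],
        needle: &[u8],
        chunk_base: usize,
        rare1i: usize,
        rare2i: usize,
    ) -> Option<usize> {
        // Assumes rare1i and rare2i are valid indices into the needle.
        for offset in 0..16 {
            let start = chunk_base + offset;
            let Some(window) = haystack.get(start..start + needle.len()) else {
                return None;
            };
            // Cheap check first: do both rare bytes match at their positions?
            if window[rare1i] == needle[rare1i]
                && window[rare2i] == needle[rare2i]
            {
                // Candidate found; confirm the whole needle.
                if window == needle {
                    return Some(start);
                }
            }
        }
        None
    }
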
-
-/// Accepts a chunk-relative offset and returns a haystack-relative offset.
-///
-/// See the function by the same name in the prefilter variant of this
-/// algorithm to learn why it's tagged with inline(never). Even here, where
-/// the function is simpler, inlining it leads to poorer codegen. (Although
-/// it does improve some benchmarks, like prebuiltiter/huge-en/common-you.)
-#[cold]
-#[inline(never)]
-fn matched(start_ptr: *const u8, ptr: *const u8, chunki: usize) -> usize {
- diff(ptr, start_ptr) + chunki
-}
-
-/// Subtract `b` from `a` and return the difference. `a` must be greater than
-/// or equal to `b`.
-fn diff(a: *const u8, b: *const u8) -> usize {
- debug_assert!(a >= b);
- (a as usize) - (b as usize)
-}
diff --git a/vendor/memchr/src/memmem/mod.rs b/vendor/memchr/src/memmem/mod.rs
index e1cd1aec7..1a2a7e10c 100644
--- a/vendor/memchr/src/memmem/mod.rs
+++ b/vendor/memchr/src/memmem/mod.rs
@@ -66,99 +66,25 @@ assert_eq!(None, finder.find(b"quux baz bar"));
```
*/
-pub use self::prefilter::Prefilter;
+pub use crate::memmem::searcher::PrefilterConfig as Prefilter;
+
+// This is exported here for use in the crate::arch::all::twoway
+// implementation. This is essentially an abstraction breaker. Namely, the
+// public API of twoway doesn't support providing a prefilter, but its crate
+// internal API does. The main reason for this is that I didn't want to do the
+// API design required to support it without a concrete use case.
+pub(crate) use crate::memmem::searcher::Pre;
use crate::{
- cow::CowBytes,
- memmem::{
- prefilter::{Pre, PrefilterFn, PrefilterState},
- rabinkarp::NeedleHash,
- rarebytes::RareNeedleBytes,
+ arch::all::{
+ packedpair::{DefaultFrequencyRank, HeuristicFrequencyRank},
+ rabinkarp,
},
+ cow::CowBytes,
+ memmem::searcher::{PrefilterState, Searcher, SearcherRev},
};
-/// Defines a suite of quickcheck properties for forward and reverse
-/// substring searching.
-///
-/// This is defined in this specific spot so that it can be used freely among
-/// the different substring search implementations. I couldn't be bothered to
-/// fight with the macro-visibility rules enough to figure out how to stuff it
-/// somewhere more convenient.
-#[cfg(all(test, feature = "std"))]
-macro_rules! define_memmem_quickcheck_tests {
- ($fwd:expr, $rev:expr) => {
- use crate::memmem::proptests;
-
- quickcheck::quickcheck! {
- fn qc_fwd_prefix_is_substring(bs: Vec<u8>) -> bool {
- proptests::prefix_is_substring(false, &bs, $fwd)
- }
-
- fn qc_fwd_suffix_is_substring(bs: Vec<u8>) -> bool {
- proptests::suffix_is_substring(false, &bs, $fwd)
- }
-
- fn qc_fwd_matches_naive(
- haystack: Vec<u8>,
- needle: Vec<u8>
- ) -> bool {
- proptests::matches_naive(false, &haystack, &needle, $fwd)
- }
-
- fn qc_rev_prefix_is_substring(bs: Vec<u8>) -> bool {
- proptests::prefix_is_substring(true, &bs, $rev)
- }
-
- fn qc_rev_suffix_is_substring(bs: Vec<u8>) -> bool {
- proptests::suffix_is_substring(true, &bs, $rev)
- }
-
- fn qc_rev_matches_naive(
- haystack: Vec<u8>,
- needle: Vec<u8>
- ) -> bool {
- proptests::matches_naive(true, &haystack, &needle, $rev)
- }
- }
- };
-}
-
-/// Defines a suite of "simple" hand-written tests for a substring
-/// implementation.
-///
-/// This is defined here for the same reason that
-/// define_memmem_quickcheck_tests is defined here.
-#[cfg(test)]
-macro_rules! define_memmem_simple_tests {
- ($fwd:expr, $rev:expr) => {
- use crate::memmem::testsimples;
-
- #[test]
- fn simple_forward() {
- testsimples::run_search_tests_fwd($fwd);
- }
-
- #[test]
- fn simple_reverse() {
- testsimples::run_search_tests_rev($rev);
- }
- };
-}
-
-mod byte_frequencies;
-#[cfg(memchr_runtime_simd)]
-mod genericsimd;
-mod prefilter;
-mod rabinkarp;
-mod rarebytes;
-mod twoway;
-mod util;
-#[cfg(memchr_runtime_simd)]
-mod vector;
-#[cfg(all(memchr_runtime_wasm128))]
-mod wasm;
-#[cfg(all(not(miri), target_arch = "x86_64", memchr_runtime_simd))]
-mod x86;
+mod searcher;
/// Returns an iterator over all non-overlapping occurrences of a substring in
/// a haystack.
@@ -258,7 +184,7 @@ pub fn rfind_iter<'h, 'n, N: 'n + ?Sized + AsRef<[u8]>>(
#[inline]
pub fn find(haystack: &[u8], needle: &[u8]) -> Option<usize> {
if haystack.len() < 64 {
- rabinkarp::find(haystack, needle)
+ rabinkarp::Finder::new(needle).find(haystack, needle)
} else {
Finder::new(needle).find(haystack)
}
@@ -295,7 +221,7 @@ pub fn find(haystack: &[u8], needle: &[u8]) -> Option<usize> {
#[inline]
pub fn rfind(haystack: &[u8], needle: &[u8]) -> Option<usize> {
if haystack.len() < 64 {
- rabinkarp::rfind(haystack, needle)
+ rabinkarp::FinderRev::new(needle).rfind(haystack, needle)
} else {
FinderRev::new(needle).rfind(haystack)
}
@@ -321,7 +247,7 @@ impl<'h, 'n> FindIter<'h, 'n> {
haystack: &'h [u8],
finder: Finder<'n>,
) -> FindIter<'h, 'n> {
- let prestate = finder.searcher.prefilter_state();
+ let prestate = PrefilterState::new();
FindIter { haystack, prestate, finder, pos: 0 }
}
@@ -331,8 +257,8 @@ impl<'h, 'n> FindIter<'h, 'n> {
/// If this is already an owned iterator, then this is a no-op. Otherwise,
/// this copies the needle.
///
- /// This is only available when the `std` feature is enabled.
- #[cfg(feature = "std")]
+ /// This is only available when the `alloc` feature is enabled.
+ #[cfg(feature = "alloc")]
#[inline]
pub fn into_owned(self) -> FindIter<'h, 'static> {
FindIter {
@@ -348,20 +274,32 @@ impl<'h, 'n> Iterator for FindIter<'h, 'n> {
type Item = usize;
fn next(&mut self) -> Option<usize> {
- if self.pos > self.haystack.len() {
- return None;
- }
- let result = self
- .finder
- .searcher
- .find(&mut self.prestate, &self.haystack[self.pos..]);
- match result {
- None => None,
- Some(i) => {
- let pos = self.pos + i;
- self.pos = pos + core::cmp::max(1, self.finder.needle().len());
- Some(pos)
- }
+ let needle = self.finder.needle();
+ let haystack = self.haystack.get(self.pos..)?;
+ let idx =
+ self.finder.searcher.find(&mut self.prestate, haystack, needle)?;
+
+ let pos = self.pos + idx;
+ self.pos = pos + needle.len().max(1);
+
+ Some(pos)
+ }
+
+ fn size_hint(&self) -> (usize, Option<usize>) {
+ // The largest possible number of non-overlapping matches is the
+ // quotient of the haystack length and the needle length (or the
+ // haystack length plus one, if the needle is empty).
+ match self.haystack.len().checked_sub(self.pos) {
+ None => (0, Some(0)),
+ Some(haystack_len) => match self.finder.needle().len() {
+ // Empty needles always succeed and match at every point
+ // (including the very end).
+ 0 => (
+ haystack_len.saturating_add(1),
+ haystack_len.checked_add(1),
+ ),
+ needle_len => (0, Some(haystack_len / needle_len)),
+ },
}
}
}
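
The bounds computed by `size_hint` above can be checked against the public iterator; a short sketch assuming `memchr::memmem::find_iter`:

    use memchr::memmem;

    fn main() {
        // Non-empty needle: no guaranteed matches, and at most
        // haystack_len / needle_len of them.
        let it = memmem::find_iter(b"foobarfoobarfoo", "foo");
        assert_eq!((0, Some(5)), it.size_hint()); // 15 / 3 == 5

        // Empty needle: exactly haystack_len + 1 matches.
        let it = memmem::find_iter(b"abc", "");
        assert_eq!((4, Some(4)), it.size_hint());
    }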
@@ -398,7 +336,7 @@ impl<'h, 'n> FindRevIter<'h, 'n> {
/// this copies the needle.
///
- /// This is only available when the `std` feature is enabled.
+ /// This is only available when the `alloc` feature is enabled.
- #[cfg(feature = "std")]
+ #[cfg(feature = "alloc")]
#[inline]
pub fn into_owned(self) -> FindRevIter<'h, 'static> {
FindRevIter {
@@ -447,7 +385,8 @@ impl<'h, 'n> Iterator for FindRevIter<'h, 'n> {
/// the lifetime of its needle.
#[derive(Clone, Debug)]
pub struct Finder<'n> {
- searcher: Searcher<'n>,
+ needle: CowBytes<'n>,
+ searcher: Searcher,
}
impl<'n> Finder<'n> {
@@ -481,8 +420,11 @@ impl<'n> Finder<'n> {
/// assert_eq!(Some(4), Finder::new("bar").find(haystack));
/// assert_eq!(None, Finder::new("quux").find(haystack));
/// ```
+ #[inline]
pub fn find(&self, haystack: &[u8]) -> Option<usize> {
- self.searcher.find(&mut self.searcher.prefilter_state(), haystack)
+ let mut prestate = PrefilterState::new();
+ let needle = self.needle.as_slice();
+ self.searcher.find(&mut prestate, haystack, needle)
}
/// Returns an iterator over all occurrences of a substring in a haystack.
@@ -525,11 +467,14 @@ impl<'n> Finder<'n> {
/// If this is already an owned finder, then this is a no-op. Otherwise,
/// this copies the needle.
///
- /// This is only available when the `std` feature is enabled.
- #[cfg(feature = "std")]
+ /// This is only available when the `alloc` feature is enabled.
+ #[cfg(feature = "alloc")]
#[inline]
pub fn into_owned(self) -> Finder<'static> {
- Finder { searcher: self.searcher.into_owned() }
+ Finder {
+ needle: self.needle.into_owned(),
+ searcher: self.searcher.clone(),
+ }
}
/// Convert this finder into its borrowed variant.
@@ -544,7 +489,10 @@ impl<'n> Finder<'n> {
/// shorter of the two.
#[inline]
pub fn as_ref(&self) -> Finder<'_> {
- Finder { searcher: self.searcher.as_ref() }
+ Finder {
+ needle: CowBytes::new(self.needle()),
+ searcher: self.searcher.clone(),
+ }
}
/// Returns the needle that this finder searches for.
@@ -555,7 +503,7 @@ impl<'n> Finder<'n> {
/// needle returned must necessarily be the shorter of the two.
#[inline]
pub fn needle(&self) -> &[u8] {
- self.searcher.needle()
+ self.needle.as_slice()
}
}
@@ -574,7 +522,8 @@ impl<'n> Finder<'n> {
/// the lifetime of its needle.
#[derive(Clone, Debug)]
pub struct FinderRev<'n> {
- searcher: SearcherRev<'n>,
+ needle: CowBytes<'n>,
+ searcher: SearcherRev,
}
impl<'n> FinderRev<'n> {
@@ -612,7 +561,7 @@ impl<'n> FinderRev<'n> {
/// assert_eq!(None, FinderRev::new("quux").rfind(haystack));
/// ```
pub fn rfind<B: AsRef<[u8]>>(&self, haystack: B) -> Option<usize> {
- self.searcher.rfind(haystack.as_ref())
+ self.searcher.rfind(haystack.as_ref(), self.needle.as_slice())
}
/// Returns a reverse iterator over all occurrences of a substring in a
@@ -657,10 +606,13 @@ impl<'n> FinderRev<'n> {
/// this copies the needle.
///
- /// This is only available when the `std` feature is enabled.
+ /// This is only available when the `alloc` feature is enabled.
- #[cfg(feature = "std")]
+ #[cfg(feature = "alloc")]
#[inline]
pub fn into_owned(self) -> FinderRev<'static> {
- FinderRev { searcher: self.searcher.into_owned() }
+ FinderRev {
+ needle: self.needle.into_owned(),
+ searcher: self.searcher.clone(),
+ }
}
/// Convert this finder into its borrowed variant.
@@ -675,7 +627,10 @@ impl<'n> FinderRev<'n> {
/// shorter of the two.
#[inline]
pub fn as_ref(&self) -> FinderRev<'_> {
- FinderRev { searcher: self.searcher.as_ref() }
+ FinderRev {
+ needle: CowBytes::new(self.needle()),
+ searcher: self.searcher.clone(),
+ }
}
/// Returns the needle that this finder searches for.
@@ -686,7 +641,7 @@ impl<'n> FinderRev<'n> {
/// needle returned must necessarily be the shorter of the two.
#[inline]
pub fn needle(&self) -> &[u8] {
- self.searcher.needle()
+ self.needle.as_slice()
}
}
@@ -697,7 +652,7 @@ impl<'n> FinderRev<'n> {
/// heuristic prefilters used to speed up certain searches.
#[derive(Clone, Debug, Default)]
pub struct FinderBuilder {
- config: SearcherConfig,
+ prefilter: Prefilter,
}
impl FinderBuilder {
@@ -712,7 +667,26 @@ impl FinderBuilder {
&self,
needle: &'n B,
) -> Finder<'n> {
- Finder { searcher: Searcher::new(self.config, needle.as_ref()) }
+ self.build_forward_with_ranker(DefaultFrequencyRank, needle)
+ }
+
+ /// Build a forward finder using the given needle and a custom heuristic for
+ /// determining the frequency of a given byte in the dataset.
+ /// See [`HeuristicFrequencyRank`] for more details.
+ pub fn build_forward_with_ranker<
+ 'n,
+ R: HeuristicFrequencyRank,
+ B: ?Sized + AsRef<[u8]>,
+ >(
+ &self,
+ ranker: R,
+ needle: &'n B,
+ ) -> Finder<'n> {
+ let needle = needle.as_ref();
+ Finder {
+ needle: CowBytes::new(needle),
+ searcher: Searcher::new(self.prefilter, ranker, needle),
+ }
}
/// Build a reverse finder using the given needle from the current
@@ -721,7 +695,11 @@ impl FinderBuilder {
&self,
needle: &'n B,
) -> FinderRev<'n> {
- FinderRev { searcher: SearcherRev::new(needle.as_ref()) }
+ let needle = needle.as_ref();
+ FinderRev {
+ needle: CowBytes::new(needle),
+ searcher: SearcherRev::new(needle),
+ }
}
/// Configure the prefilter setting for the finder.
@@ -729,593 +707,31 @@ impl FinderBuilder {
/// See the documentation for [`Prefilter`] for more discussion on why
/// you might want to configure this.
pub fn prefilter(&mut self, prefilter: Prefilter) -> &mut FinderBuilder {
- self.config.prefilter = prefilter;
+ self.prefilter = prefilter;
self
}
}
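
The new `build_forward_with_ranker` hook lets callers substitute the background byte-frequency table. A hedged sketch of a custom ranker for NUL-heavy binary data; the `Binary` type here is hypothetical, while `HeuristicFrequencyRank` and its `rank` method are the names introduced by this change:

    use memchr::memmem::{FinderBuilder, HeuristicFrequencyRank};

    // Hypothetical ranker: report NUL as maximally common so it is never
    // chosen as a "rare" prefilter byte, and everything else as rare.
    struct Binary;

    impl HeuristicFrequencyRank for Binary {
        fn rank(&self, byte: u8) -> u8 {
            if byte == 0x00 { 255 } else { 1 }
        }
    }

    fn main() {
        let finder = FinderBuilder::new()
            .build_forward_with_ranker(Binary, b"\x00\x00\xdd\xff");
        assert_eq!(Some(1), finder.find(b"\x00\x00\x00\xdd\xff"));
    }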
-/// The internal implementation of a forward substring searcher.
-///
-/// The reality is that this is a "meta" searcher. Namely, depending on a
-/// variety of parameters (CPU support, target, needle size, haystack size and
-/// even dynamic properties such as prefilter effectiveness), the actual
-/// algorithm employed to do substring search may change.
-#[derive(Clone, Debug)]
-struct Searcher<'n> {
- /// The actual needle we're searching for.
- ///
- /// A CowBytes is like a Cow<[u8]>, except in no_std environments, it is
- /// specialized to a single variant (the borrowed form).
- needle: CowBytes<'n>,
- /// A collection of facts computed on the needle that are useful for more
- /// than one substring search algorithm.
- ninfo: NeedleInfo,
- /// A prefilter function, if it was deemed appropriate.
- ///
- /// Some substring search implementations (like Two-Way) benefit greatly
- /// if we can quickly find candidate starting positions for a match.
- prefn: Option<PrefilterFn>,
- /// The actual substring implementation in use.
- kind: SearcherKind,
-}
-
-/// A collection of facts computed about a search needle.
-///
-/// We group these things together because it's useful to be able to hand them
-/// to prefilters or substring algorithms that want them.
-#[derive(Clone, Copy, Debug)]
-pub(crate) struct NeedleInfo {
- /// The offsets of "rare" bytes detected in the needle.
- ///
- /// This is meant to be a heuristic in order to maximize the effectiveness
- /// of vectorized code. Namely, vectorized code tends to focus on only
- /// one or two bytes. If we pick bytes from the needle that occur
- /// infrequently, then more time will be spent in the vectorized code and
- /// will likely make the overall search (much) faster.
- ///
- /// Of course, this is only a heuristic based on a background frequency
- /// distribution of bytes. But it tends to work very well in practice.
- pub(crate) rarebytes: RareNeedleBytes,
- /// A Rabin-Karp hash of the needle.
- ///
- /// This is stored here instead of in a more specific Rabin-Karp searcher
- /// since Rabin-Karp may be used even if another SearcherKind corresponds
- /// to some other search implementation. e.g., if measurements suggest RK
- /// is faster in some cases or if a search implementation can't handle
- /// particularly small haystacks. (Moreover, we cannot use RK *generally*,
- /// since its worst case time is multiplicative. Instead, we only use it
- /// on small haystacks, where "small" is a constant.)
- pub(crate) nhash: NeedleHash,
-}
-
-/// Configuration for substring search.
-#[derive(Clone, Copy, Debug, Default)]
-struct SearcherConfig {
- /// This permits changing the behavior of the prefilter, since it can have
- /// a variable impact on performance.
- prefilter: Prefilter,
-}
-
-#[derive(Clone, Debug)]
-enum SearcherKind {
- /// A special case for empty needles. An empty needle always matches, even
- /// in an empty haystack.
- Empty,
- /// This is used whenever the needle is a single byte. In this case, we
- /// always use memchr.
- OneByte(u8),
- /// Two-Way is the generic work horse and is what provides our additive
- /// linear time guarantee. In general, it's used when the needle is bigger
- /// than 8 bytes or so.
- TwoWay(twoway::Forward),
- #[cfg(all(not(miri), target_arch = "x86_64", memchr_runtime_simd))]
- GenericSIMD128(x86::sse::Forward),
- #[cfg(memchr_runtime_wasm128)]
- GenericSIMD128(wasm::Forward),
- #[cfg(all(not(miri), target_arch = "x86_64", memchr_runtime_simd))]
- GenericSIMD256(x86::avx::Forward),
-}
-
-impl<'n> Searcher<'n> {
- fn new(config: SearcherConfig, needle: &'n [u8]) -> Searcher<'n> {
- use self::SearcherKind::*;
-
- let ninfo = NeedleInfo::new(needle);
- let mk = |kind: SearcherKind| {
- let prefn = prefilter::forward(
- &config.prefilter,
- &ninfo.rarebytes,
- needle,
- );
- Searcher { needle: CowBytes::new(needle), ninfo, prefn, kind }
- };
- if needle.len() == 0 {
- return mk(Empty);
- }
- if needle.len() == 1 {
- return mk(OneByte(needle[0]));
- }
- #[cfg(all(not(miri), target_arch = "x86_64", memchr_runtime_simd))]
- {
- if let Some(fwd) = x86::avx::Forward::new(&ninfo, needle) {
- return mk(GenericSIMD256(fwd));
- } else if let Some(fwd) = x86::sse::Forward::new(&ninfo, needle) {
- return mk(GenericSIMD128(fwd));
- }
- }
- #[cfg(all(target_arch = "wasm32", memchr_runtime_simd))]
- {
- if let Some(fwd) = wasm::Forward::new(&ninfo, needle) {
- return mk(GenericSIMD128(fwd));
- }
- }
-
- mk(TwoWay(twoway::Forward::new(needle)))
- }
-
- /// Return a fresh prefilter state that can be used with this searcher.
- /// A prefilter state is used to track the effectiveness of a searcher's
- /// prefilter for speeding up searches. Therefore, the prefilter state
- /// should generally be reused on subsequent searches (such as in an
- /// iterator). For searches on a different haystack, then a new prefilter
- /// state should be used.
- ///
- /// This always initializes a valid (but possibly inert) prefilter state
- /// even if this searcher does not have a prefilter enabled.
- fn prefilter_state(&self) -> PrefilterState {
- if self.prefn.is_none() {
- PrefilterState::inert()
- } else {
- PrefilterState::new()
- }
- }
-
- fn needle(&self) -> &[u8] {
- self.needle.as_slice()
- }
-
- fn as_ref(&self) -> Searcher<'_> {
- use self::SearcherKind::*;
-
- let kind = match self.kind {
- Empty => Empty,
- OneByte(b) => OneByte(b),
- TwoWay(tw) => TwoWay(tw),
- #[cfg(all(not(miri), memchr_runtime_simd))]
- GenericSIMD128(gs) => GenericSIMD128(gs),
- #[cfg(all(
- not(miri),
- target_arch = "x86_64",
- memchr_runtime_simd
- ))]
- GenericSIMD256(gs) => GenericSIMD256(gs),
- };
- Searcher {
- needle: CowBytes::new(self.needle()),
- ninfo: self.ninfo,
- prefn: self.prefn,
- kind,
- }
- }
-
- #[cfg(feature = "std")]
- fn into_owned(self) -> Searcher<'static> {
- use self::SearcherKind::*;
-
- let kind = match self.kind {
- Empty => Empty,
- OneByte(b) => OneByte(b),
- TwoWay(tw) => TwoWay(tw),
- #[cfg(all(not(miri), memchr_runtime_simd))]
- GenericSIMD128(gs) => GenericSIMD128(gs),
- #[cfg(all(
- not(miri),
- target_arch = "x86_64",
- memchr_runtime_simd
- ))]
- GenericSIMD256(gs) => GenericSIMD256(gs),
- };
- Searcher {
- needle: self.needle.into_owned(),
- ninfo: self.ninfo,
- prefn: self.prefn,
- kind,
- }
- }
-
- /// Implements forward substring search by selecting the implementation
- /// chosen at construction and executing it on the given haystack with the
- /// prefilter's current state of effectiveness.
- #[inline(always)]
- fn find(
- &self,
- state: &mut PrefilterState,
- haystack: &[u8],
- ) -> Option<usize> {
- use self::SearcherKind::*;
-
- let needle = self.needle();
- if haystack.len() < needle.len() {
- return None;
- }
- match self.kind {
- Empty => Some(0),
- OneByte(b) => crate::memchr(b, haystack),
- TwoWay(ref tw) => {
- // For very short haystacks (e.g., where the prefilter probably
- // can't run), it's faster to just run RK.
- if rabinkarp::is_fast(haystack, needle) {
- rabinkarp::find_with(&self.ninfo.nhash, haystack, needle)
- } else {
- self.find_tw(tw, state, haystack, needle)
- }
- }
- #[cfg(all(not(miri), memchr_runtime_simd))]
- GenericSIMD128(ref gs) => {
- // The SIMD matcher can't handle particularly short haystacks,
- // so we fall back to RK in these cases.
- if haystack.len() < gs.min_haystack_len() {
- rabinkarp::find_with(&self.ninfo.nhash, haystack, needle)
- } else {
- gs.find(haystack, needle)
- }
- }
- #[cfg(all(
- not(miri),
- target_arch = "x86_64",
- memchr_runtime_simd
- ))]
- GenericSIMD256(ref gs) => {
- // The SIMD matcher can't handle particularly short haystacks,
- // so we fall back to RK in these cases.
- if haystack.len() < gs.min_haystack_len() {
- rabinkarp::find_with(&self.ninfo.nhash, haystack, needle)
- } else {
- gs.find(haystack, needle)
- }
- }
- }
- }
-
- /// Calls Two-Way on the given haystack/needle.
- ///
- /// This is marked as uninlineable since it seems to have a better overall
- /// effect on benchmarks. However, this is one of those cases where
- /// inlining it results in an improvement in other benchmarks too, so I
- /// suspect we just don't have enough data yet to make the right call here.
- ///
- /// I suspect the main problem is that this function contains two different
- /// inlined copies of Two-Way: one with and one without prefilters enabled.
- #[inline(never)]
- fn find_tw(
- &self,
- tw: &twoway::Forward,
- state: &mut PrefilterState,
- haystack: &[u8],
- needle: &[u8],
- ) -> Option<usize> {
- if let Some(prefn) = self.prefn {
- // We used to look at the length of a haystack here. That is, if
- // it was too small, then don't bother with the prefilter. But two
- // things changed: the prefilter falls back to memchr for small
- // haystacks, and, above, Rabin-Karp is employed for tiny haystacks
- // anyway.
- if state.is_effective() {
- let mut pre = Pre { state, prefn, ninfo: &self.ninfo };
- return tw.find(Some(&mut pre), haystack, needle);
- }
- }
- tw.find(None, haystack, needle)
- }
-}
-
-impl NeedleInfo {
- pub(crate) fn new(needle: &[u8]) -> NeedleInfo {
- NeedleInfo {
- rarebytes: RareNeedleBytes::forward(needle),
- nhash: NeedleHash::forward(needle),
- }
- }
-}
-
-/// The internal implementation of a reverse substring searcher.
-///
-/// See the forward searcher docs for more details. Currently, the reverse
-/// searcher is considerably simpler since it lacks prefilter support. This
-/// was done because it adds a lot of code, and more surface area to test. And
-/// in particular, it's not clear whether a prefilter on reverse searching is
-/// worth it. (If you have a compelling use case, please file an issue!)
-#[derive(Clone, Debug)]
-struct SearcherRev<'n> {
- /// The actual needle we're searching for.
- needle: CowBytes<'n>,
- /// A Rabin-Karp hash of the needle.
- nhash: NeedleHash,
- /// The actual substring implementation in use.
- kind: SearcherRevKind,
-}
-
-#[derive(Clone, Debug)]
-enum SearcherRevKind {
- /// A special case for empty needles. An empty needle always matches, even
- /// in an empty haystack.
- Empty,
- /// This is used whenever the needle is a single byte. In this case, we
- /// always use memchr.
- OneByte(u8),
- /// Two-Way is the generic work horse and is what provides our additive
- /// linear time guarantee. In general, it's used when the needle is bigger
- /// than 8 bytes or so.
- TwoWay(twoway::Reverse),
-}
-
-impl<'n> SearcherRev<'n> {
- fn new(needle: &'n [u8]) -> SearcherRev<'n> {
- use self::SearcherRevKind::*;
-
- let kind = if needle.len() == 0 {
- Empty
- } else if needle.len() == 1 {
- OneByte(needle[0])
- } else {
- TwoWay(twoway::Reverse::new(needle))
- };
- SearcherRev {
- needle: CowBytes::new(needle),
- nhash: NeedleHash::reverse(needle),
- kind,
- }
- }
-
- fn needle(&self) -> &[u8] {
- self.needle.as_slice()
- }
-
- fn as_ref(&self) -> SearcherRev<'_> {
- use self::SearcherRevKind::*;
-
- let kind = match self.kind {
- Empty => Empty,
- OneByte(b) => OneByte(b),
- TwoWay(tw) => TwoWay(tw),
- };
- SearcherRev {
- needle: CowBytes::new(self.needle()),
- nhash: self.nhash,
- kind,
- }
- }
-
- #[cfg(feature = "std")]
- fn into_owned(self) -> SearcherRev<'static> {
- use self::SearcherRevKind::*;
-
- let kind = match self.kind {
- Empty => Empty,
- OneByte(b) => OneByte(b),
- TwoWay(tw) => TwoWay(tw),
- };
- SearcherRev {
- needle: self.needle.into_owned(),
- nhash: self.nhash,
- kind,
- }
- }
-
- /// Implements reverse substring search by selecting the implementation
- /// chosen at construction and executing it on the given haystack with the
- /// prefilter's current state of effectiveness.
- #[inline(always)]
- fn rfind(&self, haystack: &[u8]) -> Option<usize> {
- use self::SearcherRevKind::*;
-
- let needle = self.needle();
- if haystack.len() < needle.len() {
- return None;
- }
- match self.kind {
- Empty => Some(haystack.len()),
- OneByte(b) => crate::memrchr(b, haystack),
- TwoWay(ref tw) => {
- // For very short haystacks (e.g., where the prefilter probably
- // can't run), it's faster to just run RK.
- if rabinkarp::is_fast(haystack, needle) {
- rabinkarp::rfind_with(&self.nhash, haystack, needle)
- } else {
- tw.rfind(haystack, needle)
- }
- }
- }
- }
-}
-
-/// This module defines some generic quickcheck properties useful for testing
-/// any substring search algorithm. It also runs those properties for the
-/// top-level public API memmem routines. (The properties are also used to
-/// test various substring search implementations more granularly elsewhere as
-/// well.)
-#[cfg(all(test, feature = "std", not(miri)))]
-mod proptests {
- // N.B. This defines the quickcheck tests using the properties defined
- // below. Because of macro-visibility weirdness, the actual macro is
- // defined at the top of this file.
- define_memmem_quickcheck_tests!(super::find, super::rfind);
-
- /// Check that every prefix of the given byte string is a substring.
- pub(crate) fn prefix_is_substring(
- reverse: bool,
- bs: &[u8],
- mut search: impl FnMut(&[u8], &[u8]) -> Option<usize>,
- ) -> bool {
- if bs.is_empty() {
- return true;
- }
- for i in 0..(bs.len() - 1) {
- let prefix = &bs[..i];
- if reverse {
- assert_eq!(naive_rfind(bs, prefix), search(bs, prefix));
- } else {
- assert_eq!(naive_find(bs, prefix), search(bs, prefix));
- }
- }
- true
- }
-
- /// Check that every suffix of the given byte string is a substring.
- pub(crate) fn suffix_is_substring(
- reverse: bool,
- bs: &[u8],
- mut search: impl FnMut(&[u8], &[u8]) -> Option<usize>,
- ) -> bool {
- if bs.is_empty() {
- return true;
- }
- for i in 0..(bs.len() - 1) {
- let suffix = &bs[i..];
- if reverse {
- assert_eq!(naive_rfind(bs, suffix), search(bs, suffix));
- } else {
- assert_eq!(naive_find(bs, suffix), search(bs, suffix));
- }
- }
- true
- }
-
- /// Check that naive substring search matches the result of the given search
- /// algorithm.
- pub(crate) fn matches_naive(
- reverse: bool,
- haystack: &[u8],
- needle: &[u8],
- mut search: impl FnMut(&[u8], &[u8]) -> Option<usize>,
- ) -> bool {
- if reverse {
- naive_rfind(haystack, needle) == search(haystack, needle)
- } else {
- naive_find(haystack, needle) == search(haystack, needle)
- }
- }
-
- /// Naively search forwards for the given needle in the given haystack.
- fn naive_find(haystack: &[u8], needle: &[u8]) -> Option<usize> {
- if needle.is_empty() {
- return Some(0);
- } else if haystack.len() < needle.len() {
- return None;
- }
- for i in 0..(haystack.len() - needle.len() + 1) {
- if needle == &haystack[i..i + needle.len()] {
- return Some(i);
- }
- }
- None
- }
-
- /// Naively search in reverse for the given needle in the given haystack.
- fn naive_rfind(haystack: &[u8], needle: &[u8]) -> Option<usize> {
- if needle.is_empty() {
- return Some(haystack.len());
- } else if haystack.len() < needle.len() {
- return None;
- }
- for i in (0..(haystack.len() - needle.len() + 1)).rev() {
- if needle == &haystack[i..i + needle.len()] {
- return Some(i);
- }
- }
- None
- }
-}
-
-/// This module defines some hand-written "simple" substring tests. It
-/// also provides routines for easily running them on any substring search
-/// implementation.
#[cfg(test)]
-mod testsimples {
- define_memmem_simple_tests!(super::find, super::rfind);
-
- /// Each test is a (needle, haystack, expected_fwd, expected_rev) tuple.
- type SearchTest =
- (&'static str, &'static str, Option<usize>, Option<usize>);
-
- const SEARCH_TESTS: &'static [SearchTest] = &[
- ("", "", Some(0), Some(0)),
- ("", "a", Some(0), Some(1)),
- ("", "ab", Some(0), Some(2)),
- ("", "abc", Some(0), Some(3)),
- ("a", "", None, None),
- ("a", "a", Some(0), Some(0)),
- ("a", "aa", Some(0), Some(1)),
- ("a", "ba", Some(1), Some(1)),
- ("a", "bba", Some(2), Some(2)),
- ("a", "bbba", Some(3), Some(3)),
- ("a", "bbbab", Some(3), Some(3)),
- ("a", "bbbabb", Some(3), Some(3)),
- ("a", "bbbabbb", Some(3), Some(3)),
- ("a", "bbbbbb", None, None),
- ("ab", "", None, None),
- ("ab", "a", None, None),
- ("ab", "b", None, None),
- ("ab", "ab", Some(0), Some(0)),
- ("ab", "aab", Some(1), Some(1)),
- ("ab", "aaab", Some(2), Some(2)),
- ("ab", "abaab", Some(0), Some(3)),
- ("ab", "baaab", Some(3), Some(3)),
- ("ab", "acb", None, None),
- ("ab", "abba", Some(0), Some(0)),
- ("abc", "ab", None, None),
- ("abc", "abc", Some(0), Some(0)),
- ("abc", "abcz", Some(0), Some(0)),
- ("abc", "abczz", Some(0), Some(0)),
- ("abc", "zabc", Some(1), Some(1)),
- ("abc", "zzabc", Some(2), Some(2)),
- ("abc", "azbc", None, None),
- ("abc", "abzc", None, None),
- ("abczdef", "abczdefzzzzzzzzzzzzzzzzzzzz", Some(0), Some(0)),
- ("abczdef", "zzzzzzzzzzzzzzzzzzzzabczdef", Some(20), Some(20)),
- ("xyz", "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaxyz", Some(32), Some(32)),
- // Failures caught by quickcheck.
- ("\u{0}\u{15}", "\u{0}\u{15}\u{15}\u{0}", Some(0), Some(0)),
- ("\u{0}\u{1e}", "\u{1e}\u{0}", None, None),
- ];
-
- /// Run the substring search tests. `search` should be a closure that
- /// accepts a haystack and a needle and returns the starting position
- /// of the first occurrence of needle in the haystack, or `None` if one
- /// doesn't exist.
- pub(crate) fn run_search_tests_fwd(
- mut search: impl FnMut(&[u8], &[u8]) -> Option<usize>,
- ) {
- for &(needle, haystack, expected_fwd, _) in SEARCH_TESTS {
- let (n, h) = (needle.as_bytes(), haystack.as_bytes());
- assert_eq!(
- expected_fwd,
- search(h, n),
- "needle: {:?}, haystack: {:?}, expected: {:?}",
- n,
- h,
- expected_fwd
- );
- }
- }
-
- /// Run the substring search tests. `search` should be a closure that
- /// accepts a haystack and a needle and returns the starting position of
- /// the last occurrence of needle in the haystack, or `None` if one doesn't
- /// exist.
- pub(crate) fn run_search_tests_rev(
- mut search: impl FnMut(&[u8], &[u8]) -> Option<usize>,
- ) {
- for &(needle, haystack, _, expected_rev) in SEARCH_TESTS {
- let (n, h) = (needle.as_bytes(), haystack.as_bytes());
- assert_eq!(
- expected_rev,
- search(h, n),
- "needle: {:?}, haystack: {:?}, expected: {:?}",
- n,
- h,
- expected_rev
- );
- }
+mod tests {
+ use super::*;
+
+ define_substring_forward_quickcheck!(|h, n| Some(Finder::new(n).find(h)));
+ define_substring_reverse_quickcheck!(|h, n| Some(
+ FinderRev::new(n).rfind(h)
+ ));
+
+ #[test]
+ fn forward() {
+ crate::tests::substring::Runner::new()
+ .fwd(|h, n| Some(Finder::new(n).find(h)))
+ .run();
+ }
+
+ #[test]
+ fn reverse() {
+ crate::tests::substring::Runner::new()
+ .rev(|h, n| Some(FinderRev::new(n).rfind(h)))
+ .run();
}
}
diff --git a/vendor/memchr/src/memmem/prefilter/fallback.rs b/vendor/memchr/src/memmem/prefilter/fallback.rs
deleted file mode 100644
index ae1bbccb3..000000000
--- a/vendor/memchr/src/memmem/prefilter/fallback.rs
+++ /dev/null
@@ -1,122 +0,0 @@
-/*
-This module implements a "fallback" prefilter that only relies on memchr to
-function. While memchr works best when it's explicitly vectorized, its
-fallback implementations are fast enough to make a prefilter like this
-worthwhile.
-
-The essence of this implementation is to identify two rare bytes in a needle
-based on a background frequency distribution of bytes. We then run memchr on the
-rarer byte. For each match, we use the second rare byte as a guard to quickly
-check if a match is possible. If the position passes the guard test, then we do
-a naive memcmp to confirm the match.
-
-In practice, this formulation works amazingly well, primarily because of the
-heuristic use of a background frequency distribution. However, it does have a
-number of weaknesses where it can get quite slow when its background frequency
-distribution doesn't line up with the haystack being searched. This is why we
-have specialized vector routines that essentially take this idea and move the
-guard check into vectorized code. (Those specialized vector routines do still
-make use of the background frequency distribution of bytes though.)
-
-This fallback implementation was originally formulated in regex many moons ago:
-https://github.com/rust-lang/regex/blob/3db8722d0b204a85380fe2a65e13d7065d7dd968/src/literal/imp.rs#L370-L501
-Prior to that, I'm not aware of anyone using this technique in any prominent
-substring search implementation. Although, I'm sure folks have had this same
-insight long before me.
-
-Another version of this also appeared in bstr:
-https://github.com/BurntSushi/bstr/blob/a444256ca7407fe180ee32534688549655b7a38e/src/search/prefilter.rs#L83-L340
-*/
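
A compact safe-Rust sketch of the loop described above, with the `PrefilterState` bookkeeping omitted; `rare1i` and `rare2i` stand in for the precomputed rare-byte offsets:

    // Sketch only: returns a *candidate* start; the caller must confirm
    // the match with a full comparison against the needle.
    fn rare_byte_candidate(
        haystack: &[u8],
        needle: &[u8],
        rare1i: usize,
        rare2i: usize,
    ) -> Option<usize> {
        let (rare1, rare2) = (needle[rare1i], needle[rare2i]);
        let mut i = 0;
        while let Some(found) = memchr::memchr(rare1, &haystack[i..]) {
            i += found;
            // A match is impossible if rare1 can't be aligned with the
            // start of the haystack, or if the guard byte doesn't line up.
            if i < rare1i || haystack.get(i - rare1i + rare2i) != Some(&rare2) {
                i += 1;
                continue;
            }
            return Some(i - rare1i);
        }
        None
    }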
-
-use crate::memmem::{
- prefilter::{PrefilterFnTy, PrefilterState},
- NeedleInfo,
-};
-
-// Check that the functions below satisfy the Prefilter function type.
-const _: PrefilterFnTy = find;
-
-/// Look for a possible occurrence of needle. The position returned
-/// corresponds to the beginning of the occurrence, if one exists.
-///
-/// Callers may assume that this never returns false negatives (i.e., it
-/// never misses an actual occurrence), but must check that the returned
-/// position corresponds to a match. That is, it can return false
-/// positives.
-///
-/// This should only be used when Freqy is constructed for forward
-/// searching.
-pub(crate) fn find(
- prestate: &mut PrefilterState,
- ninfo: &NeedleInfo,
- haystack: &[u8],
- needle: &[u8],
-) -> Option<usize> {
- let mut i = 0;
- let (rare1i, rare2i) = ninfo.rarebytes.as_rare_usize();
- let (rare1, rare2) = ninfo.rarebytes.as_rare_bytes(needle);
- while prestate.is_effective() {
- // Use a fast vectorized implementation to skip to the next
- // occurrence of the rarest byte (heuristically chosen) in the
- // needle.
- let found = crate::memchr(rare1, &haystack[i..])?;
- prestate.update(found);
- i += found;
-
- // If we can't align our first match with the haystack, then a
- // match is impossible.
- if i < rare1i {
- i += 1;
- continue;
- }
-
- // Align our rare2 byte with the haystack. A mismatch means that
- // a match is impossible.
- let aligned_rare2i = i - rare1i + rare2i;
- if haystack.get(aligned_rare2i) != Some(&rare2) {
- i += 1;
- continue;
- }
-
- // We've done what we can. There might be a match here.
- return Some(i - rare1i);
- }
- // The only way we get here is if we believe our skipping heuristic
- // has become ineffective. We're allowed to return false positives,
- // so return the position at which we advanced to, aligned to the
- // haystack.
- Some(i.saturating_sub(rare1i))
-}
-
-#[cfg(all(test, feature = "std"))]
-mod tests {
- use super::*;
-
- fn freqy_find(haystack: &[u8], needle: &[u8]) -> Option<usize> {
- let ninfo = NeedleInfo::new(needle);
- let mut prestate = PrefilterState::new();
- find(&mut prestate, &ninfo, haystack, needle)
- }
-
- #[test]
- fn freqy_forward() {
- assert_eq!(Some(0), freqy_find(b"BARFOO", b"BAR"));
- assert_eq!(Some(3), freqy_find(b"FOOBAR", b"BAR"));
- assert_eq!(Some(0), freqy_find(b"zyzz", b"zyzy"));
- assert_eq!(Some(2), freqy_find(b"zzzy", b"zyzy"));
- assert_eq!(None, freqy_find(b"zazb", b"zyzy"));
- assert_eq!(Some(0), freqy_find(b"yzyy", b"yzyz"));
- assert_eq!(Some(2), freqy_find(b"yyyz", b"yzyz"));
- assert_eq!(None, freqy_find(b"yayb", b"yzyz"));
- }
-
- #[test]
- #[cfg(not(miri))]
- fn prefilter_permutations() {
- use crate::memmem::prefilter::tests::PrefilterTest;
-
- // SAFETY: super::find is safe to call for all inputs and on all
- // platforms.
- unsafe { PrefilterTest::run_all_tests(super::find) };
- }
-}
diff --git a/vendor/memchr/src/memmem/prefilter/genericsimd.rs b/vendor/memchr/src/memmem/prefilter/genericsimd.rs
deleted file mode 100644
index 1a6e38734..000000000
--- a/vendor/memchr/src/memmem/prefilter/genericsimd.rs
+++ /dev/null
@@ -1,207 +0,0 @@
-use core::mem::size_of;
-
-use crate::memmem::{
- prefilter::{PrefilterFnTy, PrefilterState},
- vector::Vector,
- NeedleInfo,
-};
-
-/// The implementation of the forward vector accelerated candidate finder.
-///
-/// This is inspired by the "generic SIMD" algorithm described here:
-/// http://0x80.pl/articles/simd-strfind.html#algorithm-1-generic-simd
-///
-/// The main difference is that this is just a prefilter. That is, it reports
-/// candidates once they are seen and doesn't attempt to confirm them. Also,
-/// the bytes this routine uses to check for candidates are selected based on
-/// an a priori background frequency distribution. This means that on most
-/// haystacks, this will on average spend more time in vectorized code than you
-/// would if you just selected the first and last bytes of the needle.
-///
-/// Note that a non-prefilter variant of this algorithm can be found in the
-/// parent module, but it only works on smaller needles.
-///
-/// `prestate`, `ninfo`, `haystack` and `needle` are the four prefilter
-/// function parameters. `fallback` is a prefilter that is used if the haystack
-/// is too small to be handled with the given vector size.
-///
-/// This routine is not safe because it is intended for callers to specialize
-/// this with a particular vector (e.g., __m256i) and then call it with the
-/// relevant target feature (e.g., avx2) enabled.
-///
-/// # Panics
-///
-/// If `needle.len() <= 1`, then this panics.
-///
-/// # Safety
-///
-/// Since this is meant to be used with vector functions, callers need to
-/// specialize this inside of a function with a `target_feature` attribute.
-/// Therefore, callers must ensure that whatever target feature is being used
-/// supports the vector functions that this function is specialized for. (For
-/// the specific vector functions used, see the Vector trait implementations.)
-#[inline(always)]
-pub(crate) unsafe fn find<V: Vector>(
- prestate: &mut PrefilterState,
- ninfo: &NeedleInfo,
- haystack: &[u8],
- needle: &[u8],
- fallback: PrefilterFnTy,
-) -> Option<usize> {
- assert!(needle.len() >= 2, "needle must be at least 2 bytes");
- let (rare1i, rare2i) = ninfo.rarebytes.as_rare_ordered_usize();
- let min_haystack_len = rare2i + size_of::<V>();
- if haystack.len() < min_haystack_len {
- return fallback(prestate, ninfo, haystack, needle);
- }
-
- let start_ptr = haystack.as_ptr();
- let end_ptr = start_ptr.add(haystack.len());
- let max_ptr = end_ptr.sub(min_haystack_len);
- let mut ptr = start_ptr;
-
- let rare1chunk = V::splat(needle[rare1i]);
- let rare2chunk = V::splat(needle[rare2i]);
-
- // N.B. I did experiment with unrolling the loop to deal with size(V)
- // bytes at a time and 2*size(V) bytes at a time. The double unroll
- // was marginally faster while the quadruple unroll was unambiguously
- // slower. In the end, I decided the complexity from unrolling wasn't
- // worth it. I used the memmem/krate/prebuilt/huge-en/ benchmarks to
- // compare.
- while ptr <= max_ptr {
- let m = find_in_chunk2(ptr, rare1i, rare2i, rare1chunk, rare2chunk);
- if let Some(chunki) = m {
- return Some(matched(prestate, start_ptr, ptr, chunki));
- }
- ptr = ptr.add(size_of::<V>());
- }
- if ptr < end_ptr {
- // This routine immediately quits if a candidate match is found.
- // That means that if we're here, no candidate matches have been
- // found at or before 'ptr'. Thus, we don't need to mask anything
- // out even though we might technically search part of the haystack
- // that we've already searched (because we know it can't match).
- ptr = max_ptr;
- let m = find_in_chunk2(ptr, rare1i, rare2i, rare1chunk, rare2chunk);
- if let Some(chunki) = m {
- return Some(matched(prestate, start_ptr, ptr, chunki));
- }
- }
- prestate.update(haystack.len());
- None
-}
-
-// Below are two different techniques for checking whether a candidate
-// match exists in a given chunk or not. find_in_chunk2 checks two bytes
-// whereas find_in_chunk3 checks three bytes. The idea behind checking
-// three bytes is that while we do a bit more work per iteration, we
-// decrease the chances of a false positive match being reported and thus
-// make the search faster overall. This actually works out for the
-// memmem/krate/prebuilt/huge-en/never-all-common-bytes benchmark, where
-// using find_in_chunk3 is about 25% faster than find_in_chunk2. However,
-// it turns out that find_in_chunk2 is faster for all other benchmarks, so
-// perhaps the extra check isn't worth it in practice.
-//
-// For now, we go with find_in_chunk2, but we leave find_in_chunk3 around
-// to make it easy to switch to and benchmark when possible.
-
-/// Search for an occurrence of two rare bytes from the needle in the current
-/// chunk pointed to by ptr.
-///
-/// rare1chunk and rare2chunk correspond to vectors with the rare1 and rare2
-/// bytes repeated in each 8-bit lane, respectively.
-///
-/// # Safety
-///
-/// It must be safe to do an unaligned read of size(V) bytes starting at both
-/// (ptr + rare1i) and (ptr + rare2i).
-#[inline(always)]
-unsafe fn find_in_chunk2<V: Vector>(
- ptr: *const u8,
- rare1i: usize,
- rare2i: usize,
- rare1chunk: V,
- rare2chunk: V,
-) -> Option<usize> {
- let chunk0 = V::load_unaligned(ptr.add(rare1i));
- let chunk1 = V::load_unaligned(ptr.add(rare2i));
-
- let eq0 = chunk0.cmpeq(rare1chunk);
- let eq1 = chunk1.cmpeq(rare2chunk);
-
- let match_offsets = eq0.and(eq1).movemask();
- if match_offsets == 0 {
- return None;
- }
- Some(match_offsets.trailing_zeros() as usize)
-}
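
Both chunk checks reduce to taking the lowest set bit of a lane mask; a scalar illustration of the `movemask` + `trailing_zeros` step (illustrative helper, not part of the crate):

    // Each set bit in `mask` marks a lane where every compared byte
    // matched; the first candidate in the chunk is the lowest set bit.
    fn first_candidate(mask: u32) -> Option<usize> {
        if mask == 0 {
            None
        } else {
            Some(mask.trailing_zeros() as usize)
        }
    }

    fn main() {
        assert_eq!(Some(2), first_candidate(0b0010_0100));
        assert_eq!(None, first_candidate(0));
    }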
-
-/// Search for an occurrence of two rare bytes and the first byte (even if one
-/// of the rare bytes is equivalent to the first byte) from the needle in the
-/// current chunk pointed to by ptr.
-///
-/// firstchunk, rare1chunk and rare2chunk correspond to vectors with the first,
-/// rare1 and rare2 bytes repeated in each 8-bit lane, respectively.
-///
-/// # Safety
-///
-/// It must be safe to do an unaligned read of size(V) bytes starting at ptr,
-/// (ptr + rare1i) and (ptr + rare2i).
-#[allow(dead_code)]
-#[inline(always)]
-unsafe fn find_in_chunk3<V: Vector>(
- ptr: *const u8,
- rare1i: usize,
- rare2i: usize,
- firstchunk: V,
- rare1chunk: V,
- rare2chunk: V,
-) -> Option<usize> {
- let chunk0 = V::load_unaligned(ptr);
- let chunk1 = V::load_unaligned(ptr.add(rare1i));
- let chunk2 = V::load_unaligned(ptr.add(rare2i));
-
- let eq0 = chunk0.cmpeq(firstchunk);
- let eq1 = chunk1.cmpeq(rare1chunk);
- let eq2 = chunk2.cmpeq(rare2chunk);
-
- let match_offsets = eq0.and(eq1).and(eq2).movemask();
- if match_offsets == 0 {
- return None;
- }
- Some(match_offsets.trailing_zeros() as usize)
-}
-
-/// Accepts a chunk-relative offset and returns a haystack relative offset
-/// after updating the prefilter state.
-///
-/// Why do we use this uninlineable function when a search completes? Well,
-/// I don't know. Really. Obviously this function was not here initially.
-/// When doing profiling, the codegen for the inner loop here looked bad and
-/// I didn't know why. There were a couple extra 'add' instructions and an
-/// extra 'lea' instruction that I couldn't explain. I hypothesized that the
-/// optimizer was having trouble untangling the hot code in the loop from the
-/// code that deals with a candidate match. By putting the latter into an
-/// uninlineable function, it kind of forces the issue and it had the intended
-/// effect: codegen improved measurably. It's good for a ~10% improvement
-/// across the board on the memmem/krate/prebuilt/huge-en/ benchmarks.
-#[cold]
-#[inline(never)]
-fn matched(
- prestate: &mut PrefilterState,
- start_ptr: *const u8,
- ptr: *const u8,
- chunki: usize,
-) -> usize {
- let found = diff(ptr, start_ptr) + chunki;
- prestate.update(found);
- found
-}
-
-/// Subtract `b` from `a` and return the difference. `a` must be greater than
-/// or equal to `b`.
-fn diff(a: *const u8, b: *const u8) -> usize {
- debug_assert!(a >= b);
- (a as usize) - (b as usize)
-}
diff --git a/vendor/memchr/src/memmem/prefilter/mod.rs b/vendor/memchr/src/memmem/prefilter/mod.rs
deleted file mode 100644
index 015d3b27a..000000000
--- a/vendor/memchr/src/memmem/prefilter/mod.rs
+++ /dev/null
@@ -1,570 +0,0 @@
-use crate::memmem::{rarebytes::RareNeedleBytes, NeedleInfo};
-
-mod fallback;
-#[cfg(memchr_runtime_simd)]
-mod genericsimd;
-#[cfg(all(not(miri), target_arch = "wasm32", memchr_runtime_simd))]
-mod wasm;
-#[cfg(all(not(miri), target_arch = "x86_64", memchr_runtime_simd))]
-mod x86;
-
-/// The maximum frequency rank permitted for the fallback prefilter. If the
-/// rarest byte in the needle has a frequency rank above this value, then no
-/// prefilter is used if the fallback prefilter would otherwise be selected.
-const MAX_FALLBACK_RANK: usize = 250;
-
-/// A combination of prefilter effectiveness state, the prefilter function and
-/// the needle info required to run a prefilter.
-///
-/// For the most part, these are grouped into a single type for convenience,
-/// instead of needing to pass around all three as distinct function
-/// parameters.
-pub(crate) struct Pre<'a> {
- /// State that tracks the effectiveness of a prefilter.
- pub(crate) state: &'a mut PrefilterState,
- /// The actual prefilter function.
- pub(crate) prefn: PrefilterFn,
- /// Information about a needle, such as its RK hash and rare byte offsets.
- pub(crate) ninfo: &'a NeedleInfo,
-}
-
-impl<'a> Pre<'a> {
- /// Call this prefilter on the given haystack with the given needle.
- #[inline(always)]
- pub(crate) fn call(
- &mut self,
- haystack: &[u8],
- needle: &[u8],
- ) -> Option<usize> {
- self.prefn.call(self.state, self.ninfo, haystack, needle)
- }
-
- /// Return true if and only if this prefilter should be used.
- #[inline(always)]
- pub(crate) fn should_call(&mut self) -> bool {
- self.state.is_effective()
- }
-}
-
-/// A prefilter function.
-///
-/// A prefilter function describes both forward and reverse searches.
-/// (Although, we don't currently implement prefilters for reverse searching.)
-/// In the case of a forward search, the position returned corresponds to
-/// the starting offset of a match (confirmed or possible). Its minimum
-/// value is `0`, and its maximum value is `haystack.len() - 1`. In the case
-/// of a reverse search, the position returned corresponds to the position
-/// immediately after a match (confirmed or possible). Its minimum value is `1`
-/// and its maximum value is `haystack.len()`.
-///
-/// In both cases, the position returned is the starting (or ending) point of a
-/// _possible_ match. That is, returning a false positive is okay. A prefilter,
-/// however, must never return any false negatives. That is, if a match exists
-/// at a particular position `i`, then a prefilter _must_ return that position.
-/// It cannot skip past it.
-///
-/// # Safety
-///
-/// A prefilter function is not safe to create, since not all prefilters are
-/// safe to call in all contexts. (e.g., A prefilter that uses AVX instructions
-/// may only be called on x86_64 CPUs with the relevant AVX feature enabled.)
-/// Thus, callers must ensure that when a prefilter function is created that it
-/// is safe to call for the current environment.
-#[derive(Clone, Copy)]
-pub(crate) struct PrefilterFn(PrefilterFnTy);
-
-/// The type of a prefilter function. All prefilters must satisfy this
-/// signature.
-///
-/// Using a function pointer like this does inhibit inlining, but it does
-/// eliminate branching and the extra costs associated with copying a larger
-/// enum. Note also that using Box<dyn SomePrefilterTrait> can't really work
-/// here, since we want to work in contexts that don't have dynamic memory
-/// allocation. Moreover, in the default configuration of this crate on x86_64
-/// CPUs released in the past ~decade, we will use an AVX2-optimized prefilter,
-/// which generally won't be inlineable into the surrounding code anyway.
-/// (Unless AVX2 is enabled at compile time, but this is typically rare, since
-/// it produces a non-portable binary.)
-pub(crate) type PrefilterFnTy = unsafe fn(
- prestate: &mut PrefilterState,
- ninfo: &NeedleInfo,
- haystack: &[u8],
- needle: &[u8],
-) -> Option<usize>;
-
-// If the haystack is too small for SSE2, then just run memchr on the
-// rarest byte and be done with it. (It is likely that this code path is
-// rarely exercised, since a higher level routine will probably dispatch to
-// Rabin-Karp for such a small haystack.)
-#[cfg(memchr_runtime_simd)]
-fn simple_memchr_fallback(
- _prestate: &mut PrefilterState,
- ninfo: &NeedleInfo,
- haystack: &[u8],
- needle: &[u8],
-) -> Option<usize> {
- let (rare, _) = ninfo.rarebytes.as_rare_ordered_usize();
- crate::memchr(needle[rare], haystack).map(|i| i.saturating_sub(rare))
-}
-
-impl PrefilterFn {
- /// Create a new prefilter function from the function pointer given.
- ///
- /// # Safety
- ///
- /// Callers must ensure that the given prefilter function is safe to call
- /// for all inputs in the current environment. For example, if the given
- /// prefilter function uses AVX instructions, then the caller must ensure
- /// that the appropriate AVX CPU features are enabled.
- pub(crate) unsafe fn new(prefn: PrefilterFnTy) -> PrefilterFn {
- PrefilterFn(prefn)
- }
-
- /// Call the underlying prefilter function with the given arguments.
- pub fn call(
- self,
- prestate: &mut PrefilterState,
- ninfo: &NeedleInfo,
- haystack: &[u8],
- needle: &[u8],
- ) -> Option<usize> {
- // SAFETY: Callers have the burden of ensuring that a prefilter
- // function is safe to call for all inputs in the current environment.
- unsafe { (self.0)(prestate, ninfo, haystack, needle) }
- }
-}
-
-impl core::fmt::Debug for PrefilterFn {
- fn fmt(&self, f: &mut core::fmt::Formatter) -> core::fmt::Result {
- "<prefilter-fn(...)>".fmt(f)
- }
-}
-
-/// Prefilter controls whether heuristics are used to accelerate searching.
-///
-/// A prefilter refers to the idea of detecting candidate matches very quickly,
-/// and then confirming whether those candidates are full matches. This
-/// idea can be quite effective since it's often the case that looking for
-/// candidates can be a lot faster than running a complete substring search
-/// over the entire input. Namely, looking for candidates can be done with
-/// extremely fast vectorized code.
-///
-/// The downside of a prefilter is that it assumes false positives (which are
-/// candidates generated by a prefilter that aren't matches) are somewhat rare
-/// relative to the frequency of full matches. That is, if a lot of false
-/// positives are generated, then it's possible for search time to be worse
-/// than if the prefilter wasn't enabled in the first place.
-///
-/// Another downside of a prefilter is that it can result in highly variable
-/// performance, where some cases are extraordinarily fast and others aren't.
-/// Typically, variable performance isn't a problem, but it may be for your use
-/// case.
-///
-/// The use of prefilters in this implementation does use a heuristic to detect
-/// when a prefilter might not be carrying its weight, and will dynamically
-/// disable its use. Nevertheless, this configuration option gives callers
-/// the ability to disable prefilters if you have knowledge that they won't be
-/// useful.
-#[derive(Clone, Copy, Debug)]
-#[non_exhaustive]
-pub enum Prefilter {
- /// Never use a prefilter in substring search.
- None,
- /// Automatically detect whether a heuristic prefilter should be used. If
- /// it is used, then heuristics will be used to dynamically disable the
- /// prefilter if it is believed to not be carrying its weight.
- Auto,
-}
-
-impl Default for Prefilter {
- fn default() -> Prefilter {
- Prefilter::Auto
- }
-}
-
-impl Prefilter {
- pub(crate) fn is_none(&self) -> bool {
- match *self {
- Prefilter::None => true,
- _ => false,
- }
- }
-}
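
Opting out of the heuristic goes through `FinderBuilder`; a short sketch using the public API:

    use memchr::memmem::{FinderBuilder, Prefilter};

    fn main() {
        // Useful when predictable worst-case latency matters more than
        // average-case throughput.
        let finder = FinderBuilder::new()
            .prefilter(Prefilter::None)
            .build_forward(b"needle");
        assert_eq!(Some(2), finder.find(b"a needle in a haystack"));
    }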
-
-/// PrefilterState tracks state associated with the effectiveness of a
-/// prefilter. It is used to track how many bytes, on average, are skipped by
-/// the prefilter. If this average dips below a certain threshold over time,
-/// then the state renders the prefilter inert and stops using it.
-///
-/// A prefilter state should be created for each search. (Where creating an
-/// iterator is treated as a single search.) A prefilter state should only be
-/// created by the searcher that owns the prefilter. e.g., a searcher with no
-/// prefilter enabled should produce an inert `PrefilterState`.
-#[derive(Clone, Debug)]
-pub(crate) struct PrefilterState {
- /// The number of skips that has been executed. This is always 1 greater
- /// than the actual number of skips. The special sentinel value of 0
- /// indicates that the prefilter is inert. This is useful to avoid
- /// additional checks to determine whether the prefilter is still
- /// "effective." Once a prefilter becomes inert, it should no longer be
- /// used (according to our heuristics).
- skips: u32,
- /// The total number of bytes that have been skipped.
- skipped: u32,
-}
-
-impl PrefilterState {
- /// The minimum number of skip attempts to try before considering whether
- /// a prefilter is effective or not.
- const MIN_SKIPS: u32 = 50;
-
- /// The minimum amount of bytes that skipping must average.
- ///
- /// This value was chosen based on varying it and checking
- /// the microbenchmarks. In particular, this can impact the
- /// pathological/repeated-{huge,small} benchmarks quite a bit if it's set
- /// too low.
- const MIN_SKIP_BYTES: u32 = 8;
-
- /// Create a fresh prefilter state.
- pub(crate) fn new() -> PrefilterState {
- PrefilterState { skips: 1, skipped: 0 }
- }
-
- /// Create a fresh prefilter state that is always inert.
- pub(crate) fn inert() -> PrefilterState {
- PrefilterState { skips: 0, skipped: 0 }
- }
-
- /// Update this state with the number of bytes skipped on the last
- /// invocation of the prefilter.
- #[inline]
- pub(crate) fn update(&mut self, skipped: usize) {
- self.skips = self.skips.saturating_add(1);
- // We need to do this dance since it's technically possible for
- // `skipped` to overflow a `u32`. (And we use a `u32` to reduce the
- // size of a prefilter state.)
- if skipped > core::u32::MAX as usize {
- self.skipped = core::u32::MAX;
- } else {
- self.skipped = self.skipped.saturating_add(skipped as u32);
- }
- }
-
- /// Return true if and only if this state indicates that a prefilter is
- /// still effective.
- #[inline]
- pub(crate) fn is_effective(&mut self) -> bool {
- if self.is_inert() {
- return false;
- }
- if self.skips() < PrefilterState::MIN_SKIPS {
- return true;
- }
- if self.skipped >= PrefilterState::MIN_SKIP_BYTES * self.skips() {
- return true;
- }
-
- // We're inert.
- self.skips = 0;
- false
- }
-
- #[inline]
- fn is_inert(&self) -> bool {
- self.skips == 0
- }
-
- #[inline]
- fn skips(&self) -> u32 {
- self.skips.saturating_sub(1)
- }
-}
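
To make the `MIN_SKIPS`/`MIN_SKIP_BYTES` arithmetic concrete, an illustrative crate-internal sketch (`PrefilterState` is not public):

    let mut state = PrefilterState::new();
    for _ in 0..50 {
        state.update(4); // 4 bytes skipped per attempt, below the 8-byte bar
    }
    assert!(!state.is_effective()); // 200 < 8 * 50, so the state goes inert
    assert!(!state.is_effective()); // and it stays inert from then on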
-
-/// Determine which prefilter function, if any, to use.
-///
-/// This only applies to x86_64 when runtime SIMD detection is enabled (which
-/// is the default). In general, we try to use an AVX prefilter, followed by
-/// SSE and then followed by a generic one based on memchr.
-#[inline(always)]
-pub(crate) fn forward(
- config: &Prefilter,
- rare: &RareNeedleBytes,
- needle: &[u8],
-) -> Option<PrefilterFn> {
- if config.is_none() || needle.len() <= 1 {
- return None;
- }
-
- #[cfg(all(not(miri), target_arch = "x86_64", memchr_runtime_simd))]
- {
- #[cfg(feature = "std")]
- {
- if cfg!(memchr_runtime_avx) {
- if is_x86_feature_detected!("avx2") {
- // SAFETY: x86::avx::find only requires the avx2 feature,
- // which we've just checked above.
- return unsafe { Some(PrefilterFn::new(x86::avx::find)) };
- }
- }
- }
- if cfg!(memchr_runtime_sse2) {
- // SAFETY: x86::sse::find only requires the sse2 feature, which is
- // guaranteed to be available on x86_64.
- return unsafe { Some(PrefilterFn::new(x86::sse::find)) };
- }
- }
- #[cfg(all(not(miri), target_arch = "wasm32", memchr_runtime_simd))]
- {
- // SAFETY: `wasm::find` is actually a safe function
- //
- // Also note that the `if true` is here to prevent rustc from warning,
- // on wasm with simd, that the code below is dead code.
- if true {
- return unsafe { Some(PrefilterFn::new(wasm::find)) };
- }
- }
- // Check that our rarest byte has a reasonably low rank. The main issue
- // here is that the fallback prefilter can perform pretty poorly if it's
- // given common bytes. So we try to avoid the worst cases here.
- let (rare1_rank, _) = rare.as_ranks(needle);
- if rare1_rank <= MAX_FALLBACK_RANK {
- // SAFETY: fallback::find is safe to call in all environments.
- return unsafe { Some(PrefilterFn::new(fallback::find)) };
- }
- None
-}
-
-/// Return the minimum length of the haystack in which a prefilter should be
-/// used. If the haystack is below this length, then it's probably not worth
-/// the overhead of running the prefilter.
-///
-/// We used to look at the length of a haystack here. That is, if it was too
-/// small, then don't bother with the prefilter. But two things changed:
-/// the prefilter falls back to memchr for small haystacks, and, at the
-/// meta-searcher level, Rabin-Karp is employed for tiny haystacks anyway.
-///
-/// We keep it around for now in case we want to bring it back.
-#[allow(dead_code)]
-pub(crate) fn minimum_len(_haystack: &[u8], needle: &[u8]) -> usize {
- // If the haystack length isn't greater than needle.len() * FACTOR, then
- // no prefilter will be used. The presumption here is that since there
- // are so few bytes to check, it's not worth running the prefilter since
- // there will need to be a validation step anyway. Thus, the prefilter is
- // largely redundant work.
- //
- // Increasing the factor noticeably hurts the
- // memmem/krate/prebuilt/teeny-*/never-john-watson benchmarks.
- const PREFILTER_LENGTH_FACTOR: usize = 2;
- const VECTOR_MIN_LENGTH: usize = 16;
- let min = core::cmp::max(
- VECTOR_MIN_LENGTH,
- PREFILTER_LENGTH_FACTOR * needle.len(),
- );
- // For haystacks with length==min, we still want to avoid the prefilter,
- // so add 1.
- min + 1
-}
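
Plugging numbers into the formula above: a 3-byte needle yields max(16, 2 * 3) + 1 == 17, so a prefilter would only have been considered for haystacks of at least 17 bytes. A one-line check of that arithmetic:

    assert_eq!(17, core::cmp::max(16, 2 * 3) + 1);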
-
-#[cfg(all(test, feature = "std", not(miri)))]
-pub(crate) mod tests {
- use std::convert::{TryFrom, TryInto};
-
- use super::*;
- use crate::memmem::{
- prefilter::PrefilterFnTy, rabinkarp, rarebytes::RareNeedleBytes,
- };
-
- // Below is a small jig that generates prefilter tests. The main purpose
- // of this jig is to generate tests of varying needle/haystack lengths
- // in order to try and exercise all code paths in our prefilters. And in
- // particular, this is especially important for vectorized prefilters where
- // certain code paths might only be exercised at certain lengths.
-
- /// A test that represents the input and expected output to a prefilter
- /// function. The test should be able to run with any prefilter function
- /// and get the expected output.
- pub(crate) struct PrefilterTest {
- // These fields represent the inputs and expected output of a forwards
- // prefilter function.
- pub(crate) ninfo: NeedleInfo,
- pub(crate) haystack: Vec<u8>,
- pub(crate) needle: Vec<u8>,
- pub(crate) output: Option<usize>,
- }
-
- impl PrefilterTest {
- /// Run all generated forward prefilter tests on the given prefn.
- ///
- /// # Safety
- ///
- /// Callers must ensure that the given prefilter function pointer is
- /// safe to call for all inputs in the current environment.
- pub(crate) unsafe fn run_all_tests(prefn: PrefilterFnTy) {
- PrefilterTest::run_all_tests_filter(prefn, |_| true)
- }
-
- /// Run all generated forward prefilter tests that pass the given
- /// predicate on the given prefn.
- ///
- /// # Safety
- ///
- /// Callers must ensure that the given prefilter function pointer is
- /// safe to call for all inputs in the current environment.
- pub(crate) unsafe fn run_all_tests_filter(
- prefn: PrefilterFnTy,
- mut predicate: impl FnMut(&PrefilterTest) -> bool,
- ) {
- for seed in PREFILTER_TEST_SEEDS {
- for test in seed.generate() {
- if predicate(&test) {
- test.run(prefn);
- }
- }
- }
- }
-
- /// Create a new prefilter test from a seed and some chosen offsets to
- /// rare bytes in the seed's needle.
- ///
- /// If a valid test could not be constructed, then None is returned.
- /// (Currently, we take the approach of massaging tests to be valid
- /// instead of rejecting them outright.)
- fn new(
- seed: PrefilterTestSeed,
- rare1i: usize,
- rare2i: usize,
- haystack_len: usize,
- needle_len: usize,
- output: Option<usize>,
- ) -> Option<PrefilterTest> {
- let mut rare1i: u8 = rare1i.try_into().unwrap();
- let mut rare2i: u8 = rare2i.try_into().unwrap();
- // The '#' byte is never used in a haystack (unless we're expecting
- // a match), while the '@' byte is never used in a needle.
- let mut haystack = vec![b'@'; haystack_len];
- let mut needle = vec![b'#'; needle_len];
- needle[0] = seed.first;
- needle[rare1i as usize] = seed.rare1;
- needle[rare2i as usize] = seed.rare2;
- // If we're expecting a match, then make sure the needle occurs
- // in the haystack at the expected position.
- if let Some(i) = output {
- haystack[i..i + needle.len()].copy_from_slice(&needle);
- }
- // If the operations above lead to rare offsets pointing to the
- // non-first occurrence of a byte, then adjust it. This might lead
- // to redundant tests, but it's simpler than trying to change the
- // generation process I think.
- if let Some(i) = crate::memchr(seed.rare1, &needle) {
- rare1i = u8::try_from(i).unwrap();
- }
- if let Some(i) = crate::memchr(seed.rare2, &needle) {
- rare2i = u8::try_from(i).unwrap();
- }
- let ninfo = NeedleInfo {
- rarebytes: RareNeedleBytes::new(rare1i, rare2i),
- nhash: rabinkarp::NeedleHash::forward(&needle),
- };
- Some(PrefilterTest { ninfo, haystack, needle, output })
- }
-
- /// Run this specific test on the given prefilter function. If the
- /// outputs do not match, then this routine panics with a failure
- /// message.
- ///
- /// # Safety
- ///
- /// Callers must ensure that the given prefilter function pointer is
- /// safe to call for all inputs in the current environment.
- unsafe fn run(&self, prefn: PrefilterFnTy) {
- let mut prestate = PrefilterState::new();
- assert_eq!(
- self.output,
- prefn(
- &mut prestate,
- &self.ninfo,
- &self.haystack,
- &self.needle
- ),
- "ninfo: {:?}, haystack(len={}): {:?}, needle(len={}): {:?}",
- self.ninfo,
- self.haystack.len(),
- std::str::from_utf8(&self.haystack).unwrap(),
- self.needle.len(),
- std::str::from_utf8(&self.needle).unwrap(),
- );
- }
- }
-
- /// A set of prefilter test seeds. Each seed serves as the base for the
- /// generation of many other tests. In essence, the seed captures the
- /// "rare" and first bytes among our needle. The tests generated from each
- /// seed essentially vary the length of the needle and haystack, while
- /// using the rare/first byte configuration from the seed.
- ///
- /// The purpose of this is to test many different needle/haystack lengths.
- /// In particular, some of the vector optimizations might only have bugs
- /// in haystacks of a certain size.
- const PREFILTER_TEST_SEEDS: &[PrefilterTestSeed] = &[
- PrefilterTestSeed { first: b'x', rare1: b'y', rare2: b'z' },
- PrefilterTestSeed { first: b'x', rare1: b'x', rare2: b'z' },
- PrefilterTestSeed { first: b'x', rare1: b'y', rare2: b'x' },
- PrefilterTestSeed { first: b'x', rare1: b'x', rare2: b'x' },
- PrefilterTestSeed { first: b'x', rare1: b'y', rare2: b'y' },
- ];
-
- /// Data that describes a single prefilter test seed.
- #[derive(Clone, Copy)]
- struct PrefilterTestSeed {
- first: u8,
- rare1: u8,
- rare2: u8,
- }
-
- impl PrefilterTestSeed {
- /// Generate a series of prefilter tests from this seed.
- fn generate(self) -> impl Iterator<Item = PrefilterTest> {
- let len_start = 2;
- // The iterator below generates *a lot* of tests. The number of
- // tests was chosen somewhat empirically to be "bearable" when
- // running the test suite.
- //
- // We use an iterator here because the collective haystacks of all
- // these test cases add up to enough memory to OOM a conservative
- // sandbox or a small laptop.
- (len_start..=40).flat_map(move |needle_len| {
- let rare_start = len_start - 1;
- (rare_start..needle_len).flat_map(move |rare1i| {
- (rare1i..needle_len).flat_map(move |rare2i| {
- (needle_len..=66).flat_map(move |haystack_len| {
- PrefilterTest::new(
- self,
- rare1i,
- rare2i,
- haystack_len,
- needle_len,
- None,
- )
- .into_iter()
- .chain(
- (0..=(haystack_len - needle_len)).flat_map(
- move |output| {
- PrefilterTest::new(
- self,
- rare1i,
- rare2i,
- haystack_len,
- needle_len,
- Some(output),
- )
- },
- ),
- )
- })
- })
- })
- })
- }
- }
-}
diff --git a/vendor/memchr/src/memmem/prefilter/wasm.rs b/vendor/memchr/src/memmem/prefilter/wasm.rs
deleted file mode 100644
index 5470c922a..000000000
--- a/vendor/memchr/src/memmem/prefilter/wasm.rs
+++ /dev/null
@@ -1,39 +0,0 @@
-use core::arch::wasm32::v128;
-
-use crate::memmem::{
- prefilter::{PrefilterFnTy, PrefilterState},
- NeedleInfo,
-};
-
-// Check that the functions below satisfy the Prefilter function type.
-const _: PrefilterFnTy = find;
-
-/// A `v128`-accelerated candidate finder for single-substring search.
-#[target_feature(enable = "simd128")]
-pub(crate) fn find(
- prestate: &mut PrefilterState,
- ninfo: &NeedleInfo,
- haystack: &[u8],
- needle: &[u8],
-) -> Option<usize> {
- unsafe {
- super::genericsimd::find::<v128>(
- prestate,
- ninfo,
- haystack,
- needle,
- super::simple_memchr_fallback,
- )
- }
-}
-
-#[cfg(all(test, feature = "std"))]
-mod tests {
- #[test]
- #[cfg(not(miri))]
- fn prefilter_permutations() {
- use crate::memmem::prefilter::tests::PrefilterTest;
-        // SAFETY: super::find is safe to call for all inputs on wasm32.
- unsafe { PrefilterTest::run_all_tests(super::find) };
- }
-}
diff --git a/vendor/memchr/src/memmem/prefilter/x86/avx.rs b/vendor/memchr/src/memmem/prefilter/x86/avx.rs
deleted file mode 100644
index fb11f335b..000000000
--- a/vendor/memchr/src/memmem/prefilter/x86/avx.rs
+++ /dev/null
@@ -1,46 +0,0 @@
-use core::arch::x86_64::__m256i;
-
-use crate::memmem::{
- prefilter::{PrefilterFnTy, PrefilterState},
- NeedleInfo,
-};
-
-// Check that the functions below satisfy the Prefilter function type.
-const _: PrefilterFnTy = find;
-
-/// An AVX2 accelerated candidate finder for single-substring search.
-///
-/// # Safety
-///
-/// Callers must ensure that the avx2 CPU feature is enabled in the current
-/// environment.
-#[target_feature(enable = "avx2")]
-pub(crate) unsafe fn find(
- prestate: &mut PrefilterState,
- ninfo: &NeedleInfo,
- haystack: &[u8],
- needle: &[u8],
-) -> Option<usize> {
- super::super::genericsimd::find::<__m256i>(
- prestate,
- ninfo,
- haystack,
- needle,
- super::sse::find,
- )
-}
-
-#[cfg(test)]
-mod tests {
- #[test]
- #[cfg(not(miri))]
- fn prefilter_permutations() {
- use crate::memmem::prefilter::tests::PrefilterTest;
- if !is_x86_feature_detected!("avx2") {
- return;
- }
- // SAFETY: The safety of super::find only requires that the current
- // CPU support AVX2, which we checked above.
- unsafe { PrefilterTest::run_all_tests(super::find) };
- }
-}
diff --git a/vendor/memchr/src/memmem/prefilter/x86/mod.rs b/vendor/memchr/src/memmem/prefilter/x86/mod.rs
deleted file mode 100644
index 91381e516..000000000
--- a/vendor/memchr/src/memmem/prefilter/x86/mod.rs
+++ /dev/null
@@ -1,5 +0,0 @@
-// We only use AVX when we can detect at runtime whether it's available, which
-// requires std.
-#[cfg(feature = "std")]
-pub(crate) mod avx;
-pub(crate) mod sse;
diff --git a/vendor/memchr/src/memmem/prefilter/x86/sse.rs b/vendor/memchr/src/memmem/prefilter/x86/sse.rs
deleted file mode 100644
index b1c48e1e1..000000000
--- a/vendor/memchr/src/memmem/prefilter/x86/sse.rs
+++ /dev/null
@@ -1,42 +0,0 @@
-use core::arch::x86_64::__m128i;
-
-use crate::memmem::{
- prefilter::{PrefilterFnTy, PrefilterState},
- NeedleInfo,
-};
-
-// Check that the functions below satisfy the Prefilter function type.
-const _: PrefilterFnTy = find;
-
-/// An SSE2 accelerated candidate finder for single-substring search.
-///
-/// # Safety
-///
-/// Callers must ensure that the sse2 CPU feature is enabled in the current
-/// environment. This feature should be enabled in all x86_64 targets.
-#[target_feature(enable = "sse2")]
-pub(crate) unsafe fn find(
- prestate: &mut PrefilterState,
- ninfo: &NeedleInfo,
- haystack: &[u8],
- needle: &[u8],
-) -> Option<usize> {
- super::super::genericsimd::find::<__m128i>(
- prestate,
- ninfo,
- haystack,
- needle,
- super::super::simple_memchr_fallback,
- )
-}
-
-#[cfg(all(test, feature = "std"))]
-mod tests {
- #[test]
- #[cfg(not(miri))]
- fn prefilter_permutations() {
- use crate::memmem::prefilter::tests::PrefilterTest;
- // SAFETY: super::find is safe to call for all inputs on x86.
- unsafe { PrefilterTest::run_all_tests(super::find) };
- }
-}
diff --git a/vendor/memchr/src/memmem/rabinkarp.rs b/vendor/memchr/src/memmem/rabinkarp.rs
deleted file mode 100644
index daa4015d5..000000000
--- a/vendor/memchr/src/memmem/rabinkarp.rs
+++ /dev/null
@@ -1,233 +0,0 @@
-/*
-This module implements the classical Rabin-Karp substring search algorithm,
-with no extra frills. While its use would seem to break our time complexity
-guarantee of O(m+n) (RK's time complexity is O(mn)), we are careful to only
-ever use RK on a constant subset of haystacks. The main point here is that
-RK has good latency properties for small needles/haystacks. It's very quick
-to compute a needle hash and zip through the haystack when compared to
-initializing Two-Way, for example. And this is especially useful for cases
-where the haystack is just too short for vector instructions to do much good.
-
-The hashing function used here is the same one recommended by ESMAJ.
-
-Another choice instead of Rabin-Karp would be Shift-Or. But its latency
-isn't quite as good since its preprocessing time is a bit more expensive
-(both in practice and in theory). However, perhaps Shift-Or has a place
-somewhere else for short patterns. I think the main problem is that it
-requires space proportional to the alphabet and the needle. If we, for
-example, supported needles up to length 16, then the total table size would be
-len(alphabet)*size_of::<u16>()==512 bytes. Which isn't exactly small, and it's
-probably bad to put that on the stack. So ideally, we'd throw it on the heap,
-but we'd really like to write as much code as possible without using alloc/std.
-But maybe it's worth the special casing. It's a TODO to benchmark.
-
-Wikipedia has a decent explanation, if a bit heavy on the theory:
-https://en.wikipedia.org/wiki/Rabin%E2%80%93Karp_algorithm
-
-But ESMAJ provides something a bit more concrete:
-http://www-igm.univ-mlv.fr/~lecroq/string/node5.html
-
-Finally, aho-corasick uses Rabin-Karp for multiple pattern match in some cases:
-https://github.com/BurntSushi/aho-corasick/blob/3852632f10587db0ff72ef29e88d58bf305a0946/src/packed/rabinkarp.rs
-*/
-
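-// As a concrete illustration of the rolling hash described above: the hash of
-// a window is the sum of w[i] * 2^(len-1-i) with wrapping arithmetic, so a
-// roll subtracts old*2^(len-1) and then shifts in the new byte. This is a
-// minimal, self-contained sketch; the names below are illustrative and not
-// part of this module.
-#[cfg(test)]
-mod rolling_hash_sketch {
-    fn add(h: u32, b: u8) -> u32 {
-        h.wrapping_shl(1).wrapping_add(u32::from(b))
-    }
-
-    #[test]
-    fn roll_matches_recompute() {
-        let win = [b'a', b'b', b'c', b'd'];
-        // Hash of the first window, win[0..3].
-        let h0 = add(add(add(0, win[0]), win[1]), win[2]);
-        // 2^(n-1) for a window of length n == 3.
-        let pow = 1u32 << 2;
-        // Roll: delete win[0], then add win[3].
-        let rolled =
-            add(h0.wrapping_sub(u32::from(win[0]).wrapping_mul(pow)), win[3]);
-        // Rolling must agree with hashing win[1..4] from scratch.
-        let fresh = add(add(add(0, win[1]), win[2]), win[3]);
-        assert_eq!(rolled, fresh);
-    }
-}
-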
-/// Whether RK is believed to be very fast for the given needle/haystack.
-pub(crate) fn is_fast(haystack: &[u8], _needle: &[u8]) -> bool {
- haystack.len() < 16
-}
-
-/// Search for the first occurrence of needle in haystack using Rabin-Karp.
-pub(crate) fn find(haystack: &[u8], needle: &[u8]) -> Option<usize> {
- find_with(&NeedleHash::forward(needle), haystack, needle)
-}
-
-/// Search for the first occurrence of needle in haystack using Rabin-Karp with
-/// a pre-computed needle hash.
-pub(crate) fn find_with(
- nhash: &NeedleHash,
- mut haystack: &[u8],
- needle: &[u8],
-) -> Option<usize> {
- if haystack.len() < needle.len() {
- return None;
- }
- let start = haystack.as_ptr() as usize;
- let mut hash = Hash::from_bytes_fwd(&haystack[..needle.len()]);
- // N.B. I've experimented with unrolling this loop, but couldn't realize
- // any obvious gains.
- loop {
- if nhash.eq(hash) && is_prefix(haystack, needle) {
- return Some(haystack.as_ptr() as usize - start);
- }
- if needle.len() >= haystack.len() {
- return None;
- }
- hash.roll(&nhash, haystack[0], haystack[needle.len()]);
- haystack = &haystack[1..];
- }
-}
-
-/// Search for the last occurrence of needle in haystack using Rabin-Karp.
-pub(crate) fn rfind(haystack: &[u8], needle: &[u8]) -> Option<usize> {
- rfind_with(&NeedleHash::reverse(needle), haystack, needle)
-}
-
-/// Search for the last occurrence of needle in haystack using Rabin-Karp with
-/// a pre-computed needle hash.
-pub(crate) fn rfind_with(
- nhash: &NeedleHash,
- mut haystack: &[u8],
- needle: &[u8],
-) -> Option<usize> {
- if haystack.len() < needle.len() {
- return None;
- }
- let mut hash =
- Hash::from_bytes_rev(&haystack[haystack.len() - needle.len()..]);
- loop {
- if nhash.eq(hash) && is_suffix(haystack, needle) {
- return Some(haystack.len() - needle.len());
- }
- if needle.len() >= haystack.len() {
- return None;
- }
- hash.roll(
- &nhash,
- haystack[haystack.len() - 1],
- haystack[haystack.len() - needle.len() - 1],
- );
- haystack = &haystack[..haystack.len() - 1];
- }
-}
-
-/// A hash derived from a needle.
-#[derive(Clone, Copy, Debug, Default)]
-pub(crate) struct NeedleHash {
- /// The actual hash.
- hash: Hash,
- /// The factor needed to multiply a byte by in order to subtract it from
- /// the hash. It is defined to be 2^(n-1) (using wrapping exponentiation),
- /// where n is the length of the needle. This is how we "remove" a byte
- /// from the hash once the hash window rolls past it.
- hash_2pow: u32,
-}
-
-impl NeedleHash {
- /// Create a new Rabin-Karp hash for the given needle for use in forward
- /// searching.
- pub(crate) fn forward(needle: &[u8]) -> NeedleHash {
- let mut nh = NeedleHash { hash: Hash::new(), hash_2pow: 1 };
- if needle.is_empty() {
- return nh;
- }
- nh.hash.add(needle[0]);
- for &b in needle.iter().skip(1) {
- nh.hash.add(b);
- nh.hash_2pow = nh.hash_2pow.wrapping_shl(1);
- }
- nh
- }
-
- /// Create a new Rabin-Karp hash for the given needle for use in reverse
- /// searching.
- pub(crate) fn reverse(needle: &[u8]) -> NeedleHash {
- let mut nh = NeedleHash { hash: Hash::new(), hash_2pow: 1 };
- if needle.is_empty() {
- return nh;
- }
- nh.hash.add(needle[needle.len() - 1]);
- for &b in needle.iter().rev().skip(1) {
- nh.hash.add(b);
- nh.hash_2pow = nh.hash_2pow.wrapping_shl(1);
- }
- nh
- }
-
- /// Return true if the hashes are equivalent.
- fn eq(&self, hash: Hash) -> bool {
- self.hash == hash
- }
-}
-
-/// A Rabin-Karp hash. This might represent the hash of a needle, or the hash
-/// of a rolling window in the haystack.
-#[derive(Clone, Copy, Debug, Default, Eq, PartialEq)]
-pub(crate) struct Hash(u32);
-
-impl Hash {
- /// Create a new hash that represents the empty string.
- pub(crate) fn new() -> Hash {
- Hash(0)
- }
-
- /// Create a new hash from the bytes given for use in forward searches.
- pub(crate) fn from_bytes_fwd(bytes: &[u8]) -> Hash {
- let mut hash = Hash::new();
- for &b in bytes {
- hash.add(b);
- }
- hash
- }
-
- /// Create a new hash from the bytes given for use in reverse searches.
- fn from_bytes_rev(bytes: &[u8]) -> Hash {
- let mut hash = Hash::new();
- for &b in bytes.iter().rev() {
- hash.add(b);
- }
- hash
- }
-
- /// Add 'new' and remove 'old' from this hash. The given needle hash should
- /// correspond to the hash computed for the needle being searched for.
- ///
- /// This is meant to be used when the rolling window of the haystack is
- /// advanced.
- fn roll(&mut self, nhash: &NeedleHash, old: u8, new: u8) {
- self.del(nhash, old);
- self.add(new);
- }
-
- /// Add a byte to this hash.
- fn add(&mut self, byte: u8) {
- self.0 = self.0.wrapping_shl(1).wrapping_add(byte as u32);
- }
-
- /// Remove a byte from this hash. The given needle hash should correspond
- /// to the hash computed for the needle being searched for.
- fn del(&mut self, nhash: &NeedleHash, byte: u8) {
- let factor = nhash.hash_2pow;
- self.0 = self.0.wrapping_sub((byte as u32).wrapping_mul(factor));
- }
-}
-
-/// Returns true if the given needle is a prefix of the given haystack.
-///
-/// We forcefully don't inline the is_prefix call and hint at the compiler that
-/// it is unlikely to be called. This causes the inner rabinkarp loop above
-/// to be a bit tighter and leads to some performance improvement. See the
-/// memmem/krate/prebuilt/sliceslice-words/words benchmark.
-#[cold]
-#[inline(never)]
-fn is_prefix(haystack: &[u8], needle: &[u8]) -> bool {
- crate::memmem::util::is_prefix(haystack, needle)
-}
-
-/// Returns true if the given needle is a suffix of the given haystack.
-///
-/// See is_prefix for why this is forcefully not inlined.
-#[cold]
-#[inline(never)]
-fn is_suffix(haystack: &[u8], needle: &[u8]) -> bool {
- crate::memmem::util::is_suffix(haystack, needle)
-}
-
-#[cfg(test)]
-mod simpletests {
- define_memmem_simple_tests!(super::find, super::rfind);
-}
-
-#[cfg(all(test, feature = "std", not(miri)))]
-mod proptests {
- define_memmem_quickcheck_tests!(super::find, super::rfind);
-}
diff --git a/vendor/memchr/src/memmem/rarebytes.rs b/vendor/memchr/src/memmem/rarebytes.rs
deleted file mode 100644
index fb33f6894..000000000
--- a/vendor/memchr/src/memmem/rarebytes.rs
+++ /dev/null
@@ -1,136 +0,0 @@
-/// A heuristic frequency based detection of rare bytes for substring search.
-///
-/// This detector attempts to pick out two bytes in a needle that are predicted
-/// to occur least frequently. The purpose is to use these bytes to implement
-/// fast candidate search using vectorized code.
-///
-/// A set of offsets is only computed for needles of length 2 or greater.
-/// Smaller needles should be special cased by the substring search algorithm
-/// in use. (e.g., Use memchr for single byte needles.)
-///
-/// Note that we use `u8` to represent the offsets of the rare bytes in a
-/// needle to reduce space usage. This means that a rare byte occurring after
-/// the first 255 bytes in a needle will never be used.
-#[derive(Clone, Copy, Debug, Default)]
-pub(crate) struct RareNeedleBytes {
- /// The leftmost offset of the rarest byte in the needle, according to
- /// pre-computed frequency analysis. The "leftmost offset" means that
- /// rare1i <= i for all i where needle[i] == needle[rare1i].
- rare1i: u8,
- /// The leftmost offset of the second rarest byte in the needle, according
- /// to pre-computed frequency analysis. The "leftmost offset" means that
- /// rare2i <= i for all i where needle[i] == needle[rare2i].
- ///
- /// The second rarest byte is used as a type of guard for quickly detecting
- /// a mismatch if the first byte matches. This is a hedge against
- /// pathological cases where the pre-computed frequency analysis may be
- /// off. (But of course, does not prevent *all* pathological cases.)
- ///
- /// In general, rare1i != rare2i by construction, although there is no hard
- /// requirement that they be different. However, since the case of a single
- /// byte needle is handled specially by memchr itself, rare2i generally
- /// always should be different from rare1i since it would otherwise be
- /// ineffective as a guard.
- rare2i: u8,
-}
-
-impl RareNeedleBytes {
- /// Create a new pair of rare needle bytes with the given offsets. This is
- /// only used in tests for generating input data.
- #[cfg(all(test, feature = "std"))]
- pub(crate) fn new(rare1i: u8, rare2i: u8) -> RareNeedleBytes {
- RareNeedleBytes { rare1i, rare2i }
- }
-
- /// Detect the leftmost offsets of the two rarest bytes in the given
- /// needle.
- pub(crate) fn forward(needle: &[u8]) -> RareNeedleBytes {
- if needle.len() <= 1 || needle.len() > core::u8::MAX as usize {
- // For needles bigger than u8::MAX, our offsets aren't big enough.
- // (We make our offsets small to reduce stack copying.)
- // If you have a use case for it, please file an issue. In that
- // case, we should probably just adjust the routine below to pick
- // some rare bytes from the first 255 bytes of the needle.
- //
- // Also note that for needles of size 0 or 1, they are special
- // cased in Two-Way.
- //
-            // TODO: Benchmark this.
- return RareNeedleBytes { rare1i: 0, rare2i: 0 };
- }
-
- // Find the rarest two bytes. We make them distinct by construction.
- let (mut rare1, mut rare1i) = (needle[0], 0);
- let (mut rare2, mut rare2i) = (needle[1], 1);
- if rank(rare2) < rank(rare1) {
- core::mem::swap(&mut rare1, &mut rare2);
- core::mem::swap(&mut rare1i, &mut rare2i);
- }
- for (i, &b) in needle.iter().enumerate().skip(2) {
- if rank(b) < rank(rare1) {
- rare2 = rare1;
- rare2i = rare1i;
- rare1 = b;
- rare1i = i as u8;
- } else if b != rare1 && rank(b) < rank(rare2) {
- rare2 = b;
- rare2i = i as u8;
- }
- }
- // While not strictly required, we really don't want these to be
- // equivalent. If they were, it would reduce the effectiveness of
- // candidate searching using these rare bytes by increasing the rate of
- // false positives.
- assert_ne!(rare1i, rare2i);
- RareNeedleBytes { rare1i, rare2i }
- }
-
- /// Return the rare bytes in the given needle in the forward direction.
- /// The needle given must be the same one given to the RareNeedleBytes
- /// constructor.
- pub(crate) fn as_rare_bytes(&self, needle: &[u8]) -> (u8, u8) {
- (needle[self.rare1i as usize], needle[self.rare2i as usize])
- }
-
- /// Return the rare offsets such that the first offset is always <= to the
- /// second offset. This is useful when the caller doesn't care whether
- /// rare1 is rarer than rare2, but just wants to ensure that they are
- /// ordered with respect to one another.
- #[cfg(memchr_runtime_simd)]
- pub(crate) fn as_rare_ordered_usize(&self) -> (usize, usize) {
- let (rare1i, rare2i) = self.as_rare_ordered_u8();
- (rare1i as usize, rare2i as usize)
- }
-
- /// Like as_rare_ordered_usize, but returns the offsets as their native
- /// u8 values.
- #[cfg(memchr_runtime_simd)]
- pub(crate) fn as_rare_ordered_u8(&self) -> (u8, u8) {
- if self.rare1i <= self.rare2i {
- (self.rare1i, self.rare2i)
- } else {
- (self.rare2i, self.rare1i)
- }
- }
-
- /// Return the rare offsets as usize values in the order in which they were
- /// constructed. rare1, for example, is constructed as the "rarer" byte,
- /// and thus, callers may want to treat it differently from rare2.
- pub(crate) fn as_rare_usize(&self) -> (usize, usize) {
- (self.rare1i as usize, self.rare2i as usize)
- }
-
- /// Return the byte frequency rank of each byte. The higher the rank, the
-    /// more frequently the byte is predicted to occur. The needle given must be
- /// the same one given to the RareNeedleBytes constructor.
- pub(crate) fn as_ranks(&self, needle: &[u8]) -> (usize, usize) {
- let (b1, b2) = self.as_rare_bytes(needle);
- (rank(b1), rank(b2))
- }
-}
-
-/// Return the heuristical frequency rank of the given byte. A lower rank
-/// means the byte is believed to occur less frequently.
-fn rank(b: u8) -> usize {
- crate::memmem::byte_frequencies::BYTE_FREQUENCIES[b as usize] as usize
-}
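-
-// A minimal sketch of the selection loop above with a made-up rank function
-// (the real implementation consults the BYTE_FREQUENCIES table). It shows how
-// the rarest byte and a distinct runner-up "guard" byte get picked; it is
-// illustrative only.
-#[cfg(test)]
-mod rare_selection_sketch {
-    // Hypothetical ranks: pretend 'z' is rarest, 'q' is next, all else common.
-    fn fake_rank(b: u8) -> usize {
-        match b {
-            b'z' => 1,
-            b'q' => 5,
-            _ => 200,
-        }
-    }
-
-    #[test]
-    fn picks_two_distinct_rare_bytes() {
-        let needle = b"aqaza";
-        let (mut rare1, mut rare1i) = (needle[0], 0usize);
-        let (mut rare2, mut rare2i) = (needle[1], 1usize);
-        if fake_rank(rare2) < fake_rank(rare1) {
-            core::mem::swap(&mut rare1, &mut rare2);
-            core::mem::swap(&mut rare1i, &mut rare2i);
-        }
-        for (i, &b) in needle.iter().enumerate().skip(2) {
-            if fake_rank(b) < fake_rank(rare1) {
-                rare2 = rare1;
-                rare2i = rare1i;
-                rare1 = b;
-                rare1i = i;
-            } else if b != rare1 && fake_rank(b) < fake_rank(rare2) {
-                rare2 = b;
-                rare2i = i;
-            }
-        }
-        // 'z' (offset 3) is rarest; 'q' (offset 1) is the runner-up guard.
-        assert_eq!((rare1, rare1i, rare2, rare2i), (b'z', 3, b'q', 1));
-    }
-}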
diff --git a/vendor/memchr/src/memmem/searcher.rs b/vendor/memchr/src/memmem/searcher.rs
new file mode 100644
index 000000000..98b9bd614
--- /dev/null
+++ b/vendor/memchr/src/memmem/searcher.rs
@@ -0,0 +1,1030 @@
+use crate::arch::all::{
+ packedpair::{HeuristicFrequencyRank, Pair},
+ rabinkarp, twoway,
+};
+
+#[cfg(target_arch = "aarch64")]
+use crate::arch::aarch64::neon::packedpair as neon;
+#[cfg(target_arch = "wasm32")]
+use crate::arch::wasm32::simd128::packedpair as simd128;
+#[cfg(all(target_arch = "x86_64", target_feature = "sse2"))]
+use crate::arch::x86_64::{
+ avx2::packedpair as avx2, sse2::packedpair as sse2,
+};
+
+/// A "meta" substring searcher.
+///
+/// To a first approximation, this chooses what it believes to be the "best"
+/// substring search implementation based on the needle at construction time.
+/// Then, every call to `find` will execute that particular implementation. To
+/// a second approximation, multiple substring search algorithms may be used,
+/// depending on the haystack. For example, for supremely short haystacks,
+/// Rabin-Karp is typically used.
+///
+/// See the documentation on `Prefilter` for an explanation of the dispatching
+/// mechanism. The quick summary is that an enum has too much overhead and
+/// we can't use dynamic dispatch via traits because we need to work in a
+/// core-only environment. (Dynamic dispatch works in core-only, but you
+/// need `&dyn Trait` and we really need a `Box<dyn Trait>` here. The latter
+/// requires `alloc`.) So instead, we use a union and an appropriately paired
+/// free function to read from the correct field on the union and execute the
+/// chosen substring search implementation.
+#[derive(Clone)]
+pub(crate) struct Searcher {
+ call: SearcherKindFn,
+ kind: SearcherKind,
+ rabinkarp: rabinkarp::Finder,
+}
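+
+// A stripped-down sketch of the union-plus-function-pointer dispatch pattern
+// described above, with a toy variant standing in for the real searchers. It
+// is illustrative only; none of these names exist in this crate.
+#[cfg(test)]
+mod dispatch_sketch {
+    #[derive(Clone, Copy)]
+    union Kind {
+        byte: u8,
+        #[allow(dead_code)]
+        len: usize,
+    }
+
+    // Safety contract: a function of this type must only read the union
+    // field that was populated when it was paired with the union value.
+    type CallFn = unsafe fn(kind: &Kind, haystack: &[u8]) -> Option<usize>;
+
+    unsafe fn call_byte(kind: &Kind, haystack: &[u8]) -> Option<usize> {
+        let b = kind.byte;
+        haystack.iter().position(|&x| x == b)
+    }
+
+    #[test]
+    fn paired_function_reads_matching_field() {
+        let (call, kind): (CallFn, Kind) = (call_byte, Kind { byte: b'z' });
+        // SAFETY: `call_byte` is paired with the `byte` field set above.
+        assert_eq!(Some(2), unsafe { call(&kind, b"xyz") });
+    }
+}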
+
+impl Searcher {
+ /// Creates a new "meta" substring searcher that attempts to choose the
+ /// best algorithm based on the needle, heuristics and what the current
+ /// target supports.
+ #[inline]
+ pub(crate) fn new<R: HeuristicFrequencyRank>(
+ prefilter: PrefilterConfig,
+ ranker: R,
+ needle: &[u8],
+ ) -> Searcher {
+ let rabinkarp = rabinkarp::Finder::new(needle);
+ if needle.len() <= 1 {
+ return if needle.is_empty() {
+ trace!("building empty substring searcher");
+ Searcher {
+ call: searcher_kind_empty,
+ kind: SearcherKind { empty: () },
+ rabinkarp,
+ }
+ } else {
+ trace!("building one-byte substring searcher");
+ debug_assert_eq!(1, needle.len());
+ Searcher {
+ call: searcher_kind_one_byte,
+ kind: SearcherKind { one_byte: needle[0] },
+ rabinkarp,
+ }
+ };
+ }
+ let pair = match Pair::with_ranker(needle, &ranker) {
+ Some(pair) => pair,
+ None => return Searcher::twoway(needle, rabinkarp, None),
+ };
+ debug_assert_ne!(
+ pair.index1(),
+ pair.index2(),
+ "pair offsets should not be equivalent"
+ );
+ #[cfg(all(target_arch = "x86_64", target_feature = "sse2"))]
+ {
+ if let Some(pp) = avx2::Finder::with_pair(needle, pair) {
+ if do_packed_search(needle) {
+ trace!("building x86_64 AVX2 substring searcher");
+ let kind = SearcherKind { avx2: pp };
+ Searcher { call: searcher_kind_avx2, kind, rabinkarp }
+ } else if prefilter.is_none() {
+ Searcher::twoway(needle, rabinkarp, None)
+ } else {
+ let prestrat = Prefilter::avx2(pp, needle);
+ Searcher::twoway(needle, rabinkarp, Some(prestrat))
+ }
+ } else if let Some(pp) = sse2::Finder::with_pair(needle, pair) {
+ if do_packed_search(needle) {
+ trace!("building x86_64 SSE2 substring searcher");
+ let kind = SearcherKind { sse2: pp };
+ Searcher { call: searcher_kind_sse2, kind, rabinkarp }
+ } else if prefilter.is_none() {
+ Searcher::twoway(needle, rabinkarp, None)
+ } else {
+ let prestrat = Prefilter::sse2(pp, needle);
+ Searcher::twoway(needle, rabinkarp, Some(prestrat))
+ }
+ } else if prefilter.is_none() {
+ Searcher::twoway(needle, rabinkarp, None)
+ } else {
+ // We're pretty unlikely to get to this point, but it is
+ // possible to be running on x86_64 without SSE2. Namely, it's
+ // really up to the OS whether it wants to support vector
+ // registers or not.
+ let prestrat = Prefilter::fallback(ranker, pair, needle);
+ Searcher::twoway(needle, rabinkarp, prestrat)
+ }
+ }
+ #[cfg(target_arch = "wasm32")]
+ {
+ if let Some(pp) = simd128::Finder::with_pair(needle, pair) {
+ if do_packed_search(needle) {
+ trace!("building wasm32 simd128 substring searcher");
+ let kind = SearcherKind { simd128: pp };
+ Searcher { call: searcher_kind_simd128, kind, rabinkarp }
+ } else if prefilter.is_none() {
+ Searcher::twoway(needle, rabinkarp, None)
+ } else {
+ let prestrat = Prefilter::simd128(pp, needle);
+ Searcher::twoway(needle, rabinkarp, Some(prestrat))
+ }
+ } else if prefilter.is_none() {
+ Searcher::twoway(needle, rabinkarp, None)
+ } else {
+ let prestrat = Prefilter::fallback(ranker, pair, needle);
+ Searcher::twoway(needle, rabinkarp, prestrat)
+ }
+ }
+ #[cfg(target_arch = "aarch64")]
+ {
+ if let Some(pp) = neon::Finder::with_pair(needle, pair) {
+ if do_packed_search(needle) {
+ trace!("building aarch64 neon substring searcher");
+ let kind = SearcherKind { neon: pp };
+ Searcher { call: searcher_kind_neon, kind, rabinkarp }
+ } else if prefilter.is_none() {
+ Searcher::twoway(needle, rabinkarp, None)
+ } else {
+ let prestrat = Prefilter::neon(pp, needle);
+ Searcher::twoway(needle, rabinkarp, Some(prestrat))
+ }
+ } else if prefilter.is_none() {
+ Searcher::twoway(needle, rabinkarp, None)
+ } else {
+ let prestrat = Prefilter::fallback(ranker, pair, needle);
+ Searcher::twoway(needle, rabinkarp, prestrat)
+ }
+ }
+ #[cfg(not(any(
+ all(target_arch = "x86_64", target_feature = "sse2"),
+ target_arch = "wasm32",
+ target_arch = "aarch64"
+ )))]
+ {
+ if prefilter.is_none() {
+ Searcher::twoway(needle, rabinkarp, None)
+ } else {
+ let prestrat = Prefilter::fallback(ranker, pair, needle);
+ Searcher::twoway(needle, rabinkarp, prestrat)
+ }
+ }
+ }
+
+ /// Creates a new searcher that always uses the Two-Way algorithm. This is
+ /// typically used when vector algorithms are unavailable or inappropriate.
+ /// (For example, when the needle is "too long.")
+ ///
+ /// If a prefilter is given, then the searcher returned will be accelerated
+ /// by the prefilter.
+ #[inline]
+ fn twoway(
+ needle: &[u8],
+ rabinkarp: rabinkarp::Finder,
+ prestrat: Option<Prefilter>,
+ ) -> Searcher {
+ let finder = twoway::Finder::new(needle);
+ match prestrat {
+ None => {
+ trace!("building scalar two-way substring searcher");
+ let kind = SearcherKind { two_way: finder };
+ Searcher { call: searcher_kind_two_way, kind, rabinkarp }
+ }
+ Some(prestrat) => {
+ trace!(
+ "building scalar two-way \
+ substring searcher with a prefilter"
+ );
+ let two_way_with_prefilter =
+ TwoWayWithPrefilter { finder, prestrat };
+ let kind = SearcherKind { two_way_with_prefilter };
+ Searcher {
+ call: searcher_kind_two_way_with_prefilter,
+ kind,
+ rabinkarp,
+ }
+ }
+ }
+ }
+
+ /// Searches the given haystack for the given needle. The needle given
+ /// should be the same as the needle that this finder was initialized
+ /// with.
+ ///
+ /// Inlining this can lead to big wins for latency, and #[inline] doesn't
+ /// seem to be enough in some cases.
+ #[inline(always)]
+ pub(crate) fn find(
+ &self,
+ prestate: &mut PrefilterState,
+ haystack: &[u8],
+ needle: &[u8],
+ ) -> Option<usize> {
+ if haystack.len() < needle.len() {
+ None
+ } else {
+ // SAFETY: By construction, we've ensured that the function
+ // in `self.call` is properly paired with the union used in
+ // `self.kind`.
+ unsafe { (self.call)(self, prestate, haystack, needle) }
+ }
+ }
+}
+
+impl core::fmt::Debug for Searcher {
+ fn fmt(&self, f: &mut core::fmt::Formatter) -> core::fmt::Result {
+ f.debug_struct("Searcher")
+ .field("call", &"<searcher function>")
+ .field("kind", &"<searcher kind union>")
+ .field("rabinkarp", &self.rabinkarp)
+ .finish()
+ }
+}
+
+/// A union indicating one of several possible substring search implementations
+/// that are in active use.
+///
+/// This union should only be read by one of the functions prefixed with
+/// `searcher_kind_`. Namely, the correct function is meant to be paired with
+/// the union by the caller, such that the function always reads from the
+/// designated union field.
+#[derive(Clone, Copy)]
+union SearcherKind {
+ empty: (),
+ one_byte: u8,
+ two_way: twoway::Finder,
+ two_way_with_prefilter: TwoWayWithPrefilter,
+ #[cfg(all(target_arch = "x86_64", target_feature = "sse2"))]
+ sse2: crate::arch::x86_64::sse2::packedpair::Finder,
+ #[cfg(all(target_arch = "x86_64", target_feature = "sse2"))]
+ avx2: crate::arch::x86_64::avx2::packedpair::Finder,
+ #[cfg(target_arch = "wasm32")]
+ simd128: crate::arch::wasm32::simd128::packedpair::Finder,
+ #[cfg(target_arch = "aarch64")]
+ neon: crate::arch::aarch64::neon::packedpair::Finder,
+}
+
+/// A two-way substring searcher with a prefilter.
+#[derive(Copy, Clone, Debug)]
+struct TwoWayWithPrefilter {
+ finder: twoway::Finder,
+ prestrat: Prefilter,
+}
+
+/// The type of a substring search function.
+///
+/// # Safety
+///
+/// When using a function of this type, callers must ensure that the correct
+/// function is paired with the value populated in `SearcherKind` union.
+type SearcherKindFn = unsafe fn(
+ searcher: &Searcher,
+ prestate: &mut PrefilterState,
+ haystack: &[u8],
+ needle: &[u8],
+) -> Option<usize>;
+
+/// Reads from the `empty` field of `SearcherKind` to handle the case of
+/// searching for the empty needle. Works on all platforms.
+///
+/// # Safety
+///
+/// Callers must ensure that the `searcher.kind.empty` union field is set.
+unsafe fn searcher_kind_empty(
+ _searcher: &Searcher,
+ _prestate: &mut PrefilterState,
+ _haystack: &[u8],
+ _needle: &[u8],
+) -> Option<usize> {
+ Some(0)
+}
+
+/// Reads from the `one_byte` field of `SearcherKind` to handle the case of
+/// searching for a single byte needle. Works on all platforms.
+///
+/// # Safety
+///
+/// Callers must ensure that the `searcher.kind.one_byte` union field is set.
+unsafe fn searcher_kind_one_byte(
+ searcher: &Searcher,
+ _prestate: &mut PrefilterState,
+ haystack: &[u8],
+ _needle: &[u8],
+) -> Option<usize> {
+ let needle = searcher.kind.one_byte;
+ crate::memchr(needle, haystack)
+}
+
+/// Reads from the `two_way` field of `SearcherKind` to handle the case of
+/// searching for an arbitrary needle without prefilter acceleration. Works on
+/// all platforms.
+///
+/// # Safety
+///
+/// Callers must ensure that the `searcher.kind.two_way` union field is set.
+unsafe fn searcher_kind_two_way(
+ searcher: &Searcher,
+ _prestate: &mut PrefilterState,
+ haystack: &[u8],
+ needle: &[u8],
+) -> Option<usize> {
+ if rabinkarp::is_fast(haystack, needle) {
+ searcher.rabinkarp.find(haystack, needle)
+ } else {
+ searcher.kind.two_way.find(haystack, needle)
+ }
+}
+
+/// Reads from the `two_way_with_prefilter` field of `SearcherKind` to handle
+/// the case of searching for an arbitrary needle with prefilter acceleration.
+/// Works on all platforms.
+///
+/// # Safety
+///
+/// Callers must ensure that the `searcher.kind.two_way_with_prefilter` union
+/// field is set.
+unsafe fn searcher_kind_two_way_with_prefilter(
+ searcher: &Searcher,
+ prestate: &mut PrefilterState,
+ haystack: &[u8],
+ needle: &[u8],
+) -> Option<usize> {
+ if rabinkarp::is_fast(haystack, needle) {
+ searcher.rabinkarp.find(haystack, needle)
+ } else {
+ let TwoWayWithPrefilter { ref finder, ref prestrat } =
+ searcher.kind.two_way_with_prefilter;
+ let pre = Pre { prestate, prestrat };
+ finder.find_with_prefilter(Some(pre), haystack, needle)
+ }
+}
+
+/// Reads from the `sse2` field of `SearcherKind` to execute the x86_64 SSE2
+/// vectorized substring search implementation.
+///
+/// # Safety
+///
+/// Callers must ensure that the `searcher.kind.sse2` union field is set.
+#[cfg(all(target_arch = "x86_64", target_feature = "sse2"))]
+unsafe fn searcher_kind_sse2(
+ searcher: &Searcher,
+ _prestate: &mut PrefilterState,
+ haystack: &[u8],
+ needle: &[u8],
+) -> Option<usize> {
+ let finder = &searcher.kind.sse2;
+ if haystack.len() < finder.min_haystack_len() {
+ searcher.rabinkarp.find(haystack, needle)
+ } else {
+ finder.find(haystack, needle)
+ }
+}
+
+/// Reads from the `avx2` field of `SearcherKind` to execute the x86_64 AVX2
+/// vectorized substring search implementation.
+///
+/// # Safety
+///
+/// Callers must ensure that the `searcher.kind.avx2` union field is set.
+#[cfg(all(target_arch = "x86_64", target_feature = "sse2"))]
+unsafe fn searcher_kind_avx2(
+ searcher: &Searcher,
+ _prestate: &mut PrefilterState,
+ haystack: &[u8],
+ needle: &[u8],
+) -> Option<usize> {
+ let finder = &searcher.kind.avx2;
+ if haystack.len() < finder.min_haystack_len() {
+ searcher.rabinkarp.find(haystack, needle)
+ } else {
+ finder.find(haystack, needle)
+ }
+}
+
+/// Reads from the `simd128` field of `SearcherKind` to execute the wasm32
+/// simd128 vectorized substring search implementation.
+///
+/// # Safety
+///
+/// Callers must ensure that the `searcher.kind.simd128` union field is set.
+#[cfg(target_arch = "wasm32")]
+unsafe fn searcher_kind_simd128(
+ searcher: &Searcher,
+ _prestate: &mut PrefilterState,
+ haystack: &[u8],
+ needle: &[u8],
+) -> Option<usize> {
+ let finder = &searcher.kind.simd128;
+ if haystack.len() < finder.min_haystack_len() {
+ searcher.rabinkarp.find(haystack, needle)
+ } else {
+ finder.find(haystack, needle)
+ }
+}
+
+/// Reads from the `neon` field of `SearcherKind` to execute the aarch64 neon
+/// vectorized substring search implementation.
+///
+/// # Safety
+///
+/// Callers must ensure that the `searcher.kind.neon` union field is set.
+#[cfg(target_arch = "aarch64")]
+unsafe fn searcher_kind_neon(
+ searcher: &Searcher,
+ _prestate: &mut PrefilterState,
+ haystack: &[u8],
+ needle: &[u8],
+) -> Option<usize> {
+ let finder = &searcher.kind.neon;
+ if haystack.len() < finder.min_haystack_len() {
+ searcher.rabinkarp.find(haystack, needle)
+ } else {
+ finder.find(haystack, needle)
+ }
+}
+
+/// A reverse substring searcher.
+#[derive(Clone, Debug)]
+pub(crate) struct SearcherRev {
+ kind: SearcherRevKind,
+ rabinkarp: rabinkarp::FinderRev,
+}
+
+/// The kind of the reverse searcher.
+///
+/// For the reverse case, we don't do any SIMD acceleration or prefilters.
+/// There is no specific technical reason why we don't; it's just not clear
+/// that it would be worth the extra code. If you have a
+/// use case for it, please file an issue.
+///
+/// We also don't do the union trick as we do with the forward case and
+/// prefilters. Basically for the same reason we don't have prefilters or
+/// vector algorithms for reverse searching: it's not clear it's worth doing.
+/// Please file an issue if you have a compelling use case for fast reverse
+/// substring search.
+#[derive(Clone, Debug)]
+enum SearcherRevKind {
+ Empty,
+ OneByte { needle: u8 },
+ TwoWay { finder: twoway::FinderRev },
+}
+
+impl SearcherRev {
+ /// Creates a new searcher for finding occurrences of the given needle in
+ /// reverse. That is, it reports the last (instead of the first) occurrence
+ /// of a needle in a haystack.
+ #[inline]
+ pub(crate) fn new(needle: &[u8]) -> SearcherRev {
+ let kind = if needle.len() <= 1 {
+ if needle.is_empty() {
+ trace!("building empty reverse substring searcher");
+ SearcherRevKind::Empty
+ } else {
+ trace!("building one-byte reverse substring searcher");
+ debug_assert_eq!(1, needle.len());
+ SearcherRevKind::OneByte { needle: needle[0] }
+ }
+ } else {
+ trace!("building scalar two-way reverse substring searcher");
+ let finder = twoway::FinderRev::new(needle);
+ SearcherRevKind::TwoWay { finder }
+ };
+ let rabinkarp = rabinkarp::FinderRev::new(needle);
+ SearcherRev { kind, rabinkarp }
+ }
+
+ /// Searches the given haystack for the last occurrence of the given
+ /// needle. The needle given should be the same as the needle that this
+ /// finder was initialized with.
+ #[inline]
+ pub(crate) fn rfind(
+ &self,
+ haystack: &[u8],
+ needle: &[u8],
+ ) -> Option<usize> {
+ if haystack.len() < needle.len() {
+ return None;
+ }
+ match self.kind {
+ SearcherRevKind::Empty => Some(haystack.len()),
+ SearcherRevKind::OneByte { needle } => {
+ crate::memrchr(needle, haystack)
+ }
+ SearcherRevKind::TwoWay { ref finder } => {
+ if rabinkarp::is_fast(haystack, needle) {
+ self.rabinkarp.rfind(haystack, needle)
+ } else {
+ finder.rfind(haystack, needle)
+ }
+ }
+ }
+ }
+}
+
+/// Prefilter controls whether heuristics are used to accelerate searching.
+///
+/// A prefilter refers to the idea of detecting candidate matches very quickly,
+/// and then confirming whether those candidates are full matches. This
+/// idea can be quite effective since it's often the case that looking for
+/// candidates can be a lot faster than running a complete substring search
+/// over the entire input. Namely, looking for candidates can be done with
+/// extremely fast vectorized code.
+///
+/// The downside of a prefilter is that it assumes false positives (which are
+/// candidates generated by a prefilter that aren't matches) are somewhat rare
+/// relative to the frequency of full matches. That is, if a lot of false
+/// positives are generated, then it's possible for search time to be worse
+/// than if the prefilter wasn't enabled in the first place.
+///
+/// Another downside of a prefilter is that it can result in highly variable
+/// performance, where some cases are extraordinarily fast and others aren't.
+/// Typically, variable performance isn't a problem, but it may be for your use
+/// case.
+///
+/// This implementation uses a heuristic to detect when a prefilter might not
+/// be carrying its weight and, if so, dynamically disables it. Nevertheless,
+/// this configuration option gives callers the ability to disable prefilters
+/// entirely when they are known not to be useful.
+#[derive(Clone, Copy, Debug)]
+#[non_exhaustive]
+pub enum PrefilterConfig {
+    /// Never use a prefilter in substring search.
+ None,
+ /// Automatically detect whether a heuristic prefilter should be used. If
+ /// it is used, then heuristics will be used to dynamically disable the
+ /// prefilter if it is believed to not be carrying its weight.
+ Auto,
+}
+
+impl Default for PrefilterConfig {
+ fn default() -> PrefilterConfig {
+ PrefilterConfig::Auto
+ }
+}
+
+impl PrefilterConfig {
+ /// Returns true when this prefilter is set to the `None` variant.
+ fn is_none(&self) -> bool {
+ matches!(*self, PrefilterConfig::None)
+ }
+}
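+
+// A quick check of the two variants' behavior as defined above: `Auto` is the
+// default, and only `None` reports itself as disabled. (Sketch only; not part
+// of the crate's test suite.)
+#[cfg(test)]
+mod prefilter_config_sketch {
+    use super::PrefilterConfig;
+
+    #[test]
+    fn auto_is_default_and_none_disables() {
+        assert!(matches!(PrefilterConfig::default(), PrefilterConfig::Auto));
+        assert!(PrefilterConfig::None.is_none());
+        assert!(!PrefilterConfig::Auto.is_none());
+    }
+}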
+
+/// The implementation of a prefilter.
+///
+/// This type encapsulates dispatch to one of several possible choices for a
+/// prefilter. Generally speaking, all prefilters have the same approximate
+/// algorithm: they choose a couple of bytes from the needle that are believed
+/// to be rare, use a fast vector algorithm to look for those bytes and return
+/// positions as candidates for some substring search algorithm (currently only
+/// Two-Way) to confirm as a match or not.
+///
+/// The differences between the algorithms are actually at the vector
+/// implementation level. Namely, we need different routines based on both
+/// which target architecture we're on and what CPU features are supported.
+///
+/// The straightforward and obvious approach here is to use an enum, and make
+/// `Prefilter::find` do case analysis to determine which algorithm was
+/// selected and invoke it. However, I've observed that this leads to poor
+/// codegen in some cases, especially in latency sensitive benchmarks. That is,
+/// this approach comes with overhead that I wasn't able to eliminate.
+///
+/// The second obvious approach is to use dynamic dispatch with traits. Doing
+/// that in this context where `Prefilter` owns the selection generally
+/// requires heap allocation, and this code is designed to run in core-only
+/// environments.
+///
+/// So we settle on using a union (that's `PrefilterKind`) and a function
+/// pointer (that's `PrefilterKindFn`). We select the right function pointer
+/// based on which field in the union we set, and that function in turn
+/// knows which field of the union to access. The downside of this approach
+/// is that it forces us to think about safety, but the upside is that
+/// there are some nice latency improvements to benchmarks. (Especially the
+/// `memmem/sliceslice/short` benchmark.)
+///
+/// In cases where we've selected a vector algorithm and the haystack given
+/// is too short, we fallback to the scalar version of `memchr` on the
+/// `rarest_byte`. (The scalar version of `memchr` is still better than a naive
+/// byte-at-a-time loop because it will read in `usize`-sized chunks at a
+/// time.)
+#[derive(Clone, Copy)]
+struct Prefilter {
+ call: PrefilterKindFn,
+ kind: PrefilterKind,
+ rarest_byte: u8,
+ rarest_offset: u8,
+}
+
+impl Prefilter {
+ /// Return a "fallback" prefilter, but only if it is believed to be
+ /// effective.
+ #[inline]
+ fn fallback<R: HeuristicFrequencyRank>(
+ ranker: R,
+ pair: Pair,
+ needle: &[u8],
+ ) -> Option<Prefilter> {
+ /// The maximum frequency rank permitted for the fallback prefilter.
+ /// If the rarest byte in the needle has a frequency rank above this
+ /// value, then no prefilter is used if the fallback prefilter would
+ /// otherwise be selected.
+ const MAX_FALLBACK_RANK: u8 = 250;
+
+ trace!("building fallback prefilter");
+ let rarest_offset = pair.index1();
+ let rarest_byte = needle[usize::from(rarest_offset)];
+ let rarest_rank = ranker.rank(rarest_byte);
+ if rarest_rank > MAX_FALLBACK_RANK {
+ None
+ } else {
+ let finder = crate::arch::all::packedpair::Finder::with_pair(
+ needle,
+ pair.clone(),
+ )?;
+ let call = prefilter_kind_fallback;
+ let kind = PrefilterKind { fallback: finder };
+ Some(Prefilter { call, kind, rarest_byte, rarest_offset })
+ }
+ }
+
+    /// Return a prefilter using an x86_64 SSE2 vector algorithm.
+ #[cfg(all(target_arch = "x86_64", target_feature = "sse2"))]
+ #[inline]
+ fn sse2(finder: sse2::Finder, needle: &[u8]) -> Prefilter {
+ trace!("building x86_64 SSE2 prefilter");
+ let rarest_offset = finder.pair().index1();
+ let rarest_byte = needle[usize::from(rarest_offset)];
+ Prefilter {
+ call: prefilter_kind_sse2,
+ kind: PrefilterKind { sse2: finder },
+ rarest_byte,
+ rarest_offset,
+ }
+ }
+
+    /// Return a prefilter using an x86_64 AVX2 vector algorithm.
+ #[cfg(all(target_arch = "x86_64", target_feature = "sse2"))]
+ #[inline]
+ fn avx2(finder: avx2::Finder, needle: &[u8]) -> Prefilter {
+ trace!("building x86_64 AVX2 prefilter");
+ let rarest_offset = finder.pair().index1();
+ let rarest_byte = needle[usize::from(rarest_offset)];
+ Prefilter {
+ call: prefilter_kind_avx2,
+ kind: PrefilterKind { avx2: finder },
+ rarest_byte,
+ rarest_offset,
+ }
+ }
+
+ /// Return a prefilter using a wasm32 simd128 vector algorithm.
+ #[cfg(target_arch = "wasm32")]
+ #[inline]
+ fn simd128(finder: simd128::Finder, needle: &[u8]) -> Prefilter {
+ trace!("building wasm32 simd128 prefilter");
+ let rarest_offset = finder.pair().index1();
+ let rarest_byte = needle[usize::from(rarest_offset)];
+ Prefilter {
+ call: prefilter_kind_simd128,
+ kind: PrefilterKind { simd128: finder },
+ rarest_byte,
+ rarest_offset,
+ }
+ }
+
+    /// Return a prefilter using an aarch64 neon vector algorithm.
+ #[cfg(target_arch = "aarch64")]
+ #[inline]
+ fn neon(finder: neon::Finder, needle: &[u8]) -> Prefilter {
+ trace!("building aarch64 neon prefilter");
+ let rarest_offset = finder.pair().index1();
+ let rarest_byte = needle[usize::from(rarest_offset)];
+ Prefilter {
+ call: prefilter_kind_neon,
+ kind: PrefilterKind { neon: finder },
+ rarest_byte,
+ rarest_offset,
+ }
+ }
+
+ /// Return a *candidate* position for a match.
+ ///
+ /// When this returns an offset, it implies that a match could begin at
+ /// that offset, but it may not. That is, it is possible for a false
+ /// positive to be returned.
+ ///
+ /// When `None` is returned, then it is guaranteed that there are no
+ /// matches for the needle in the given haystack. That is, it is impossible
+ /// for a false negative to be returned.
+ ///
+ /// The purpose of this routine is to look for candidate matching positions
+ /// as quickly as possible before running a (likely) slower confirmation
+ /// step.
+ #[inline]
+ fn find(&self, haystack: &[u8]) -> Option<usize> {
+ // SAFETY: By construction, we've ensured that the function in
+ // `self.call` is properly paired with the union used in `self.kind`.
+ unsafe { (self.call)(self, haystack) }
+ }
+
+ /// A "simple" prefilter that just looks for the occurrence of the rarest
+ /// byte from the needle. This is generally only used for very small
+ /// haystacks.
+ #[inline]
+ fn find_simple(&self, haystack: &[u8]) -> Option<usize> {
+ // We don't use crate::memchr here because the haystack should be small
+ // enough that memchr won't be able to use vector routines anyway. So
+ // we just skip straight to the fallback implementation which is likely
+ // faster. (A byte-at-a-time loop is only used when the haystack is
+ // smaller than `size_of::<usize>()`.)
+ crate::arch::all::memchr::One::new(self.rarest_byte)
+ .find(haystack)
+ .map(|i| i.saturating_sub(usize::from(self.rarest_offset)))
+ }
+}
+
+impl core::fmt::Debug for Prefilter {
+ fn fmt(&self, f: &mut core::fmt::Formatter) -> core::fmt::Result {
+ f.debug_struct("Prefilter")
+ .field("call", &"<prefilter function>")
+ .field("kind", &"<prefilter kind union>")
+ .field("rarest_byte", &self.rarest_byte)
+ .field("rarest_offset", &self.rarest_offset)
+ .finish()
+ }
+}
+
+/// A union indicating one of several possible prefilters that are in active
+/// use.
+///
+/// This union should only be read by one of the functions prefixed with
+/// `prefilter_kind_`. Namely, the correct function is meant to be paired with
+/// the union by the caller, such that the function always reads from the
+/// designated union field.
+#[derive(Clone, Copy)]
+union PrefilterKind {
+ fallback: crate::arch::all::packedpair::Finder,
+ #[cfg(all(target_arch = "x86_64", target_feature = "sse2"))]
+ sse2: crate::arch::x86_64::sse2::packedpair::Finder,
+ #[cfg(all(target_arch = "x86_64", target_feature = "sse2"))]
+ avx2: crate::arch::x86_64::avx2::packedpair::Finder,
+ #[cfg(target_arch = "wasm32")]
+ simd128: crate::arch::wasm32::simd128::packedpair::Finder,
+ #[cfg(target_arch = "aarch64")]
+ neon: crate::arch::aarch64::neon::packedpair::Finder,
+}
+
+/// The type of a prefilter function.
+///
+/// # Safety
+///
+/// When using a function of this type, callers must ensure that the correct
+/// function is paired with the value populated in `PrefilterKind` union.
+type PrefilterKindFn =
+ unsafe fn(strat: &Prefilter, haystack: &[u8]) -> Option<usize>;
+
+/// Reads from the `fallback` field of `PrefilterKind` to execute the fallback
+/// prefilter. Works on all platforms.
+///
+/// # Safety
+///
+/// Callers must ensure that the `strat.kind.fallback` union field is set.
+unsafe fn prefilter_kind_fallback(
+ strat: &Prefilter,
+ haystack: &[u8],
+) -> Option<usize> {
+ strat.kind.fallback.find_prefilter(haystack)
+}
+
+/// Reads from the `sse2` field of `PrefilterKind` to execute the x86_64 SSE2
+/// prefilter.
+///
+/// # Safety
+///
+/// Callers must ensure that the `strat.kind.sse2` union field is set.
+#[cfg(all(target_arch = "x86_64", target_feature = "sse2"))]
+unsafe fn prefilter_kind_sse2(
+ strat: &Prefilter,
+ haystack: &[u8],
+) -> Option<usize> {
+ let finder = &strat.kind.sse2;
+ if haystack.len() < finder.min_haystack_len() {
+ strat.find_simple(haystack)
+ } else {
+ finder.find_prefilter(haystack)
+ }
+}
+
+/// Reads from the `avx2` field of `PrefilterKind` to execute the x86_64 AVX2
+/// prefilter.
+///
+/// # Safety
+///
+/// Callers must ensure that the `strat.kind.avx2` union field is set.
+#[cfg(all(target_arch = "x86_64", target_feature = "sse2"))]
+unsafe fn prefilter_kind_avx2(
+ strat: &Prefilter,
+ haystack: &[u8],
+) -> Option<usize> {
+ let finder = &strat.kind.avx2;
+ if haystack.len() < finder.min_haystack_len() {
+ strat.find_simple(haystack)
+ } else {
+ finder.find_prefilter(haystack)
+ }
+}
+
+/// Reads from the `simd128` field of `PrefilterKind` to execute the wasm32
+/// simd128 prefilter.
+///
+/// # Safety
+///
+/// Callers must ensure that the `strat.kind.simd128` union field is set.
+#[cfg(target_arch = "wasm32")]
+unsafe fn prefilter_kind_simd128(
+ strat: &Prefilter,
+ haystack: &[u8],
+) -> Option<usize> {
+ let finder = &strat.kind.simd128;
+ if haystack.len() < finder.min_haystack_len() {
+ strat.find_simple(haystack)
+ } else {
+ finder.find_prefilter(haystack)
+ }
+}
+
+/// Reads from the `neon` field of `PrefilterKind` to execute the aarch64 neon
+/// prefilter.
+///
+/// # Safety
+///
+/// Callers must ensure that the `strat.kind.neon` union field is set.
+#[cfg(target_arch = "aarch64")]
+unsafe fn prefilter_kind_neon(
+ strat: &Prefilter,
+ haystack: &[u8],
+) -> Option<usize> {
+ let finder = &strat.kind.neon;
+ if haystack.len() < finder.min_haystack_len() {
+ strat.find_simple(haystack)
+ } else {
+ finder.find_prefilter(haystack)
+ }
+}
+
+/// PrefilterState tracks state associated with the effectiveness of a
+/// prefilter. It is used to track how many bytes, on average, are skipped by
+/// the prefilter. If this average dips below a certain threshold over time,
+/// then the state renders the prefilter inert and stops using it.
+///
+/// A prefilter state should be created for each search. (Where creating an
+/// iterator is treated as a single search.) A prefilter state should only be
+/// created from a `Freqy`, e.g., an inert `Freqy` will produce an inert
+/// `PrefilterState`.
+#[derive(Clone, Copy, Debug)]
+pub(crate) struct PrefilterState {
+    /// The number of skips that have been executed. This is always 1 greater
+ /// than the actual number of skips. The special sentinel value of 0
+ /// indicates that the prefilter is inert. This is useful to avoid
+ /// additional checks to determine whether the prefilter is still
+ /// "effective." Once a prefilter becomes inert, it should no longer be
+ /// used (according to our heuristics).
+ skips: u32,
+ /// The total number of bytes that have been skipped.
+ skipped: u32,
+}
+
+impl PrefilterState {
+ /// The minimum number of skip attempts to try before considering whether
+ /// a prefilter is effective or not.
+ const MIN_SKIPS: u32 = 50;
+
+    /// The minimum number of bytes that each skip must average.
+ ///
+ /// This value was chosen based on varying it and checking
+ /// the microbenchmarks. In particular, this can impact the
+ /// pathological/repeated-{huge,small} benchmarks quite a bit if it's set
+ /// too low.
+ const MIN_SKIP_BYTES: u32 = 8;
+
+ /// Create a fresh prefilter state.
+ #[inline]
+ pub(crate) fn new() -> PrefilterState {
+ PrefilterState { skips: 1, skipped: 0 }
+ }
+
+ /// Update this state with the number of bytes skipped on the last
+ /// invocation of the prefilter.
+ #[inline]
+ fn update(&mut self, skipped: usize) {
+ self.skips = self.skips.saturating_add(1);
+ // We need to do this dance since it's technically possible for
+ // `skipped` to overflow a `u32`. (And we use a `u32` to reduce the
+ // size of a prefilter state.)
+ self.skipped = match u32::try_from(skipped) {
+ Err(_) => core::u32::MAX,
+ Ok(skipped) => self.skipped.saturating_add(skipped),
+ };
+ }
+
+ /// Return true if and only if this state indicates that a prefilter is
+ /// still effective.
+ #[inline]
+ fn is_effective(&mut self) -> bool {
+ if self.is_inert() {
+ return false;
+ }
+ if self.skips() < PrefilterState::MIN_SKIPS {
+ return true;
+ }
+ if self.skipped >= PrefilterState::MIN_SKIP_BYTES * self.skips() {
+ return true;
+ }
+
+ // We're inert.
+ self.skips = 0;
+ false
+ }
+
+ /// Returns true if the prefilter this state represents should no longer
+ /// be used.
+ #[inline]
+ fn is_inert(&self) -> bool {
+ self.skips == 0
+ }
+
+ /// Returns the total number of times the prefilter has been used.
+ #[inline]
+ fn skips(&self) -> u32 {
+ // Remember, `0` is a sentinel value indicating inertness, so we
+ // always need to subtract `1` to get our actual number of skips.
+ self.skips.saturating_sub(1)
+ }
+}
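+
+// A small illustration of the heuristic above: once MIN_SKIPS attempts have
+// occurred, the prefilter stays live only while it has skipped an average of
+// at least MIN_SKIP_BYTES bytes per attempt. (Sketch only; not part of the
+// crate's test suite.)
+#[cfg(test)]
+mod prefilter_state_sketch {
+    use super::PrefilterState;
+
+    #[test]
+    fn becomes_inert_when_skips_are_small() {
+        let mut state = PrefilterState::new();
+        // 50 attempts that each skip only 1 byte: the average is well
+        // below MIN_SKIP_BYTES (8).
+        for _ in 0..50 {
+            assert!(state.is_effective());
+            state.update(1);
+        }
+        // The poor average renders the prefilter inert.
+        assert!(!state.is_effective());
+    }
+}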
+
+/// A combination of prefilter effectiveness state and the prefilter itself.
+#[derive(Debug)]
+pub(crate) struct Pre<'a> {
+ /// State that tracks the effectiveness of a prefilter.
+ prestate: &'a mut PrefilterState,
+ /// The actual prefilter.
+ prestrat: &'a Prefilter,
+}
+
+impl<'a> Pre<'a> {
+ /// Call this prefilter on the given haystack with the given needle.
+ #[inline]
+ pub(crate) fn find(&mut self, haystack: &[u8]) -> Option<usize> {
+ let result = self.prestrat.find(haystack);
+ self.prestate.update(result.unwrap_or(haystack.len()));
+ result
+ }
+
+ /// Return true if and only if this prefilter should be used.
+ #[inline]
+ pub(crate) fn is_effective(&mut self) -> bool {
+ self.prestate.is_effective()
+ }
+}
+
+/// Returns true if the needle has the right characteristics for a vector
+/// algorithm to handle the entirety of substring search.
+///
+/// Vector algorithms can be used for prefilters for other substring search
+/// algorithms (like Two-Way), but they can also be used for substring search
+/// on their own. When used for substring search, vector algorithms will
+/// quickly identify candidate match positions (just like in the prefilter
+/// case), but instead of returning the candidate position they will try to
+/// confirm the match themselves. Confirmation happens via `memcmp`. This
+/// works well for short needles, but can break down when many false candidate
+/// positions are generated for large needles. Thus, we only permit vector
+/// algorithms to own substring search when the needle is of a certain length.
+#[inline]
+fn do_packed_search(needle: &[u8]) -> bool {
+ /// The minimum length of a needle required for this algorithm. The minimum
+ /// is 2 since a length of 1 should just use memchr and a length of 0 isn't
+ /// a case handled by this searcher.
+ const MIN_LEN: usize = 2;
+
+ /// The maximum length of a needle required for this algorithm.
+ ///
+ /// In reality, there is no hard max here. The code below can handle any
+ /// length needle. (Perhaps that suggests there are missing optimizations.)
+ /// Instead, this is a heuristic and a bound guaranteeing our linear time
+ /// complexity.
+ ///
+ /// It is a heuristic because when a candidate match is found, memcmp is
+ /// run. For very large needles with lots of false positives, memcmp can
+ /// make the code run quite slow.
+ ///
+ /// It is a bound because the worst case behavior with memcmp is
+ /// multiplicative in the size of the needle and haystack, and we want
+    /// to keep that additive. Capping the needle length at a constant keeps
+    /// the overall bound linear in theory. We aren't acting in bad faith
+    /// here: memcmp on tiny needles is so fast that even in pathological
+ /// cases (see pathological vector benchmarks), this is still just as fast
+ /// or faster in practice.
+ ///
+ /// This specific number was chosen by tweaking a bit and running
+ /// benchmarks. The rare-medium-needle, for example, gets about 5% faster
+ /// by using this algorithm instead of a prefilter-accelerated Two-Way.
+ /// There's also a theoretical desire to keep this number reasonably
+ /// low, to mitigate the impact of pathological cases. I did try 64, and
+ /// some benchmarks got a little better, and others (particularly the
+ /// pathological ones), got a lot worse. So... 32 it is?
+ const MAX_LEN: usize = 32;
+ MIN_LEN <= needle.len() && needle.len() <= MAX_LEN
+}
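+
+// A quick check of the bounds documented above: needles of length 2 through
+// 32 qualify for the packed searcher; shorter or longer ones do not. (Sketch
+// only; not part of the crate's test suite.)
+#[cfg(test)]
+mod do_packed_search_sketch {
+    #[test]
+    fn respects_min_and_max_len() {
+        assert!(!super::do_packed_search(b"a"));
+        assert!(super::do_packed_search(b"ab"));
+        assert!(super::do_packed_search(&[b'x'; 32]));
+        assert!(!super::do_packed_search(&[b'x'; 33]));
+    }
+}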
diff --git a/vendor/memchr/src/memmem/util.rs b/vendor/memchr/src/memmem/util.rs
deleted file mode 100644
index de0e385e1..000000000
--- a/vendor/memchr/src/memmem/util.rs
+++ /dev/null
@@ -1,88 +0,0 @@
-// These routines are meant to be optimized specifically for low latency as
-// compared to the equivalent routines offered by std. (Which may invoke the
-// dynamic linker and call out to libc, which introduces a bit more latency
-// than we'd like.)
-
-/// Returns true if and only if needle is a prefix of haystack.
-#[inline(always)]
-pub(crate) fn is_prefix(haystack: &[u8], needle: &[u8]) -> bool {
- needle.len() <= haystack.len() && memcmp(&haystack[..needle.len()], needle)
-}
-
-/// Returns true if and only if needle is a suffix of haystack.
-#[inline(always)]
-pub(crate) fn is_suffix(haystack: &[u8], needle: &[u8]) -> bool {
- needle.len() <= haystack.len()
- && memcmp(&haystack[haystack.len() - needle.len()..], needle)
-}
-
-/// Return true if and only if x.len() == y.len() && x[i] == y[i] for all
-/// 0 <= i < x.len().
-///
-/// Why not just use actual memcmp for this? Well, memcmp requires calling out
-/// to libc, and this routine is called in fairly hot code paths. Beyond the
-/// cost of the libc call itself, it also seems to result in worse codegen. By
-/// rolling our own memcmp in pure Rust, the result appears friendlier to
-/// the optimizer.
-///
-/// We mark this as inline always, although some callers may not want it
-/// inlined for better codegen (like Rabin-Karp). In that case, callers are
-/// advised to create a non-inlineable wrapper routine that calls memcmp.
-#[inline(always)]
-pub(crate) fn memcmp(x: &[u8], y: &[u8]) -> bool {
- if x.len() != y.len() {
- return false;
- }
- // If we don't have enough bytes to do 4-byte at a time loads, then
- // fall back to the naive slow version.
- //
- // TODO: We could do a copy_nonoverlapping combined with a mask instead
- // of a loop. Benchmark it.
- if x.len() < 4 {
- for (&b1, &b2) in x.iter().zip(y) {
- if b1 != b2 {
- return false;
- }
- }
- return true;
- }
- // When we have 4 or more bytes to compare, then proceed in chunks of 4 at
- // a time using unaligned loads.
- //
- // Also, why do 4-byte loads instead of, say, 8-byte loads? The reason is
- // that this particular version of memcmp is likely to be called with tiny
- // needles. That means that if we do 8-byte loads, then a higher proportion
- // of memcmp calls will use the slower variant above. With that said, this
- // is a hypothesis and is only loosely supported by benchmarks. There's
- // likely some improvement that could be made here. The main goal here,
- // though, is to optimize for latency, not throughput.
-
- // SAFETY: Via the conditional above, we know that both `px` and `py`
- // have the same length, so `px < pxend` implies that `py < pyend`.
- // Thus, dereferencing both `px` and `py` in the loop below is safe.
- //
- // Moreover, we set `pxend` and `pyend` to be 4 bytes before the actual
- // end of `px` and `py`. Thus, the final dereference outside of the
- // loop is guaranteed to be valid. (The final comparison will overlap with
- // the last comparison done in the loop for lengths that aren't multiples
- // of four.)
- //
- // Finally, we needn't worry about alignment here, since we do unaligned
- // loads.
- unsafe {
- let (mut px, mut py) = (x.as_ptr(), y.as_ptr());
- let (pxend, pyend) = (px.add(x.len() - 4), py.add(y.len() - 4));
- while px < pxend {
- let vx = (px as *const u32).read_unaligned();
- let vy = (py as *const u32).read_unaligned();
- if vx != vy {
- return false;
- }
- px = px.add(4);
- py = py.add(4);
- }
- let vx = (pxend as *const u32).read_unaligned();
- let vy = (pyend as *const u32).read_unaligned();
- vx == vy
- }
-}
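
The overlapping final load is the key trick above: instead of a scalar loop
over the tail, the last 4 bytes are re-read even if they overlap with bytes
already compared. A safe-Rust rendering of the same idea (an illustration,
not the crate's code):

use core::convert::TryInto;

fn memcmp_like(x: &[u8], y: &[u8]) -> bool {
    if x.len() != y.len() {
        return false;
    }
    if x.len() < 4 {
        // Not enough bytes for a 4-byte load; compare directly.
        return x == y;
    }
    let mut i = 0;
    // Compare 4 bytes at a time.
    while i + 4 <= x.len() {
        let vx = u32::from_ne_bytes(x[i..i + 4].try_into().unwrap());
        let vy = u32::from_ne_bytes(y[i..i + 4].try_into().unwrap());
        if vx != vy {
            return false;
        }
        i += 4;
    }
    // The final 4-byte compare covers the tail, overlapping the previous
    // chunk when the length isn't a multiple of 4.
    let j = x.len() - 4;
    u32::from_ne_bytes(x[j..].try_into().unwrap())
        == u32::from_ne_bytes(y[j..].try_into().unwrap())
}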
diff --git a/vendor/memchr/src/memmem/vector.rs b/vendor/memchr/src/memmem/vector.rs
deleted file mode 100644
index b81165f8b..000000000
--- a/vendor/memchr/src/memmem/vector.rs
+++ /dev/null
@@ -1,131 +0,0 @@
-/// A trait for describing vector operations used by vectorized searchers.
-///
-/// The trait is highly constrained to the low-level vector operations needed.
-/// In general, it was invented mostly to be generic over x86's __m128i and
-/// __m256i types. It's likely that once std::simd stabilizes, we can migrate
-/// to it, since the operations required are quite simple.
-///
-/// TODO: Consider moving this trait up a level and using it to implement
-/// memchr as well. The trait might need to grow one or two methods, but
-/// otherwise should be close to sufficient already.
-///
-/// # Safety
-///
-/// All methods are unsafe since they are intended to be implemented using
-/// vendor intrinsics, which are themselves unsafe. Callers must ensure the
-/// appropriate target features are enabled in the calling function, and that
-/// the current CPU supports them. All implementations should avoid marking the
-/// routines with #[target_feature] and instead mark them as #[inline(always)]
-/// to ensure they get appropriately inlined. (inline(always) cannot be used
-/// with target_feature.)
-pub(crate) trait Vector: Copy + core::fmt::Debug {
- /// _mm_set1_epi8 or _mm256_set1_epi8
- unsafe fn splat(byte: u8) -> Self;
- /// _mm_loadu_si128 or _mm256_loadu_si256
- unsafe fn load_unaligned(data: *const u8) -> Self;
- /// _mm_movemask_epi8 or _mm256_movemask_epi8
- unsafe fn movemask(self) -> u32;
- /// _mm_cmpeq_epi8 or _mm256_cmpeq_epi8
- unsafe fn cmpeq(self, vector2: Self) -> Self;
- /// _mm_and_si128 or _mm256_and_si256
- unsafe fn and(self, vector2: Self) -> Self;
-}
-
-#[cfg(target_arch = "x86_64")]
-mod x86sse {
- use super::Vector;
- use core::arch::x86_64::*;
-
- impl Vector for __m128i {
- #[inline(always)]
- unsafe fn splat(byte: u8) -> __m128i {
- _mm_set1_epi8(byte as i8)
- }
-
- #[inline(always)]
- unsafe fn load_unaligned(data: *const u8) -> __m128i {
- _mm_loadu_si128(data as *const __m128i)
- }
-
- #[inline(always)]
- unsafe fn movemask(self) -> u32 {
- _mm_movemask_epi8(self) as u32
- }
-
- #[inline(always)]
- unsafe fn cmpeq(self, vector2: Self) -> __m128i {
- _mm_cmpeq_epi8(self, vector2)
- }
-
- #[inline(always)]
- unsafe fn and(self, vector2: Self) -> __m128i {
- _mm_and_si128(self, vector2)
- }
- }
-}
-
-#[cfg(all(feature = "std", target_arch = "x86_64"))]
-mod x86avx {
- use super::Vector;
- use core::arch::x86_64::*;
-
- impl Vector for __m256i {
- #[inline(always)]
- unsafe fn splat(byte: u8) -> __m256i {
- _mm256_set1_epi8(byte as i8)
- }
-
- #[inline(always)]
- unsafe fn load_unaligned(data: *const u8) -> __m256i {
- _mm256_loadu_si256(data as *const __m256i)
- }
-
- #[inline(always)]
- unsafe fn movemask(self) -> u32 {
- _mm256_movemask_epi8(self) as u32
- }
-
- #[inline(always)]
- unsafe fn cmpeq(self, vector2: Self) -> __m256i {
- _mm256_cmpeq_epi8(self, vector2)
- }
-
- #[inline(always)]
- unsafe fn and(self, vector2: Self) -> __m256i {
- _mm256_and_si256(self, vector2)
- }
- }
-}
-
-#[cfg(target_arch = "wasm32")]
-mod wasm_simd128 {
- use super::Vector;
- use core::arch::wasm32::*;
-
- impl Vector for v128 {
- #[inline(always)]
- unsafe fn splat(byte: u8) -> v128 {
- u8x16_splat(byte)
- }
-
- #[inline(always)]
- unsafe fn load_unaligned(data: *const u8) -> v128 {
- v128_load(data.cast())
- }
-
- #[inline(always)]
- unsafe fn movemask(self) -> u32 {
- u8x16_bitmask(self).into()
- }
-
- #[inline(always)]
- unsafe fn cmpeq(self, vector2: Self) -> v128 {
- u8x16_eq(self, vector2)
- }
-
- #[inline(always)]
- unsafe fn and(self, vector2: Self) -> v128 {
- v128_and(self, vector2)
- }
- }
-}
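
To see how a searcher consumes this trait, here is a minimal sketch of the
classic splat/load/compare/movemask loop, generic over the `Vector` trait
above. It ignores the tail that doesn't fill a whole vector; real routines
also handle that and unroll the loop:

unsafe fn find_byte<V: Vector>(needle: u8, haystack: &[u8]) -> Option<usize> {
    let vn = V::splat(needle);
    let step = core::mem::size_of::<V>();
    let mut i = 0;
    while i + step <= haystack.len() {
        let chunk = V::load_unaligned(haystack.as_ptr().add(i));
        // Each set bit in the mask marks a byte equal to the needle.
        let mask = chunk.cmpeq(vn).movemask();
        if mask != 0 {
            // The lowest set bit is the first match in this chunk.
            return Some(i + mask.trailing_zeros() as usize);
        }
        i += step;
    }
    None
}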
diff --git a/vendor/memchr/src/memmem/wasm.rs b/vendor/memchr/src/memmem/wasm.rs
deleted file mode 100644
index 4e3ea985c..000000000
--- a/vendor/memchr/src/memmem/wasm.rs
+++ /dev/null
@@ -1,75 +0,0 @@
-use core::arch::wasm32::v128;
-
-use crate::memmem::{genericsimd, NeedleInfo};
-
-/// A `v128` accelerated vectorized substring search routine that only works on
-/// small needles.
-#[derive(Clone, Copy, Debug)]
-pub(crate) struct Forward(genericsimd::Forward);
-
-impl Forward {
- /// Create a new "generic simd" forward searcher. If one could not be
- /// created from the given inputs, then None is returned.
- pub(crate) fn new(ninfo: &NeedleInfo, needle: &[u8]) -> Option<Forward> {
- if !cfg!(memchr_runtime_simd) {
- return None;
- }
- genericsimd::Forward::new(ninfo, needle).map(Forward)
- }
-
- /// Returns the minimum length of haystack that is needed for this searcher
- /// to work. Passing a haystack with a length smaller than this will cause
- /// `find` to panic.
- #[inline(always)]
- pub(crate) fn min_haystack_len(&self) -> usize {
- self.0.min_haystack_len::<v128>()
- }
-
- #[inline(always)]
- pub(crate) fn find(
- &self,
- haystack: &[u8],
- needle: &[u8],
- ) -> Option<usize> {
- self.find_impl(haystack, needle)
- }
-
- /// The implementation of find marked with the appropriate target feature.
- #[target_feature(enable = "simd128")]
- fn find_impl(&self, haystack: &[u8], needle: &[u8]) -> Option<usize> {
- unsafe { genericsimd::fwd_find::<v128>(&self.0, haystack, needle) }
- }
-}
-
-#[cfg(all(test, feature = "std", not(miri)))]
-mod tests {
- use crate::memmem::{prefilter::PrefilterState, NeedleInfo};
-
- fn find(
- _: &mut PrefilterState,
- ninfo: &NeedleInfo,
- haystack: &[u8],
- needle: &[u8],
- ) -> Option<usize> {
- super::Forward::new(ninfo, needle).unwrap().find(haystack, needle)
- }
-
- #[test]
- fn prefilter_permutations() {
- use crate::memmem::prefilter::tests::PrefilterTest;
-
- unsafe {
- PrefilterTest::run_all_tests_filter(find, |t| {
- // This substring searcher only works on certain configs, so
- // filter our tests such that Forward::new will be guaranteed
- // to succeed. (And also remove tests with a haystack that is
- // too small.)
- let fwd = match super::Forward::new(&t.ninfo, &t.needle) {
- None => return false,
- Some(fwd) => fwd,
- };
- t.haystack.len() >= fwd.min_haystack_len()
- })
- }
- }
-}
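
The `min_haystack_len` contract above means callers must guard every call to
`find`. A minimal sketch of that guard, taking the scalar fallback as a
closure since the real crate wires in its own (e.g. Rabin-Karp):

fn find_or_fallback<F>(
    fwd: &Forward,
    haystack: &[u8],
    needle: &[u8],
    fallback: F,
) -> Option<usize>
where
    F: Fn(&[u8], &[u8]) -> Option<usize>,
{
    if haystack.len() < fwd.min_haystack_len() {
        // Too short for the vectorized searcher; `find` would panic.
        fallback(haystack, needle)
    } else {
        fwd.find(haystack, needle)
    }
}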
diff --git a/vendor/memchr/src/memmem/x86/avx.rs b/vendor/memchr/src/memmem/x86/avx.rs
deleted file mode 100644
index ce168dd37..000000000
--- a/vendor/memchr/src/memmem/x86/avx.rs
+++ /dev/null
@@ -1,139 +0,0 @@
-#[cfg(not(feature = "std"))]
-pub(crate) use self::nostd::Forward;
-#[cfg(feature = "std")]
-pub(crate) use self::std::Forward;
-
-#[cfg(feature = "std")]
-mod std {
- use core::arch::x86_64::{__m128i, __m256i};
-
- use crate::memmem::{genericsimd, NeedleInfo};
-
- /// An AVX accelerated vectorized substring search routine that only works
- /// on small needles.
- #[derive(Clone, Copy, Debug)]
- pub(crate) struct Forward(genericsimd::Forward);
-
- impl Forward {
- /// Create a new "generic simd" forward searcher. If one could not be
- /// created from the given inputs, then None is returned.
- pub(crate) fn new(
- ninfo: &NeedleInfo,
- needle: &[u8],
- ) -> Option<Forward> {
- if !cfg!(memchr_runtime_avx) || !is_x86_feature_detected!("avx2") {
- return None;
- }
- genericsimd::Forward::new(ninfo, needle).map(Forward)
- }
-
- /// Returns the minimum length of haystack that is needed for this
- /// searcher to work. Passing a haystack with a length smaller than
- /// this will cause `find` to panic.
- #[inline(always)]
- pub(crate) fn min_haystack_len(&self) -> usize {
- self.0.min_haystack_len::<__m128i>()
- }
-
- #[inline(always)]
- pub(crate) fn find(
- &self,
- haystack: &[u8],
- needle: &[u8],
- ) -> Option<usize> {
- // SAFETY: The only way a Forward value can exist is if the avx2
- // target feature is enabled. This is the only safety requirement
- // for calling the genericsimd searcher.
- unsafe { self.find_impl(haystack, needle) }
- }
-
- /// The implementation of find marked with the appropriate target
- /// feature.
- ///
- /// # Safety
- ///
- /// Callers must ensure that the avx2 CPU feature is enabled in the
- /// current environment.
- #[target_feature(enable = "avx2")]
- unsafe fn find_impl(
- &self,
- haystack: &[u8],
- needle: &[u8],
- ) -> Option<usize> {
- if haystack.len() < self.0.min_haystack_len::<__m256i>() {
- genericsimd::fwd_find::<__m128i>(&self.0, haystack, needle)
- } else {
- genericsimd::fwd_find::<__m256i>(&self.0, haystack, needle)
- }
- }
- }
-}
-
-// We still define the avx "forward" type on nostd to make caller code a bit
-// simpler. This avoids needing a lot more conditional compilation.
-#[cfg(not(feature = "std"))]
-mod nostd {
- use crate::memmem::NeedleInfo;
-
- #[derive(Clone, Copy, Debug)]
- pub(crate) struct Forward(());
-
- impl Forward {
- pub(crate) fn new(
- ninfo: &NeedleInfo,
- needle: &[u8],
- ) -> Option<Forward> {
- None
- }
-
- pub(crate) fn min_haystack_len(&self) -> usize {
- unreachable!()
- }
-
- pub(crate) fn find(
- &self,
- haystack: &[u8],
- needle: &[u8],
- ) -> Option<usize> {
- unreachable!()
- }
- }
-}
-
-#[cfg(all(test, feature = "std", not(miri)))]
-mod tests {
- use crate::memmem::{prefilter::PrefilterState, NeedleInfo};
-
- fn find(
- _: &mut PrefilterState,
- ninfo: &NeedleInfo,
- haystack: &[u8],
- needle: &[u8],
- ) -> Option<usize> {
- super::Forward::new(ninfo, needle).unwrap().find(haystack, needle)
- }
-
- #[test]
- fn prefilter_permutations() {
- use crate::memmem::prefilter::tests::PrefilterTest;
-
- if !is_x86_feature_detected!("avx2") {
- return;
- }
- // SAFETY: The safety of find only requires that the current CPU
- // support AVX2, which we checked above.
- unsafe {
- PrefilterTest::run_all_tests_filter(find, |t| {
- // This substring searcher only works on certain configs, so
- // filter our tests such that Forward::new will be guaranteed
- // to succeed. (And also remove tests with a haystack that is
- // too small.)
- let fwd = match super::Forward::new(&t.ninfo, &t.needle) {
- None => return false,
- Some(fwd) => fwd,
- };
- t.haystack.len() >= fwd.min_haystack_len()
- })
- }
- }
-}
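
The soundness story above hinges on a single runtime check: `Forward::new`
only constructs a value when AVX2 is available, which makes every later call
to the `#[target_feature]` function sound. The same pattern in standalone
form (a sketch with a placeholder scalar body, requiring `std` for runtime
detection):

#[cfg(target_arch = "x86_64")]
fn find_byte(haystack: &[u8], needle: u8) -> Option<usize> {
    if is_x86_feature_detected!("avx2") {
        // SAFETY: We just verified at runtime that AVX2 is available.
        unsafe { find_byte_avx2(haystack, needle) }
    } else {
        haystack.iter().position(|&b| b == needle)
    }
}

#[cfg(target_arch = "x86_64")]
#[target_feature(enable = "avx2")]
unsafe fn find_byte_avx2(haystack: &[u8], needle: u8) -> Option<usize> {
    // Placeholder body: a real routine would use __m256i intrinsics. The
    // attribute still lets the compiler emit AVX2 code for this function.
    haystack.iter().position(|&b| b == needle)
}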
diff --git a/vendor/memchr/src/memmem/x86/mod.rs b/vendor/memchr/src/memmem/x86/mod.rs
deleted file mode 100644
index c1cc73fee..000000000
--- a/vendor/memchr/src/memmem/x86/mod.rs
+++ /dev/null
@@ -1,2 +0,0 @@
-pub(crate) mod avx;
-pub(crate) mod sse;
diff --git a/vendor/memchr/src/memmem/x86/sse.rs b/vendor/memchr/src/memmem/x86/sse.rs
deleted file mode 100644
index 22e7d9933..000000000
--- a/vendor/memchr/src/memmem/x86/sse.rs
+++ /dev/null
@@ -1,89 +0,0 @@
-use core::arch::x86_64::__m128i;
-
-use crate::memmem::{genericsimd, NeedleInfo};
-
-/// An SSE accelerated vectorized substring search routine that only works on
-/// small needles.
-#[derive(Clone, Copy, Debug)]
-pub(crate) struct Forward(genericsimd::Forward);
-
-impl Forward {
- /// Create a new "generic simd" forward searcher. If one could not be
- /// created from the given inputs, then None is returned.
- pub(crate) fn new(ninfo: &NeedleInfo, needle: &[u8]) -> Option<Forward> {
- if !cfg!(memchr_runtime_sse2) {
- return None;
- }
- genericsimd::Forward::new(ninfo, needle).map(Forward)
- }
-
- /// Returns the minimum length of haystack that is needed for this searcher
- /// to work. Passing a haystack with a length smaller than this will cause
- /// `find` to panic.
- #[inline(always)]
- pub(crate) fn min_haystack_len(&self) -> usize {
- self.0.min_haystack_len::<__m128i>()
- }
-
- #[inline(always)]
- pub(crate) fn find(
- &self,
- haystack: &[u8],
- needle: &[u8],
- ) -> Option<usize> {
- // SAFETY: sse2 is enabled on all x86_64 targets, so this is always
- // safe to call.
- unsafe { self.find_impl(haystack, needle) }
- }
-
- /// The implementation of find marked with the appropriate target feature.
- ///
- /// # Safety
- ///
- /// This is safe to call in all cases since sse2 is guaranteed to be part
- /// of x86_64. It is marked as unsafe because of the target feature
- /// attribute.
- #[target_feature(enable = "sse2")]
- unsafe fn find_impl(
- &self,
- haystack: &[u8],
- needle: &[u8],
- ) -> Option<usize> {
- genericsimd::fwd_find::<__m128i>(&self.0, haystack, needle)
- }
-}
-
-#[cfg(all(test, feature = "std", not(miri)))]
-mod tests {
- use crate::memmem::{prefilter::PrefilterState, NeedleInfo};
-
- fn find(
- _: &mut PrefilterState,
- ninfo: &NeedleInfo,
- haystack: &[u8],
- needle: &[u8],
- ) -> Option<usize> {
- super::Forward::new(ninfo, needle).unwrap().find(haystack, needle)
- }
-
- #[test]
- fn prefilter_permutations() {
- use crate::memmem::prefilter::tests::PrefilterTest;
-
- // SAFETY: sse2 is enabled on all x86_64 targets, so this is always
- // safe to call.
- unsafe {
- PrefilterTest::run_all_tests_filter(find, |t| {
- // This substring searcher only works on certain configs, so
- // filter our tests such that Forward::new will be guaranteed
- // to succeed. (And also remove tests with a haystack that is
- // too small.)
- let fwd = match super::Forward::new(&t.ninfo, &t.needle) {
- None => return false,
- Some(fwd) => fwd,
- };
- t.haystack.len() >= fwd.min_haystack_len()
- })
- }
- }
-}
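
Since SSE2 is part of the x86_64 baseline, the wrapper above needs no runtime
check at all. For illustration, here is the inner loop written directly with
the intrinsics named in the `Vector` trait docs (a sketch, not the crate's
implementation):

#[cfg(target_arch = "x86_64")]
#[target_feature(enable = "sse2")]
unsafe fn find_byte_sse2(haystack: &[u8], needle: u8) -> Option<usize> {
    use core::arch::x86_64::*;
    let vn = _mm_set1_epi8(needle as i8);
    let mut i = 0;
    while i + 16 <= haystack.len() {
        let p = haystack.as_ptr().add(i) as *const __m128i;
        let chunk = _mm_loadu_si128(p);
        let mask = _mm_movemask_epi8(_mm_cmpeq_epi8(chunk, vn)) as u32;
        if mask != 0 {
            return Some(i + mask.trailing_zeros() as usize);
        }
        i += 16;
    }
    // Scalar scan for the final partial chunk.
    haystack[i..].iter().position(|&b| b == needle).map(|pos| i + pos)
}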
diff --git a/vendor/memchr/src/tests/memchr/iter.rs b/vendor/memchr/src/tests/memchr/iter.rs
deleted file mode 100644
index 80ea5c279..000000000
--- a/vendor/memchr/src/tests/memchr/iter.rs
+++ /dev/null
@@ -1,230 +0,0 @@
-use quickcheck::quickcheck;
-
-use crate::{tests::memchr::testdata::memchr_tests, Memchr, Memchr2, Memchr3};
-
-#[test]
-fn memchr1_iter() {
- for test in memchr_tests() {
- test.iter_one(false, Memchr::new);
- }
-}
-
-#[test]
-fn memchr2_iter() {
- for test in memchr_tests() {
- test.iter_two(false, Memchr2::new);
- }
-}
-
-#[test]
-fn memchr3_iter() {
- for test in memchr_tests() {
- test.iter_three(false, Memchr3::new);
- }
-}
-
-#[test]
-fn memrchr1_iter() {
- for test in memchr_tests() {
- test.iter_one(true, |n1, corpus| Memchr::new(n1, corpus).rev());
- }
-}
-
-#[test]
-fn memrchr2_iter() {
- for test in memchr_tests() {
- test.iter_two(true, |n1, n2, corpus| {
- Memchr2::new(n1, n2, corpus).rev()
- })
- }
-}
-
-#[test]
-fn memrchr3_iter() {
- for test in memchr_tests() {
- test.iter_three(true, |n1, n2, n3, corpus| {
- Memchr3::new(n1, n2, n3, corpus).rev()
- })
- }
-}
-
-quickcheck! {
- fn qc_memchr_double_ended_iter(
- needle: u8, data: Vec<u8>, take_side: Vec<bool>
- ) -> bool {
- // make nonempty
- let mut take_side = take_side;
- if take_side.is_empty() { take_side.push(true) };
-
- let iter = Memchr::new(needle, &data);
- let all_found = double_ended_take(
- iter, take_side.iter().cycle().cloned());
-
- all_found.iter().cloned().eq(positions1(needle, &data))
- }
-
- fn qc_memchr2_double_ended_iter(
- needle1: u8, needle2: u8, data: Vec<u8>, take_side: Vec<bool>
- ) -> bool {
- // make nonempty
- let mut take_side = take_side;
- if take_side.is_empty() { take_side.push(true) };
-
- let iter = Memchr2::new(needle1, needle2, &data);
- let all_found = double_ended_take(
- iter, take_side.iter().cycle().cloned());
-
- all_found.iter().cloned().eq(positions2(needle1, needle2, &data))
- }
-
- fn qc_memchr3_double_ended_iter(
- needle1: u8, needle2: u8, needle3: u8,
- data: Vec<u8>, take_side: Vec<bool>
- ) -> bool {
- // make nonempty
- let mut take_side = take_side;
- if take_side.is_empty() { take_side.push(true) };
-
- let iter = Memchr3::new(needle1, needle2, needle3, &data);
- let all_found = double_ended_take(
- iter, take_side.iter().cycle().cloned());
-
- all_found
- .iter()
- .cloned()
- .eq(positions3(needle1, needle2, needle3, &data))
- }
-
- fn qc_memchr1_iter(data: Vec<u8>) -> bool {
- let needle = 0;
- let answer = positions1(needle, &data);
- answer.eq(Memchr::new(needle, &data))
- }
-
- fn qc_memchr1_rev_iter(data: Vec<u8>) -> bool {
- let needle = 0;
- let answer = positions1(needle, &data);
- answer.rev().eq(Memchr::new(needle, &data).rev())
- }
-
- fn qc_memchr2_iter(data: Vec<u8>) -> bool {
- let needle1 = 0;
- let needle2 = 1;
- let answer = positions2(needle1, needle2, &data);
- answer.eq(Memchr2::new(needle1, needle2, &data))
- }
-
- fn qc_memchr2_rev_iter(data: Vec<u8>) -> bool {
- let needle1 = 0;
- let needle2 = 1;
- let answer = positions2(needle1, needle2, &data);
- answer.rev().eq(Memchr2::new(needle1, needle2, &data).rev())
- }
-
- fn qc_memchr3_iter(data: Vec<u8>) -> bool {
- let needle1 = 0;
- let needle2 = 1;
- let needle3 = 2;
- let answer = positions3(needle1, needle2, needle3, &data);
- answer.eq(Memchr3::new(needle1, needle2, needle3, &data))
- }
-
- fn qc_memchr3_rev_iter(data: Vec<u8>) -> bool {
- let needle1 = 0;
- let needle2 = 1;
- let needle3 = 2;
- let answer = positions3(needle1, needle2, needle3, &data);
- answer.rev().eq(Memchr3::new(needle1, needle2, needle3, &data).rev())
- }
-
- fn qc_memchr1_iter_size_hint(data: Vec<u8>) -> bool {
- // test that the size hint is within reasonable bounds
- let needle = 0;
- let mut iter = Memchr::new(needle, &data);
- let mut real_count = data
- .iter()
- .filter(|&&elt| elt == needle)
- .count();
-
- while let Some(index) = iter.next() {
- real_count -= 1;
- let (lower, upper) = iter.size_hint();
- assert!(lower <= real_count);
- assert!(upper.unwrap() >= real_count);
- assert!(upper.unwrap() <= data.len() - index);
- }
- true
- }
-}
-
-// take items from a DEI, taking front for each true and back for each false.
-// Return a vector with the concatenation of the fronts and the reverse of the
-// backs.
-fn double_ended_take<I, J>(mut iter: I, take_side: J) -> Vec<I::Item>
-where
- I: DoubleEndedIterator,
- J: Iterator<Item = bool>,
-{
- let mut found_front = Vec::new();
- let mut found_back = Vec::new();
-
- for take_front in take_side {
- if take_front {
- if let Some(pos) = iter.next() {
- found_front.push(pos);
- } else {
- break;
- }
- } else {
- if let Some(pos) = iter.next_back() {
- found_back.push(pos);
- } else {
- break;
- }
- };
- }
-
- let mut all_found = found_front;
- all_found.extend(found_back.into_iter().rev());
- all_found
-}
-
-// return an iterator of the 0-based indices of haystack that match the needle
-fn positions1<'a>(
- n1: u8,
- haystack: &'a [u8],
-) -> Box<dyn DoubleEndedIterator<Item = usize> + 'a> {
- let it = haystack
- .iter()
- .enumerate()
- .filter(move |&(_, &b)| b == n1)
- .map(|t| t.0);
- Box::new(it)
-}
-
-fn positions2<'a>(
- n1: u8,
- n2: u8,
- haystack: &'a [u8],
-) -> Box<dyn DoubleEndedIterator<Item = usize> + 'a> {
- let it = haystack
- .iter()
- .enumerate()
- .filter(move |&(_, &b)| b == n1 || b == n2)
- .map(|t| t.0);
- Box::new(it)
-}
-
-fn positions3<'a>(
- n1: u8,
- n2: u8,
- n3: u8,
- haystack: &'a [u8],
-) -> Box<dyn DoubleEndedIterator<Item = usize> + 'a> {
- let it = haystack
- .iter()
- .enumerate()
- .filter(move |&(_, &b)| b == n1 || b == n2 || b == n3)
- .map(|t| t.0);
- Box::new(it)
-}
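
As a concrete illustration of `double_ended_take`'s semantics: with matches
at [0, 2, 4] and take_side cycling [true, false, true], the front takes 0,
the back takes 4, the front takes 2, and then the iterator is exhausted, so
the result is the fronts followed by the reversed backs:

let got = double_ended_take(
    vec![0, 2, 4].into_iter(),
    [true, false, true].iter().cloned().cycle(),
);
// found_front = [0, 2], found_back = [4] => [0, 2] ++ rev([4])
assert_eq!(got, vec![0, 2, 4]);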
diff --git a/vendor/memchr/src/tests/memchr/memchr.rs b/vendor/memchr/src/tests/memchr/memchr.rs
deleted file mode 100644
index ac955ed68..000000000
--- a/vendor/memchr/src/tests/memchr/memchr.rs
+++ /dev/null
@@ -1,134 +0,0 @@
-use quickcheck::quickcheck;
-
-use crate::{
- memchr,
- memchr::{fallback, naive},
- memchr2, memchr3, memrchr, memrchr2, memrchr3,
- tests::memchr::testdata::memchr_tests,
-};
-
-#[test]
-fn memchr1_find() {
- for test in memchr_tests() {
- test.one(false, memchr);
- }
-}
-
-#[test]
-fn memchr1_fallback_find() {
- for test in memchr_tests() {
- test.one(false, fallback::memchr);
- }
-}
-
-#[test]
-fn memchr2_find() {
- for test in memchr_tests() {
- test.two(false, memchr2);
- }
-}
-
-#[test]
-fn memchr2_fallback_find() {
- for test in memchr_tests() {
- test.two(false, fallback::memchr2);
- }
-}
-
-#[test]
-fn memchr3_find() {
- for test in memchr_tests() {
- test.three(false, memchr3);
- }
-}
-
-#[test]
-fn memchr3_fallback_find() {
- for test in memchr_tests() {
- test.three(false, fallback::memchr3);
- }
-}
-
-#[test]
-fn memrchr1_find() {
- for test in memchr_tests() {
- test.one(true, memrchr);
- }
-}
-
-#[test]
-fn memrchr1_fallback_find() {
- for test in memchr_tests() {
- test.one(true, fallback::memrchr);
- }
-}
-
-#[test]
-fn memrchr2_find() {
- for test in memchr_tests() {
- test.two(true, memrchr2);
- }
-}
-
-#[test]
-fn memrchr2_fallback_find() {
- for test in memchr_tests() {
- test.two(true, fallback::memrchr2);
- }
-}
-
-#[test]
-fn memrchr3_find() {
- for test in memchr_tests() {
- test.three(true, memrchr3);
- }
-}
-
-#[test]
-fn memrchr3_fallback_find() {
- for test in memchr_tests() {
- test.three(true, fallback::memrchr3);
- }
-}
-
-quickcheck! {
- fn qc_memchr1_matches_naive(n1: u8, corpus: Vec<u8>) -> bool {
- memchr(n1, &corpus) == naive::memchr(n1, &corpus)
- }
-}
-
-quickcheck! {
- fn qc_memchr2_matches_naive(n1: u8, n2: u8, corpus: Vec<u8>) -> bool {
- memchr2(n1, n2, &corpus) == naive::memchr2(n1, n2, &corpus)
- }
-}
-
-quickcheck! {
- fn qc_memchr3_matches_naive(
- n1: u8, n2: u8, n3: u8,
- corpus: Vec<u8>
- ) -> bool {
- memchr3(n1, n2, n3, &corpus) == naive::memchr3(n1, n2, n3, &corpus)
- }
-}
-
-quickcheck! {
- fn qc_memrchr1_matches_naive(n1: u8, corpus: Vec<u8>) -> bool {
- memrchr(n1, &corpus) == naive::memrchr(n1, &corpus)
- }
-}
-
-quickcheck! {
- fn qc_memrchr2_matches_naive(n1: u8, n2: u8, corpus: Vec<u8>) -> bool {
- memrchr2(n1, n2, &corpus) == naive::memrchr2(n1, n2, &corpus)
- }
-}
-
-quickcheck! {
- fn qc_memrchr3_matches_naive(
- n1: u8, n2: u8, n3: u8,
- corpus: Vec<u8>
- ) -> bool {
- memrchr3(n1, n2, n3, &corpus) == naive::memrchr3(n1, n2, n3, &corpus)
- }
-}
diff --git a/vendor/memchr/src/tests/memchr/mod.rs b/vendor/memchr/src/tests/memchr/mod.rs
index 79f94ab56..0564ad4fb 100644
--- a/vendor/memchr/src/tests/memchr/mod.rs
+++ b/vendor/memchr/src/tests/memchr/mod.rs
@@ -1,7 +1,307 @@
-#[cfg(all(feature = "std", not(miri)))]
-mod iter;
-#[cfg(all(feature = "std", not(miri)))]
-mod memchr;
-mod simple;
-#[cfg(all(feature = "std", not(miri)))]
-mod testdata;
+use alloc::{
+ string::{String, ToString},
+ vec,
+ vec::Vec,
+};
+
+use crate::ext::Byte;
+
+pub(crate) mod naive;
+#[macro_use]
+pub(crate) mod prop;
+
+const SEEDS: &'static [Seed] = &[
+ Seed { haystack: "a", needles: &[b'a'], positions: &[0] },
+ Seed { haystack: "aa", needles: &[b'a'], positions: &[0, 1] },
+ Seed { haystack: "aaa", needles: &[b'a'], positions: &[0, 1, 2] },
+ Seed { haystack: "", needles: &[b'a'], positions: &[] },
+ Seed { haystack: "z", needles: &[b'a'], positions: &[] },
+ Seed { haystack: "zz", needles: &[b'a'], positions: &[] },
+ Seed { haystack: "zza", needles: &[b'a'], positions: &[2] },
+ Seed { haystack: "zaza", needles: &[b'a'], positions: &[1, 3] },
+ Seed { haystack: "zzza", needles: &[b'a'], positions: &[3] },
+ Seed { haystack: "\x00a", needles: &[b'a'], positions: &[1] },
+ Seed { haystack: "\x00", needles: &[b'\x00'], positions: &[0] },
+ Seed { haystack: "\x00\x00", needles: &[b'\x00'], positions: &[0, 1] },
+ Seed { haystack: "\x00a\x00", needles: &[b'\x00'], positions: &[0, 2] },
+ Seed { haystack: "zzzzzzzzzzzzzzzza", needles: &[b'a'], positions: &[16] },
+ Seed {
+ haystack: "zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzza",
+ needles: &[b'a'],
+ positions: &[32],
+ },
+ // two needles (applied to memchr2 + memchr3)
+ Seed { haystack: "az", needles: &[b'a', b'z'], positions: &[0, 1] },
+ Seed { haystack: "az", needles: &[b'a', b'z'], positions: &[0, 1] },
+ Seed { haystack: "az", needles: &[b'x', b'y'], positions: &[] },
+ Seed { haystack: "az", needles: &[b'a', b'y'], positions: &[0] },
+ Seed { haystack: "az", needles: &[b'x', b'z'], positions: &[1] },
+ Seed { haystack: "yyyyaz", needles: &[b'a', b'z'], positions: &[4, 5] },
+ Seed { haystack: "yyyyaz", needles: &[b'z', b'a'], positions: &[4, 5] },
+ // three needles (applied to memchr3)
+ Seed {
+ haystack: "xyz",
+ needles: &[b'x', b'y', b'z'],
+ positions: &[0, 1, 2],
+ },
+ Seed {
+ haystack: "zxy",
+ needles: &[b'x', b'y', b'z'],
+ positions: &[0, 1, 2],
+ },
+ Seed { haystack: "zxy", needles: &[b'x', b'a', b'z'], positions: &[0, 1] },
+ Seed { haystack: "zxy", needles: &[b't', b'a', b'z'], positions: &[0] },
+ Seed { haystack: "yxz", needles: &[b't', b'a', b'z'], positions: &[2] },
+];
+
+/// Runs a host of byte search tests.
+///
+/// This has support for "partial" byte search implementations that only work
+/// for a subset of needles/haystacks. For example, the "packed pair"
+/// substring search implementation only works for haystacks of some minimum
+/// length based on the pair of bytes selected and the size of the vector
+/// used.
+pub(crate) struct Runner {
+ needle_len: usize,
+}
+
+impl Runner {
+ /// Create a new test runner for forward and reverse byte search
+ /// implementations.
+ ///
+ /// The `needle_len` given must be at most `3` and at least `1`. It
+ /// corresponds to the number of needle bytes to search for.
+ pub(crate) fn new(needle_len: usize) -> Runner {
+ assert!(needle_len >= 1, "needle_len must be at least 1");
+ assert!(needle_len <= 3, "needle_len must be at most 3");
+ Runner { needle_len }
+ }
+
+ /// Run all tests. This panics on the first failure.
+ ///
+ /// If the implementation being tested returns `None` for a particular
+ /// haystack/needle combination, then that test is skipped.
+ pub(crate) fn forward_iter<F>(self, mut test: F)
+ where
+ F: FnMut(&[u8], &[u8]) -> Option<Vec<usize>> + 'static,
+ {
+ for seed in SEEDS.iter() {
+ if seed.needles.len() > self.needle_len {
+ continue;
+ }
+ for t in seed.generate() {
+ let results = match test(t.haystack.as_bytes(), &t.needles) {
+ None => continue,
+ Some(results) => results,
+ };
+ assert_eq!(
+ t.expected,
+ results,
+ "needles: {:?}, haystack: {:?}",
+ t.needles
+ .iter()
+ .map(|&b| b.to_char())
+ .collect::<Vec<char>>(),
+ t.haystack,
+ );
+ }
+ }
+ }
+
+ /// Run all tests in the reverse direction. This panics on the first
+ /// failure.
+ ///
+ /// If the implementation being tested returns `None` for a particular
+ /// haystack/needle combination, then that test is skipped.
+ pub(crate) fn reverse_iter<F>(self, mut test: F)
+ where
+ F: FnMut(&[u8], &[u8]) -> Option<Vec<usize>> + 'static,
+ {
+ for seed in SEEDS.iter() {
+ if seed.needles.len() > self.needle_len {
+ continue;
+ }
+ for t in seed.generate() {
+ let mut results = match test(t.haystack.as_bytes(), &t.needles)
+ {
+ None => continue,
+ Some(results) => results,
+ };
+ results.reverse();
+ assert_eq!(
+ t.expected,
+ results,
+ "needles: {:?}, haystack: {:?}",
+ t.needles
+ .iter()
+ .map(|&b| b.to_char())
+ .collect::<Vec<char>>(),
+ t.haystack,
+ );
+ }
+ }
+ }
+
+ /// Run all tests as counting tests. This panics on the first failure.
+ ///
+ /// That is, this only checks that the number of matches is correct and
+ /// not whether the offsets of each match are.
+ pub(crate) fn count_iter<F>(self, mut test: F)
+ where
+ F: FnMut(&[u8], &[u8]) -> Option<usize> + 'static,
+ {
+ for seed in SEEDS.iter() {
+ if seed.needles.len() > self.needle_len {
+ continue;
+ }
+ for t in seed.generate() {
+ let got = match test(t.haystack.as_bytes(), &t.needles) {
+ None => continue,
+ Some(got) => got,
+ };
+ assert_eq!(
+ t.expected.len(),
+ got,
+ "needles: {:?}, haystack: {:?}",
+ t.needles
+ .iter()
+ .map(|&b| b.to_char())
+ .collect::<Vec<char>>(),
+ t.haystack,
+ );
+ }
+ }
+ }
+
+ /// Like `Runner::forward`, but for a function that returns only the next
+ /// match and not all matches.
+ ///
+ /// If the function returns `None`, then it is skipped.
+ pub(crate) fn forward_oneshot<F>(self, mut test: F)
+ where
+ F: FnMut(&[u8], &[u8]) -> Option<Option<usize>> + 'static,
+ {
+ self.forward_iter(move |haystack, needles| {
+ let mut start = 0;
+ let mut results = vec![];
+ while let Some(i) = test(&haystack[start..], needles)? {
+ results.push(start + i);
+ start += i + 1;
+ }
+ Some(results)
+ })
+ }
+
+ /// Like `Runner::reverse`, but for a function that returns only the last
+ /// match and not all matches.
+ ///
+ /// If the function returns `None`, then it is skipped.
+ pub(crate) fn reverse_oneshot<F>(self, mut test: F)
+ where
+ F: FnMut(&[u8], &[u8]) -> Option<Option<usize>> + 'static,
+ {
+ self.reverse_iter(move |haystack, needles| {
+ let mut end = haystack.len();
+ let mut results = vec![];
+ while let Some(i) = test(&haystack[..end], needles)? {
+ results.push(i);
+ end = i;
+ }
+ Some(results)
+ })
+ }
+}
+
+/// A single test for memr?chr{,2,3}.
+#[derive(Clone, Debug)]
+struct Test {
+ /// The string to search in.
+ haystack: String,
+ /// The needles to look for.
+ needles: Vec<u8>,
+ /// The offsets that are expected to be found for all needles in the
+ /// forward direction.
+ expected: Vec<usize>,
+}
+
+impl Test {
+ fn new(seed: &Seed) -> Test {
+ Test {
+ haystack: seed.haystack.to_string(),
+ needles: seed.needles.to_vec(),
+ expected: seed.positions.to_vec(),
+ }
+ }
+}
+
+/// Data that can be expanded into many memchr tests by padding out the corpus.
+#[derive(Clone, Debug)]
+struct Seed {
+ /// The thing to search. We use `&str` instead of `&[u8]` because they
+ /// are nicer to write in tests, and we don't miss much since memchr
+ /// doesn't care about UTF-8.
+ ///
+ /// Corpora cannot contain either '%' or '#'. We use these bytes when
+ /// expanding test cases into many test cases, and we assume they are not
+ /// used. If they are used, the generated tests will be incorrect.
+ haystack: &'static str,
+ /// The needles to search for. This is intended to be an alternation of
+ /// needles. The number of needles may cause this test to be skipped for
+ /// some memchr variants. For example, a test with 2 needles cannot be used
+ /// to test `memchr`, but can be used to test `memchr2` and `memchr3`.
+ /// However, a test with only 1 needle can be used to test all of `memchr`,
+ /// `memchr2` and `memchr3`. We achieve this by filling in the needles with
+ /// bytes that we never used in the corpus (such as '#').
+ needles: &'static [u8],
+ /// The positions expected to match for all of the needles.
+ positions: &'static [usize],
+}
+
+impl Seed {
+ /// Controls how much we expand the haystack on either side for each test.
+ /// We lower this on Miri because otherwise running the tests would take
+ /// forever.
+ const EXPAND_LEN: usize = {
+ #[cfg(not(miri))]
+ {
+ 515
+ }
+ #[cfg(miri)]
+ {
+ 6
+ }
+ };
+
+ /// Expand this test into many variations of the same test.
+ ///
+ /// In particular, this will generate more tests with larger corpus sizes.
+ /// The expected positions are updated to maintain the integrity of the
+ /// test.
+ ///
+ /// This is important in testing a memchr implementation, because there are
+ /// often different cases depending on the length of the corpus.
+ ///
+ /// Note that we extend the corpus by adding `%` bytes, which we
+ /// don't otherwise use as a needle.
+ fn generate(&self) -> impl Iterator<Item = Test> {
+ let mut more = vec![];
+
+ // Add bytes to the start of the corpus.
+ for i in 0..Seed::EXPAND_LEN {
+ let mut t = Test::new(self);
+ let mut new: String = core::iter::repeat('%').take(i).collect();
+ new.push_str(&t.haystack);
+ t.haystack = new;
+ t.expected = t.expected.into_iter().map(|p| p + i).collect();
+ more.push(t);
+ }
+ // Add bytes to the end of the corpus.
+ for i in 1..Seed::EXPAND_LEN {
+ let mut t = Test::new(self);
+ let padding: String = core::iter::repeat('%').take(i).collect();
+ t.haystack.push_str(&padding);
+ more.push(t);
+ }
+
+ more.into_iter()
+ }
+}
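
A sketch of how an implementation hooks into this runner: the closure returns
`None` to skip inputs it can't handle, or `Some` with all match offsets. Here
a naive scan stands in for the implementation under test:

#[test]
fn forward_one_naive() {
    Runner::new(1).forward_iter(|haystack, needles| {
        let needle = *needles.get(0)?;
        let positions = haystack
            .iter()
            .enumerate()
            .filter(|&(_, &b)| b == needle)
            .map(|(i, _)| i)
            .collect();
        Some(positions)
    })
}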
diff --git a/vendor/memchr/src/tests/memchr/naive.rs b/vendor/memchr/src/tests/memchr/naive.rs
new file mode 100644
index 000000000..6ebcdaea7
--- /dev/null
+++ b/vendor/memchr/src/tests/memchr/naive.rs
@@ -0,0 +1,33 @@
+pub(crate) fn memchr(n1: u8, haystack: &[u8]) -> Option<usize> {
+ haystack.iter().position(|&b| b == n1)
+}
+
+pub(crate) fn memchr2(n1: u8, n2: u8, haystack: &[u8]) -> Option<usize> {
+ haystack.iter().position(|&b| b == n1 || b == n2)
+}
+
+pub(crate) fn memchr3(
+ n1: u8,
+ n2: u8,
+ n3: u8,
+ haystack: &[u8],
+) -> Option<usize> {
+ haystack.iter().position(|&b| b == n1 || b == n2 || b == n3)
+}
+
+pub(crate) fn memrchr(n1: u8, haystack: &[u8]) -> Option<usize> {
+ haystack.iter().rposition(|&b| b == n1)
+}
+
+pub(crate) fn memrchr2(n1: u8, n2: u8, haystack: &[u8]) -> Option<usize> {
+ haystack.iter().rposition(|&b| b == n1 || b == n2)
+}
+
+pub(crate) fn memrchr3(
+ n1: u8,
+ n2: u8,
+ n3: u8,
+ haystack: &[u8],
+) -> Option<usize> {
+ haystack.iter().rposition(|&b| b == n1 || b == n2 || b == n3)
+}
diff --git a/vendor/memchr/src/tests/memchr/prop.rs b/vendor/memchr/src/tests/memchr/prop.rs
new file mode 100644
index 000000000..b9882602b
--- /dev/null
+++ b/vendor/memchr/src/tests/memchr/prop.rs
@@ -0,0 +1,321 @@
+#[cfg(miri)]
+#[macro_export]
+macro_rules! define_memchr_quickcheck {
+ ($($tt:tt)*) => {};
+}
+
+#[cfg(not(miri))]
+#[macro_export]
+macro_rules! define_memchr_quickcheck {
+ ($mod:ident) => {
+ define_memchr_quickcheck!($mod, new);
+ };
+ ($mod:ident, $cons:ident) => {
+ use alloc::vec::Vec;
+
+ use quickcheck::TestResult;
+
+ use crate::tests::memchr::{
+ naive,
+ prop::{double_ended_take, naive1_iter, naive2_iter, naive3_iter},
+ };
+
+ quickcheck::quickcheck! {
+ fn qc_memchr_matches_naive(n1: u8, corpus: Vec<u8>) -> TestResult {
+ let expected = naive::memchr(n1, &corpus);
+ let got = match $mod::One::$cons(n1) {
+ None => return TestResult::discard(),
+ Some(f) => f.find(&corpus),
+ };
+ TestResult::from_bool(expected == got)
+ }
+
+ fn qc_memrchr_matches_naive(n1: u8, corpus: Vec<u8>) -> TestResult {
+ let expected = naive::memrchr(n1, &corpus);
+ let got = match $mod::One::$cons(n1) {
+ None => return TestResult::discard(),
+ Some(f) => f.rfind(&corpus),
+ };
+ TestResult::from_bool(expected == got)
+ }
+
+ fn qc_memchr2_matches_naive(n1: u8, n2: u8, corpus: Vec<u8>) -> TestResult {
+ let expected = naive::memchr2(n1, n2, &corpus);
+ let got = match $mod::Two::$cons(n1, n2) {
+ None => return TestResult::discard(),
+ Some(f) => f.find(&corpus),
+ };
+ TestResult::from_bool(expected == got)
+ }
+
+ fn qc_memrchr2_matches_naive(n1: u8, n2: u8, corpus: Vec<u8>) -> TestResult {
+ let expected = naive::memrchr2(n1, n2, &corpus);
+ let got = match $mod::Two::$cons(n1, n2) {
+ None => return TestResult::discard(),
+ Some(f) => f.rfind(&corpus),
+ };
+ TestResult::from_bool(expected == got)
+ }
+
+ fn qc_memchr3_matches_naive(
+ n1: u8, n2: u8, n3: u8,
+ corpus: Vec<u8>
+ ) -> TestResult {
+ let expected = naive::memchr3(n1, n2, n3, &corpus);
+ let got = match $mod::Three::$cons(n1, n2, n3) {
+ None => return TestResult::discard(),
+ Some(f) => f.find(&corpus),
+ };
+ TestResult::from_bool(expected == got)
+ }
+
+ fn qc_memrchr3_matches_naive(
+ n1: u8, n2: u8, n3: u8,
+ corpus: Vec<u8>
+ ) -> TestResult {
+ let expected = naive::memrchr3(n1, n2, n3, &corpus);
+ let got = match $mod::Three::$cons(n1, n2, n3) {
+ None => return TestResult::discard(),
+ Some(f) => f.rfind(&corpus),
+ };
+ TestResult::from_bool(expected == got)
+ }
+
+ fn qc_memchr_double_ended_iter(
+ needle: u8, data: Vec<u8>, take_side: Vec<bool>
+ ) -> TestResult {
+ // make nonempty
+ let mut take_side = take_side;
+ if take_side.is_empty() { take_side.push(true) };
+
+ let finder = match $mod::One::$cons(needle) {
+ None => return TestResult::discard(),
+ Some(finder) => finder,
+ };
+ let iter = finder.iter(&data);
+ let got = double_ended_take(
+ iter,
+ take_side.iter().cycle().cloned(),
+ );
+ let expected = naive1_iter(needle, &data);
+
+ TestResult::from_bool(got.iter().cloned().eq(expected))
+ }
+
+ fn qc_memchr2_double_ended_iter(
+ needle1: u8, needle2: u8, data: Vec<u8>, take_side: Vec<bool>
+ ) -> TestResult {
+ // make nonempty
+ let mut take_side = take_side;
+ if take_side.is_empty() { take_side.push(true) };
+
+ let finder = match $mod::Two::$cons(needle1, needle2) {
+ None => return TestResult::discard(),
+ Some(finder) => finder,
+ };
+ let iter = finder.iter(&data);
+ let got = double_ended_take(
+ iter,
+ take_side.iter().cycle().cloned(),
+ );
+ let expected = naive2_iter(needle1, needle2, &data);
+
+ TestResult::from_bool(got.iter().cloned().eq(expected))
+ }
+
+ fn qc_memchr3_double_ended_iter(
+ needle1: u8, needle2: u8, needle3: u8,
+ data: Vec<u8>, take_side: Vec<bool>
+ ) -> TestResult {
+ // make nonempty
+ let mut take_side = take_side;
+ if take_side.is_empty() { take_side.push(true) };
+
+ let finder = match $mod::Three::$cons(needle1, needle2, needle3) {
+ None => return TestResult::discard(),
+ Some(finder) => finder,
+ };
+ let iter = finder.iter(&data);
+ let got = double_ended_take(
+ iter,
+ take_side.iter().cycle().cloned(),
+ );
+ let expected = naive3_iter(needle1, needle2, needle3, &data);
+
+ TestResult::from_bool(got.iter().cloned().eq(expected))
+ }
+
+ fn qc_memchr1_iter(data: Vec<u8>) -> TestResult {
+ let needle = 0;
+ let finder = match $mod::One::$cons(needle) {
+ None => return TestResult::discard(),
+ Some(finder) => finder,
+ };
+ let got = finder.iter(&data);
+ let expected = naive1_iter(needle, &data);
+ TestResult::from_bool(got.eq(expected))
+ }
+
+ fn qc_memchr1_rev_iter(data: Vec<u8>) -> TestResult {
+ let needle = 0;
+
+ let finder = match $mod::One::$cons(needle) {
+ None => return TestResult::discard(),
+ Some(finder) => finder,
+ };
+ let got = finder.iter(&data).rev();
+ let expected = naive1_iter(needle, &data).rev();
+ TestResult::from_bool(got.eq(expected))
+ }
+
+ fn qc_memchr2_iter(data: Vec<u8>) -> TestResult {
+ let needle1 = 0;
+ let needle2 = 1;
+
+ let finder = match $mod::Two::$cons(needle1, needle2) {
+ None => return TestResult::discard(),
+ Some(finder) => finder,
+ };
+ let got = finder.iter(&data);
+ let expected = naive2_iter(needle1, needle2, &data);
+ TestResult::from_bool(got.eq(expected))
+ }
+
+ fn qc_memchr2_rev_iter(data: Vec<u8>) -> TestResult {
+ let needle1 = 0;
+ let needle2 = 1;
+
+ let finder = match $mod::Two::$cons(needle1, needle2) {
+ None => return TestResult::discard(),
+ Some(finder) => finder,
+ };
+ let got = finder.iter(&data).rev();
+ let expected = naive2_iter(needle1, needle2, &data).rev();
+ TestResult::from_bool(got.eq(expected))
+ }
+
+ fn qc_memchr3_iter(data: Vec<u8>) -> TestResult {
+ let needle1 = 0;
+ let needle2 = 1;
+ let needle3 = 2;
+
+ let finder = match $mod::Three::$cons(needle1, needle2, needle3) {
+ None => return TestResult::discard(),
+ Some(finder) => finder,
+ };
+ let got = finder.iter(&data);
+ let expected = naive3_iter(needle1, needle2, needle3, &data);
+ TestResult::from_bool(got.eq(expected))
+ }
+
+ fn qc_memchr3_rev_iter(data: Vec<u8>) -> TestResult {
+ let needle1 = 0;
+ let needle2 = 1;
+ let needle3 = 2;
+
+ let finder = match $mod::Three::$cons(needle1, needle2, needle3) {
+ None => return TestResult::discard(),
+ Some(finder) => finder,
+ };
+ let got = finder.iter(&data).rev();
+ let expected = naive3_iter(needle1, needle2, needle3, &data).rev();
+ TestResult::from_bool(got.eq(expected))
+ }
+
+ fn qc_memchr1_iter_size_hint(data: Vec<u8>) -> TestResult {
+ // test that the size hint is within reasonable bounds
+ let needle = 0;
+ let finder = match $mod::One::$cons(needle) {
+ None => return TestResult::discard(),
+ Some(finder) => finder,
+ };
+ let mut iter = finder.iter(&data);
+ let mut real_count = data
+ .iter()
+ .filter(|&&elt| elt == needle)
+ .count();
+
+ while let Some(index) = iter.next() {
+ real_count -= 1;
+ let (lower, upper) = iter.size_hint();
+ assert!(lower <= real_count);
+ assert!(upper.unwrap() >= real_count);
+ assert!(upper.unwrap() <= data.len() - index);
+ }
+ TestResult::passed()
+ }
+ }
+ };
+}
+
+// take items from a DEI, taking front for each true and back for each false.
+// Return a vector with the concatenation of the fronts and the reverse of the
+// backs.
+#[cfg(not(miri))]
+pub(crate) fn double_ended_take<I, J>(
+ mut iter: I,
+ take_side: J,
+) -> alloc::vec::Vec<I::Item>
+where
+ I: DoubleEndedIterator,
+ J: Iterator<Item = bool>,
+{
+ let mut found_front = alloc::vec![];
+ let mut found_back = alloc::vec![];
+
+ for take_front in take_side {
+ if take_front {
+ if let Some(pos) = iter.next() {
+ found_front.push(pos);
+ } else {
+ break;
+ }
+ } else {
+ if let Some(pos) = iter.next_back() {
+ found_back.push(pos);
+ } else {
+ break;
+ }
+ };
+ }
+
+ let mut all_found = found_front;
+ all_found.extend(found_back.into_iter().rev());
+ all_found
+}
+
+// return an iterator of the 0-based indices of haystack that match the needle
+#[cfg(not(miri))]
+pub(crate) fn naive1_iter<'a>(
+ n1: u8,
+ haystack: &'a [u8],
+) -> impl DoubleEndedIterator<Item = usize> + 'a {
+ haystack.iter().enumerate().filter(move |&(_, &b)| b == n1).map(|t| t.0)
+}
+
+#[cfg(not(miri))]
+pub(crate) fn naive2_iter<'a>(
+ n1: u8,
+ n2: u8,
+ haystack: &'a [u8],
+) -> impl DoubleEndedIterator<Item = usize> + 'a {
+ haystack
+ .iter()
+ .enumerate()
+ .filter(move |&(_, &b)| b == n1 || b == n2)
+ .map(|t| t.0)
+}
+
+#[cfg(not(miri))]
+pub(crate) fn naive3_iter<'a>(
+ n1: u8,
+ n2: u8,
+ n3: u8,
+ haystack: &'a [u8],
+) -> impl DoubleEndedIterator<Item = usize> + 'a {
+ haystack
+ .iter()
+ .enumerate()
+ .filter(move |&(_, &b)| b == n1 || b == n2 || b == n3)
+ .map(|t| t.0)
+}
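
A hypothetical invocation, for a searcher module exposing `One`, `Two` and
`Three` finder types with a fallible `new` constructor (the real call sites
live in the arch-specific modules):

#[cfg(test)]
mod tests {
    define_memchr_quickcheck!(super);
}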
diff --git a/vendor/memchr/src/tests/memchr/simple.rs b/vendor/memchr/src/tests/memchr/simple.rs
deleted file mode 100644
index bed5b4863..000000000
--- a/vendor/memchr/src/tests/memchr/simple.rs
+++ /dev/null
@@ -1,23 +0,0 @@
-// Simple tests using MIRI. These are intended only to be a simple exercise of
-// memchr when tests are run under miri. These are mostly necessary because the
-// other tests are far more extensive and take too long to run under miri.
-//
-// These tests are also run when the 'std' feature is not enabled.
-
-use crate::{memchr, memchr2, memchr3, memrchr, memrchr2, memrchr3};
-
-#[test]
-fn simple() {
- assert_eq!(memchr(b'a', b"abcda"), Some(0));
- assert_eq!(memchr(b'z', b"abcda"), None);
- assert_eq!(memchr2(b'a', b'z', b"abcda"), Some(0));
- assert_eq!(memchr2(b'z', b'y', b"abcda"), None);
- assert_eq!(memchr3(b'a', b'z', b'b', b"abcda"), Some(0));
- assert_eq!(memchr3(b'z', b'y', b'x', b"abcda"), None);
- assert_eq!(memrchr(b'a', b"abcda"), Some(4));
- assert_eq!(memrchr(b'z', b"abcda"), None);
- assert_eq!(memrchr2(b'a', b'z', b"abcda"), Some(4));
- assert_eq!(memrchr2(b'z', b'y', b"abcda"), None);
- assert_eq!(memrchr3(b'a', b'z', b'b', b"abcda"), Some(4));
- assert_eq!(memrchr3(b'z', b'y', b'x', b"abcda"), None);
-}
diff --git a/vendor/memchr/src/tests/memchr/testdata.rs b/vendor/memchr/src/tests/memchr/testdata.rs
deleted file mode 100644
index 6dda5246f..000000000
--- a/vendor/memchr/src/tests/memchr/testdata.rs
+++ /dev/null
@@ -1,351 +0,0 @@
-use std::iter::repeat;
-
-/// Create a sequence of tests that should be run by memchr implementations.
-pub fn memchr_tests() -> Vec<MemchrTest> {
- let mut tests = Vec::new();
- for statict in MEMCHR_TESTS {
- assert!(!statict.corpus.contains("%"), "% is not allowed in corpora");
- assert!(!statict.corpus.contains("#"), "# is not allowed in corpora");
- assert!(!statict.needles.contains(&b'%'), "% is an invalid needle");
- assert!(!statict.needles.contains(&b'#'), "# is an invalid needle");
-
- let t = MemchrTest {
- corpus: statict.corpus.to_string(),
- needles: statict.needles.to_vec(),
- positions: statict.positions.to_vec(),
- };
- tests.push(t.clone());
- tests.extend(t.expand());
- }
- tests
-}
-
-/// A set of tests for memchr-like functions.
-///
-/// These tests mostly try to cover the short string cases. We cover the longer
-/// string cases via the benchmarks (which are tests themselves), via
-/// quickcheck tests and via automatic expansion of each test case (by
-/// increasing the corpus size). Finally, we cover different alignment cases
-/// in the tests by varying the starting point of the slice.
-const MEMCHR_TESTS: &[MemchrTestStatic] = &[
- // one needle (applied to memchr + memchr2 + memchr3)
- MemchrTestStatic { corpus: "a", needles: &[b'a'], positions: &[0] },
- MemchrTestStatic { corpus: "aa", needles: &[b'a'], positions: &[0, 1] },
- MemchrTestStatic {
- corpus: "aaa",
- needles: &[b'a'],
- positions: &[0, 1, 2],
- },
- MemchrTestStatic { corpus: "", needles: &[b'a'], positions: &[] },
- MemchrTestStatic { corpus: "z", needles: &[b'a'], positions: &[] },
- MemchrTestStatic { corpus: "zz", needles: &[b'a'], positions: &[] },
- MemchrTestStatic { corpus: "zza", needles: &[b'a'], positions: &[2] },
- MemchrTestStatic { corpus: "zaza", needles: &[b'a'], positions: &[1, 3] },
- MemchrTestStatic { corpus: "zzza", needles: &[b'a'], positions: &[3] },
- MemchrTestStatic { corpus: "\x00a", needles: &[b'a'], positions: &[1] },
- MemchrTestStatic { corpus: "\x00", needles: &[b'\x00'], positions: &[0] },
- MemchrTestStatic {
- corpus: "\x00\x00",
- needles: &[b'\x00'],
- positions: &[0, 1],
- },
- MemchrTestStatic {
- corpus: "\x00a\x00",
- needles: &[b'\x00'],
- positions: &[0, 2],
- },
- MemchrTestStatic {
- corpus: "zzzzzzzzzzzzzzzza",
- needles: &[b'a'],
- positions: &[16],
- },
- MemchrTestStatic {
- corpus: "zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzza",
- needles: &[b'a'],
- positions: &[32],
- },
- // two needles (applied to memchr2 + memchr3)
- MemchrTestStatic {
- corpus: "az",
- needles: &[b'a', b'z'],
- positions: &[0, 1],
- },
- MemchrTestStatic {
- corpus: "az",
- needles: &[b'a', b'z'],
- positions: &[0, 1],
- },
- MemchrTestStatic { corpus: "az", needles: &[b'x', b'y'], positions: &[] },
- MemchrTestStatic { corpus: "az", needles: &[b'a', b'y'], positions: &[0] },
- MemchrTestStatic { corpus: "az", needles: &[b'x', b'z'], positions: &[1] },
- MemchrTestStatic {
- corpus: "yyyyaz",
- needles: &[b'a', b'z'],
- positions: &[4, 5],
- },
- MemchrTestStatic {
- corpus: "yyyyaz",
- needles: &[b'z', b'a'],
- positions: &[4, 5],
- },
- // three needles (applied to memchr3)
- MemchrTestStatic {
- corpus: "xyz",
- needles: &[b'x', b'y', b'z'],
- positions: &[0, 1, 2],
- },
- MemchrTestStatic {
- corpus: "zxy",
- needles: &[b'x', b'y', b'z'],
- positions: &[0, 1, 2],
- },
- MemchrTestStatic {
- corpus: "zxy",
- needles: &[b'x', b'a', b'z'],
- positions: &[0, 1],
- },
- MemchrTestStatic {
- corpus: "zxy",
- needles: &[b't', b'a', b'z'],
- positions: &[0],
- },
- MemchrTestStatic {
- corpus: "yxz",
- needles: &[b't', b'a', b'z'],
- positions: &[2],
- },
-];
-
-/// A description of a test on a memchr like function.
-#[derive(Clone, Debug)]
-pub struct MemchrTest {
- /// The thing to search. We use `&str` instead of `&[u8]` because they
- /// are nicer to write in tests, and we don't miss much since memchr
- /// doesn't care about UTF-8.
- ///
- /// Corpora cannot contain either '%' or '#'. We use these bytes when
- /// expanding test cases into many test cases, and we assume they are not
- /// used. If they are used, `memchr_tests` will panic.
- corpus: String,
- /// The needles to search for. This is intended to be an "alternation" of
- /// needles. The number of needles may cause this test to be skipped for
- /// some memchr variants. For example, a test with 2 needles cannot be used
- /// to test `memchr`, but can be used to test `memchr2` and `memchr3`.
- /// However, a test with only 1 needle can be used to test all of `memchr`,
- /// `memchr2` and `memchr3`. We achieve this by filling in the needles with
- /// bytes that we never used in the corpus (such as '#').
- needles: Vec<u8>,
- /// The positions expected to match for all of the needles.
- positions: Vec<usize>,
-}
-
-/// Like MemchrTest, but easier to define as a constant.
-#[derive(Clone, Debug)]
-pub struct MemchrTestStatic {
- corpus: &'static str,
- needles: &'static [u8],
- positions: &'static [usize],
-}
-
-impl MemchrTest {
- pub fn one<F: Fn(u8, &[u8]) -> Option<usize>>(&self, reverse: bool, f: F) {
- let needles = match self.needles(1) {
- None => return,
- Some(needles) => needles,
- };
- // We test different alignments here. Since some implementations use
- // AVX2, which can read 32 bytes at a time, we test at least that.
- // Moreover, with loop unrolling, we sometimes process 64 (sse2) or 128
- // (avx) bytes at a time, so we include that in our offsets as well.
- //
- // You might think this would cause most needles to not be found, but
- // we actually expand our tests to include corpus sizes all the way up
- // to >500 bytes, so we should exercise most branches.
- for align in 0..130 {
- let corpus = self.corpus(align);
- assert_eq!(
- self.positions(align, reverse).get(0).cloned(),
- f(needles[0], corpus.as_bytes()),
- "search for {:?} failed in: {:?} (len: {}, alignment: {})",
- needles[0] as char,
- corpus,
- corpus.len(),
- align
- );
- }
- }
-
- pub fn two<F: Fn(u8, u8, &[u8]) -> Option<usize>>(
- &self,
- reverse: bool,
- f: F,
- ) {
- let needles = match self.needles(2) {
- None => return,
- Some(needles) => needles,
- };
- for align in 0..130 {
- let corpus = self.corpus(align);
- assert_eq!(
- self.positions(align, reverse).get(0).cloned(),
- f(needles[0], needles[1], corpus.as_bytes()),
- "search for {:?}|{:?} failed in: {:?} \
- (len: {}, alignment: {})",
- needles[0] as char,
- needles[1] as char,
- corpus,
- corpus.len(),
- align
- );
- }
- }
-
- pub fn three<F: Fn(u8, u8, u8, &[u8]) -> Option<usize>>(
- &self,
- reverse: bool,
- f: F,
- ) {
- let needles = match self.needles(3) {
- None => return,
- Some(needles) => needles,
- };
- for align in 0..130 {
- let corpus = self.corpus(align);
- assert_eq!(
- self.positions(align, reverse).get(0).cloned(),
- f(needles[0], needles[1], needles[2], corpus.as_bytes()),
- "search for {:?}|{:?}|{:?} failed in: {:?} \
- (len: {}, alignment: {})",
- needles[0] as char,
- needles[1] as char,
- needles[2] as char,
- corpus,
- corpus.len(),
- align
- );
- }
- }
-
- pub fn iter_one<'a, I, F>(&'a self, reverse: bool, f: F)
- where
- F: FnOnce(u8, &'a [u8]) -> I,
- I: Iterator<Item = usize>,
- {
- if let Some(ns) = self.needles(1) {
- self.iter(reverse, f(ns[0], self.corpus.as_bytes()));
- }
- }
-
- pub fn iter_two<'a, I, F>(&'a self, reverse: bool, f: F)
- where
- F: FnOnce(u8, u8, &'a [u8]) -> I,
- I: Iterator<Item = usize>,
- {
- if let Some(ns) = self.needles(2) {
- self.iter(reverse, f(ns[0], ns[1], self.corpus.as_bytes()));
- }
- }
-
- pub fn iter_three<'a, I, F>(&'a self, reverse: bool, f: F)
- where
- F: FnOnce(u8, u8, u8, &'a [u8]) -> I,
- I: Iterator<Item = usize>,
- {
- if let Some(ns) = self.needles(3) {
- self.iter(reverse, f(ns[0], ns[1], ns[2], self.corpus.as_bytes()));
- }
- }
-
- /// Test that the positions yielded by the given iterator match the
- /// positions in this test. If reverse is true, then reverse the positions
- /// before comparing them.
- fn iter<I: Iterator<Item = usize>>(&self, reverse: bool, it: I) {
- assert_eq!(
- self.positions(0, reverse),
- it.collect::<Vec<usize>>(),
- r"search for {:?} failed in: {:?}",
- self.needles.iter().map(|&b| b as char).collect::<Vec<char>>(),
- self.corpus
- );
- }
-
- /// Expand this test into many variations of the same test.
- ///
- /// In particular, this will generate more tests with larger corpus sizes.
- /// The expected positions are updated to maintain the integrity of the
- /// test.
- ///
- /// This is important in testing a memchr implementation, because there are
- /// often different cases depending on the length of the corpus.
- ///
- /// Note that we extend the corpus by adding `%` bytes, which we
- /// don't otherwise use as a needle.
- fn expand(&self) -> Vec<MemchrTest> {
- let mut more = Vec::new();
-
- // Add bytes to the start of the corpus.
- for i in 1..515 {
- let mut t = self.clone();
- let mut new_corpus: String = repeat('%').take(i).collect();
- new_corpus.push_str(&t.corpus);
- t.corpus = new_corpus;
- t.positions = t.positions.into_iter().map(|p| p + i).collect();
- more.push(t);
- }
- // Add bytes to the end of the corpus.
- for i in 1..515 {
- let mut t = self.clone();
- let padding: String = repeat('%').take(i).collect();
- t.corpus.push_str(&padding);
- more.push(t);
- }
-
- more
- }
-
- /// Return the corpus at the given alignment.
- ///
- /// If the alignment exceeds the length of the corpus, then this returns
- /// an empty slice.
- fn corpus(&self, align: usize) -> &str {
- self.corpus.get(align..).unwrap_or("")
- }
-
- /// Return exactly `count` needles from this test. If this test has fewer
- /// than `count` needles, then add `#` until the number of needles
- /// matches `count`. If this test has more than `count` needles, then
- /// return `None` (because there is no way to use this test data for a
- /// search using fewer needles).
- fn needles(&self, count: usize) -> Option<Vec<u8>> {
- if self.needles.len() > count {
- return None;
- }
-
- let mut needles = self.needles.to_vec();
- for _ in needles.len()..count {
- // we assume # is never used in tests.
- needles.push(b'#');
- }
- Some(needles)
- }
-
- /// Return the positions in this test, reversed if `reverse` is true.
- ///
- /// If alignment is given, then all positions greater than or equal to that
- /// alignment are offset by the alignment. Positions less than the
- /// alignment are dropped.
- fn positions(&self, align: usize, reverse: bool) -> Vec<usize> {
- let positions = if reverse {
- let mut positions = self.positions.to_vec();
- positions.reverse();
- positions
- } else {
- self.positions.to_vec()
- };
- positions
- .into_iter()
- .filter(|&p| p >= align)
- .map(|p| p - align)
- .collect()
- }
-}
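
A worked example of the alignment handling above: for the test
{ corpus: "zaza", needles: [b'a'], positions: [1, 3] } at align = 1,

// corpus(1) slices off the first byte:         "aza"
// positions(1, false) drops p < 1 and shifts:  [1, 3] -> [0, 2]
// so `one` asserts f(b'a', b"aza") == Some(0).

Positions smaller than the alignment are dropped entirely because those
matches fall before the start of the sliced corpus.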
diff --git a/vendor/memchr/src/tests/mod.rs b/vendor/memchr/src/tests/mod.rs
index f4d406cd1..259b67827 100644
--- a/vendor/memchr/src/tests/mod.rs
+++ b/vendor/memchr/src/tests/mod.rs
@@ -1,15 +1,15 @@
-mod memchr;
+#[macro_use]
+pub(crate) mod memchr;
+pub(crate) mod packedpair;
+#[macro_use]
+pub(crate) mod substring;
// For debugging, particularly in CI, print out the byte order of the current
// target.
-#[cfg(all(feature = "std", target_endian = "little"))]
#[test]
fn byte_order() {
- eprintln!("LITTLE ENDIAN");
-}
-
-#[cfg(all(feature = "std", target_endian = "big"))]
-#[test]
-fn byte_order() {
- eprintln!("BIG ENDIAN");
+ #[cfg(target_endian = "little")]
+ std::eprintln!("LITTLE ENDIAN");
+ #[cfg(target_endian = "big")]
+ std::eprintln!("BIG ENDIAN");
}
diff --git a/vendor/memchr/src/tests/packedpair.rs b/vendor/memchr/src/tests/packedpair.rs
new file mode 100644
index 000000000..204635b83
--- /dev/null
+++ b/vendor/memchr/src/tests/packedpair.rs
@@ -0,0 +1,216 @@
+use alloc::{boxed::Box, vec, vec::Vec};
+
+/// A set of "packed pair" test seeds. Each seed serves as the base for the
+/// generation of many other tests. In essence, the seed captures the pair of
+/// bytes used for the predicate and the first byte of the needle. The tests
+/// generated from each seed essentially vary the length of the needle and
+/// haystack, while using the rare/first byte configuration from the seed.
+///
+/// The purpose of this is to test many different needle/haystack lengths.
+/// In particular, some of the vector optimizations might only have bugs
+/// in haystacks of a certain size.
+const SEEDS: &[Seed] = &[
+ // Why not use different 'first' bytes? It seemed like a good idea to be
+ // able to configure it, but when I wrote the test generator below, it
+ // didn't seem necessary, for reasons I now forget.
+ Seed { first: b'x', index1: b'y', index2: b'z' },
+ Seed { first: b'x', index1: b'x', index2: b'z' },
+ Seed { first: b'x', index1: b'y', index2: b'x' },
+ Seed { first: b'x', index1: b'x', index2: b'x' },
+ Seed { first: b'x', index1: b'y', index2: b'y' },
+];
+
+/// Runs a host of "packed pair" search tests.
+///
+/// These tests specifically look for the occurrence of a possible substring
+/// match based on a pair of bytes matching at the right offsets.
+pub(crate) struct Runner {
+ fwd: Option<
+ Box<
+ dyn FnMut(&[u8], &[u8], u8, u8) -> Option<Option<usize>> + 'static,
+ >,
+ >,
+}
+
+impl Runner {
+ /// Create a new test runner for "packed pair" substring search.
+ pub(crate) fn new() -> Runner {
+ Runner { fwd: None }
+ }
+
+ /// Run all tests. This panics on the first failure.
+ ///
+ /// If the implementation being tested returns `None` for a particular
+ /// haystack/needle combination, then that test is skipped.
+ ///
+ /// This runs tests on the forward implementation given. If it is
+ /// missing, then the tests are skipped. (Unlike the substring runner,
+ /// this runner has no reverse implementation.)
+ pub(crate) fn run(self) {
+ if let Some(mut fwd) = self.fwd {
+ for seed in SEEDS.iter() {
+ for t in seed.generate() {
+ match fwd(&t.haystack, &t.needle, t.index1, t.index2) {
+ None => continue,
+ Some(result) => {
+ assert_eq!(
+ t.fwd, result,
+ "FORWARD, needle: {:?}, haystack: {:?}, \
+ index1: {:?}, index2: {:?}",
+ t.needle, t.haystack, t.index1, t.index2,
+ )
+ }
+ }
+ }
+ }
+ }
+ }
+
+ /// Set the implementation for forward "packed pair" substring search.
+ ///
+ /// If the closure returns `None`, then it is assumed that the given
+ /// test cannot be applied to the particular implementation and it is
+ /// skipped. For example, a particular implementation might only support
+ /// needles or haystacks above some minimum length.
+ ///
+ /// If this is not set, then forward "packed pair" search is not tested.
+ pub(crate) fn fwd(
+ mut self,
+ search: impl FnMut(&[u8], &[u8], u8, u8) -> Option<Option<usize>> + 'static,
+ ) -> Runner {
+ self.fwd = Some(Box::new(search));
+ self
+ }
+}
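As an illustrative sketch (not part of this patch), the kind of closure one
might hand to `Runner::fwd` is a naive searcher that uses the byte pair at
`index1`/`index2` as a cheap predicate before verifying the whole needle. The
name `naive_packedpair` is hypothetical.

    // Never skips a test, so it always returns `Some(..)`.
    fn naive_packedpair(
        haystack: &[u8],
        needle: &[u8],
        index1: u8,
        index2: u8,
    ) -> Option<Option<usize>> {
        let (i1, i2) = (usize::from(index1), usize::from(index2));
        if needle.len() > haystack.len() {
            return Some(None);
        }
        for pos in 0..=(haystack.len() - needle.len()) {
            // Check the pair first, then confirm the entire needle.
            if haystack[pos + i1] == needle[i1]
                && haystack[pos + i2] == needle[i2]
                && needle == &haystack[pos..pos + needle.len()]
            {
                return Some(Some(pos));
            }
        }
        Some(None)
    }

    // Usage: Runner::new().fwd(naive_packedpair).run()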
+
+/// A test that represents the input and expected output to a "packed pair"
+/// search function. The test should be able to run with any "packed pair"
+/// implementation and get the expected output.
+struct Test {
+ haystack: Vec<u8>,
+ needle: Vec<u8>,
+ index1: u8,
+ index2: u8,
+ fwd: Option<usize>,
+}
+
+impl Test {
+ /// Create a new "packed pair" test from a seed and some given offsets to
+ /// the pair of bytes to use as a predicate in the seed's needle.
+ ///
+ /// If a valid test could not be constructed, then None is returned.
+ /// (Currently, we take the approach of massaging tests to be valid
+ /// instead of rejecting them outright.)
+ fn new(
+ seed: Seed,
+ index1: usize,
+ index2: usize,
+ haystack_len: usize,
+ needle_len: usize,
+ fwd: Option<usize>,
+ ) -> Option<Test> {
+ let mut index1: u8 = index1.try_into().unwrap();
+ let mut index2: u8 = index2.try_into().unwrap();
+ // The '#' byte is never used in a haystack (unless we're expecting
+ // a match), while the '@' byte is never used in a needle.
+ let mut haystack = vec![b'@'; haystack_len];
+ let mut needle = vec![b'#'; needle_len];
+ needle[0] = seed.first;
+ needle[index1 as usize] = seed.index1;
+ needle[index2 as usize] = seed.index2;
+ // If we're expecting a match, then make sure the needle occurs
+ // in the haystack at the expected position.
+ if let Some(i) = fwd {
+ haystack[i..i + needle.len()].copy_from_slice(&needle);
+ }
+ // If the operations above leave a rare offset pointing at a non-first
+ // occurrence of its byte, then adjust it to the first occurrence. This
+ // might produce redundant tests, but it's simpler than changing the
+ // generation process.
+ if let Some(i) = crate::memchr(seed.index1, &needle) {
+ index1 = u8::try_from(i).unwrap();
+ }
+ if let Some(i) = crate::memchr(seed.index2, &needle) {
+ index2 = u8::try_from(i).unwrap();
+ }
+ Some(Test { haystack, needle, index1, index2, fwd })
+ }
+}
+
+/// Data that describes a single prefilter test seed.
+#[derive(Clone, Copy)]
+struct Seed {
+ first: u8,
+ index1: u8,
+ index2: u8,
+}
+
+impl Seed {
+ const NEEDLE_LENGTH_LIMIT: usize = {
+ #[cfg(not(miri))]
+ {
+ 33
+ }
+ #[cfg(miri)]
+ {
+ 5
+ }
+ };
+
+ const HAYSTACK_LENGTH_LIMIT: usize = {
+ #[cfg(not(miri))]
+ {
+ 65
+ }
+ #[cfg(miri)]
+ {
+ 8
+ }
+ };
+
+ /// Generate a series of prefilter tests from this seed.
+ fn generate(self) -> impl Iterator<Item = Test> {
+ let len_start = 2;
+ // The iterator below generates *a lot* of tests. The number of
+ // tests was chosen somewhat empirically to be "bearable" when
+ // running the test suite.
+ //
+ // We use an iterator here because the collective haystacks of all
+ // these test cases add up to enough memory to OOM a conservative
+ // sandbox or a small laptop.
+ (len_start..=Seed::NEEDLE_LENGTH_LIMIT).flat_map(move |needle_len| {
+ let index_start = len_start - 1;
+ (index_start..needle_len).flat_map(move |index1| {
+ (index1..needle_len).flat_map(move |index2| {
+ (needle_len..=Seed::HAYSTACK_LENGTH_LIMIT).flat_map(
+ move |haystack_len| {
+ Test::new(
+ self,
+ index1,
+ index2,
+ haystack_len,
+ needle_len,
+ None,
+ )
+ .into_iter()
+ .chain(
+ (0..=(haystack_len - needle_len)).flat_map(
+ move |output| {
+ Test::new(
+ self,
+ index1,
+ index2,
+ haystack_len,
+ needle_len,
+ Some(output),
+ )
+ },
+ ),
+ )
+ },
+ )
+ })
+ })
+ })
+ }
+}
diff --git a/vendor/memchr/src/tests/substring/mod.rs b/vendor/memchr/src/tests/substring/mod.rs
new file mode 100644
index 000000000..dd10cbdd4
--- /dev/null
+++ b/vendor/memchr/src/tests/substring/mod.rs
@@ -0,0 +1,232 @@
+/*!
+This module defines tests and test helpers for substring implementations.
+*/
+
+use alloc::{
+ boxed::Box,
+ format,
+ string::{String, ToString},
+};
+
+pub(crate) mod naive;
+#[macro_use]
+pub(crate) mod prop;
+
+const SEEDS: &'static [Seed] = &[
+ Seed::new("", "", Some(0), Some(0)),
+ Seed::new("", "a", Some(0), Some(1)),
+ Seed::new("", "ab", Some(0), Some(2)),
+ Seed::new("", "abc", Some(0), Some(3)),
+ Seed::new("a", "", None, None),
+ Seed::new("a", "a", Some(0), Some(0)),
+ Seed::new("a", "aa", Some(0), Some(1)),
+ Seed::new("a", "ba", Some(1), Some(1)),
+ Seed::new("a", "bba", Some(2), Some(2)),
+ Seed::new("a", "bbba", Some(3), Some(3)),
+ Seed::new("a", "bbbab", Some(3), Some(3)),
+ Seed::new("a", "bbbabb", Some(3), Some(3)),
+ Seed::new("a", "bbbabbb", Some(3), Some(3)),
+ Seed::new("a", "bbbbbb", None, None),
+ Seed::new("ab", "", None, None),
+ Seed::new("ab", "a", None, None),
+ Seed::new("ab", "b", None, None),
+ Seed::new("ab", "ab", Some(0), Some(0)),
+ Seed::new("ab", "aab", Some(1), Some(1)),
+ Seed::new("ab", "aaab", Some(2), Some(2)),
+ Seed::new("ab", "abaab", Some(0), Some(3)),
+ Seed::new("ab", "baaab", Some(3), Some(3)),
+ Seed::new("ab", "acb", None, None),
+ Seed::new("ab", "abba", Some(0), Some(0)),
+ Seed::new("abc", "ab", None, None),
+ Seed::new("abc", "abc", Some(0), Some(0)),
+ Seed::new("abc", "abcz", Some(0), Some(0)),
+ Seed::new("abc", "abczz", Some(0), Some(0)),
+ Seed::new("abc", "zabc", Some(1), Some(1)),
+ Seed::new("abc", "zzabc", Some(2), Some(2)),
+ Seed::new("abc", "azbc", None, None),
+ Seed::new("abc", "abzc", None, None),
+ Seed::new("abczdef", "abczdefzzzzzzzzzzzzzzzzzzzz", Some(0), Some(0)),
+ Seed::new("abczdef", "zzzzzzzzzzzzzzzzzzzzabczdef", Some(20), Some(20)),
+ Seed::new(
+ "xyz",
+ "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaxyz",
+ Some(32),
+ Some(32),
+ ),
+ Seed::new("\u{0}\u{15}", "\u{0}\u{15}\u{15}\u{0}", Some(0), Some(0)),
+ Seed::new("\u{0}\u{1e}", "\u{1e}\u{0}", None, None),
+];
+
+/// Runs a host of substring search tests.
+///
+/// This has support for "partial" substring search implementations that only
+/// work for a subset of needles/haystacks. For example, the "packed pair"
+/// substring search implementation only works for haystacks of some minimum
+/// length, based on the pair of bytes selected and the vector size used.
+pub(crate) struct Runner {
+ fwd: Option<
+ Box<dyn FnMut(&[u8], &[u8]) -> Option<Option<usize>> + 'static>,
+ >,
+ rev: Option<
+ Box<dyn FnMut(&[u8], &[u8]) -> Option<Option<usize>> + 'static>,
+ >,
+}
+
+impl Runner {
+ /// Create a new test runner for forward and reverse substring search
+ /// implementations.
+ pub(crate) fn new() -> Runner {
+ Runner { fwd: None, rev: None }
+ }
+
+ /// Run all tests. This panics on the first failure.
+ ///
+ /// If the implementation being tested returns `None` for a particular
+ /// haystack/needle combination, then that test is skipped.
+ ///
+ /// This runs tests on both the forward and reverse implementations given.
+ /// If either (or both) are missing, then tests for that implementation are
+ /// skipped.
+ pub(crate) fn run(self) {
+ if let Some(mut fwd) = self.fwd {
+ for seed in SEEDS.iter() {
+ for t in seed.generate() {
+ match fwd(t.haystack.as_bytes(), t.needle.as_bytes()) {
+ None => continue,
+ Some(result) => {
+ assert_eq!(
+ t.fwd, result,
+ "FORWARD, needle: {:?}, haystack: {:?}",
+ t.needle, t.haystack,
+ );
+ }
+ }
+ }
+ }
+ }
+ if let Some(mut rev) = self.rev {
+ for seed in SEEDS.iter() {
+ for t in seed.generate() {
+ match rev(t.haystack.as_bytes(), t.needle.as_bytes()) {
+ None => continue,
+ Some(result) => {
+ assert_eq!(
+ t.rev, result,
+ "REVERSE, needle: {:?}, haystack: {:?}",
+ t.needle, t.haystack,
+ );
+ }
+ }
+ }
+ }
+ }
+ }
+
+ /// Set the implementation for forward substring search.
+ ///
+ /// If the closure returns `None`, then it is assumed that the given
+ /// test cannot be applied to the particular implementation and it is
+ /// skipped. For example, a particular implementation might only support
+ /// needles or haystacks above some minimum length.
+ ///
+ /// If this is not set, then forward substring search is not tested.
+ pub(crate) fn fwd(
+ mut self,
+ search: impl FnMut(&[u8], &[u8]) -> Option<Option<usize>> + 'static,
+ ) -> Runner {
+ self.fwd = Some(Box::new(search));
+ self
+ }
+
+ /// Set the implementation for reverse substring search.
+ ///
+ /// If the closure returns `None`, then it is assumed that the given
+ /// test cannot be applied to the particular implementation and it is
+ /// skipped. For example, a particular implementation might only support
+ /// needles or haystacks above some minimum length.
+ ///
+ /// If this is not set, then reverse substring search is not tested.
+ pub(crate) fn rev(
+ mut self,
+ search: impl FnMut(&[u8], &[u8]) -> Option<Option<usize>> + 'static,
+ ) -> Runner {
+ self.rev = Some(Box::new(search));
+ self
+ }
+}
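For orientation, wiring implementations into this runner looks like the
following sketch; the `naive` module tests further down use exactly this
pattern.

    // Returning `Some(..)` unconditionally means no test is ever skipped.
    substring::Runner::new()
        .fwd(|h, n| Some(naive::find(h, n)))
        .rev(|h, n| Some(naive::rfind(h, n)))
        .run()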
+
+/// A single substring test for forward and reverse searches.
+#[derive(Clone, Debug)]
+struct Test {
+ needle: String,
+ haystack: String,
+ fwd: Option<usize>,
+ rev: Option<usize>,
+}
+
+/// A seed from which a series of substring tests is generated.
+///
+/// Each seed is valid on its own, but it also serves as a starting point
+/// to generate more tests. Namely, we pad out the haystacks with other
+/// characters so that we get more complete coverage. This is especially useful
+/// for testing vector algorithms that tend to have weird special cases for
+/// alignment and loop unrolling.
+///
+/// Padding works by assuming certain characters never otherwise appear in a
+/// needle or a haystack. Neither should contain a `#` character.
+#[derive(Clone, Copy, Debug)]
+struct Seed {
+ needle: &'static str,
+ haystack: &'static str,
+ fwd: Option<usize>,
+ rev: Option<usize>,
+}
+
+impl Seed {
+ const MAX_PAD: usize = 34;
+
+ const fn new(
+ needle: &'static str,
+ haystack: &'static str,
+ fwd: Option<usize>,
+ rev: Option<usize>,
+ ) -> Seed {
+ Seed { needle, haystack, fwd, rev }
+ }
+
+ fn generate(self) -> impl Iterator<Item = Test> {
+ assert!(!self.needle.contains('#'), "needle must not contain '#'");
+ assert!(!self.haystack.contains('#'), "haystack must not contain '#'");
+ (0..=Seed::MAX_PAD)
+ // Generate tests for padding at the beginning of haystack.
+ .map(move |pad| {
+ let needle = self.needle.to_string();
+ let prefix = "#".repeat(pad);
+ let haystack = format!("{}{}", prefix, self.haystack);
+ let fwd = if needle.is_empty() {
+ Some(0)
+ } else {
+ self.fwd.map(|i| pad + i)
+ };
+ let rev = if needle.is_empty() {
+ Some(haystack.len())
+ } else {
+ self.rev.map(|i| pad + i)
+ };
+ Test { needle, haystack, fwd, rev }
+ })
+ // Generate tests for padding at the end of haystack.
+ .chain((1..=Seed::MAX_PAD).map(move |pad| {
+ let needle = self.needle.to_string();
+ let suffix = "#".repeat(pad);
+ let haystack = format!("{}{}", self.haystack, suffix);
+ let fwd = if needle.is_empty() { Some(0) } else { self.fwd };
+ let rev = if needle.is_empty() {
+ Some(haystack.len())
+ } else {
+ self.rev
+ };
+ Test { needle, haystack, fwd, rev }
+ }))
+ }
+}
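A quick worked example (illustrative) of how front padding shifts the
expected offsets:

    // With the seed ("a", "ba", Some(1), Some(1)), the third generated test
    // corresponds to pad == 2: the haystack gains a "##" prefix and both
    // expected positions shift by the pad amount.
    let seed = Seed::new("a", "ba", Some(1), Some(1));
    let t = seed.generate().nth(2).unwrap();
    assert_eq!(t.haystack, "##ba");
    assert_eq!(t.fwd, Some(3));
    assert_eq!(t.rev, Some(3));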
diff --git a/vendor/memchr/src/tests/substring/naive.rs b/vendor/memchr/src/tests/substring/naive.rs
new file mode 100644
index 000000000..1bc600984
--- /dev/null
+++ b/vendor/memchr/src/tests/substring/naive.rs
@@ -0,0 +1,45 @@
+/*!
+This module defines "naive" implementations of substring search.
+
+These are sometimes useful to compare with "real" substring implementations.
+The idea is that they are so simple that they are unlikely to be incorrect.
+*/
+
+/// Naively search forwards for the given needle in the given haystack.
+pub(crate) fn find(haystack: &[u8], needle: &[u8]) -> Option<usize> {
+ let end = haystack.len().checked_sub(needle.len()).map_or(0, |i| i + 1);
+ for i in 0..end {
+ if needle == &haystack[i..i + needle.len()] {
+ return Some(i);
+ }
+ }
+ None
+}
+
+/// Naively search in reverse for the given needle in the given haystack.
+pub(crate) fn rfind(haystack: &[u8], needle: &[u8]) -> Option<usize> {
+ let end = haystack.len().checked_sub(needle.len()).map_or(0, |i| i + 1);
+ for i in (0..end).rev() {
+ if needle == &haystack[i..i + needle.len()] {
+ return Some(i);
+ }
+ }
+ None
+}
+
+#[cfg(test)]
+mod tests {
+ use crate::tests::substring;
+
+ use super::*;
+
+ #[test]
+ fn forward() {
+ substring::Runner::new().fwd(|h, n| Some(find(h, n))).run()
+ }
+
+ #[test]
+ fn reverse() {
+ substring::Runner::new().rev(|h, n| Some(rfind(h, n))).run()
+ }
+}
diff --git a/vendor/memchr/src/tests/substring/prop.rs b/vendor/memchr/src/tests/substring/prop.rs
new file mode 100644
index 000000000..a8352ec74
--- /dev/null
+++ b/vendor/memchr/src/tests/substring/prop.rs
@@ -0,0 +1,126 @@
+/*!
+This module defines a few quickcheck properties for substring search.
+
+It also provides a forward and reverse macro for conveniently defining
+quickcheck tests that run these properties over any substring search
+implementation.
+*/
+
+use crate::tests::substring::naive;
+
+/// `$fwd` is an `impl FnMut(haystack, needle) -> Option<Option<usize>>`. When
+/// the routine returns `None`, the test is skipped, which is useful for
+/// substring implementations that don't work for all inputs.
+#[macro_export]
+macro_rules! define_substring_forward_quickcheck {
+ ($fwd:expr) => {
+ #[cfg(not(miri))]
+ quickcheck::quickcheck! {
+ fn qc_fwd_prefix_is_substring(bs: alloc::vec::Vec<u8>) -> bool {
+ crate::tests::substring::prop::prefix_is_substring(&bs, $fwd)
+ }
+
+ fn qc_fwd_suffix_is_substring(bs: alloc::vec::Vec<u8>) -> bool {
+ crate::tests::substring::prop::suffix_is_substring(&bs, $fwd)
+ }
+
+ fn qc_fwd_matches_naive(
+ haystack: alloc::vec::Vec<u8>,
+ needle: alloc::vec::Vec<u8>
+ ) -> bool {
+ crate::tests::substring::prop::same_as_naive(
+ false,
+ &haystack,
+ &needle,
+ $fwd,
+ )
+ }
+ }
+ };
+}
+
+/// `$rev` is an `impl FnMut(haystack, needle) -> Option<Option<usize>>`. When
+/// the routine returns `None`, the test is skipped, which is useful for
+/// substring implementations that don't work for all inputs.
+#[macro_export]
+macro_rules! define_substring_reverse_quickcheck {
+ ($rev:expr) => {
+ #[cfg(not(miri))]
+ quickcheck::quickcheck! {
+ fn qc_rev_prefix_is_substring(bs: alloc::vec::Vec<u8>) -> bool {
+ crate::tests::substring::prop::prefix_is_substring(&bs, $rev)
+ }
+
+ fn qc_rev_suffix_is_substring(bs: alloc::vec::Vec<u8>) -> bool {
+ crate::tests::substring::prop::suffix_is_substring(&bs, $rev)
+ }
+
+ fn qc_rev_matches_naive(
+ haystack: alloc::vec::Vec<u8>,
+ needle: alloc::vec::Vec<u8>
+ ) -> bool {
+ crate::tests::substring::prop::same_as_naive(
+ true,
+ &haystack,
+ &needle,
+ $rev,
+ )
+ }
+ }
+ };
+}
+
+/// Check that every prefix of the given byte string is a substring.
+pub(crate) fn prefix_is_substring(
+ bs: &[u8],
+ mut search: impl FnMut(&[u8], &[u8]) -> Option<Option<usize>>,
+) -> bool {
+ for i in 0..bs.len().saturating_sub(1) {
+ let prefix = &bs[..i];
+ let result = match search(bs, prefix) {
+ None => continue,
+ Some(result) => result,
+ };
+ if result.is_none() {
+ return false;
+ }
+ }
+ true
+}
+
+/// Check that every suffix of the given byte string is a substring.
+pub(crate) fn suffix_is_substring(
+ bs: &[u8],
+ mut search: impl FnMut(&[u8], &[u8]) -> Option<Option<usize>>,
+) -> bool {
+ for i in 0..bs.len().saturating_sub(1) {
+ let suffix = &bs[i..];
+ let result = match search(bs, suffix) {
+ None => continue,
+ Some(result) => result,
+ };
+ if result.is_none() {
+ return false;
+ }
+ }
+ true
+}
+
+/// Check that naive substring search matches the result of the given search
+/// algorithm.
+pub(crate) fn same_as_naive(
+ reverse: bool,
+ haystack: &[u8],
+ needle: &[u8],
+ mut search: impl FnMut(&[u8], &[u8]) -> Option<Option<usize>>,
+) -> bool {
+ let result = match search(haystack, needle) {
+ None => return true,
+ Some(result) => result,
+ };
+ if reverse {
+ result == naive::rfind(haystack, needle)
+ } else {
+ result == naive::find(haystack, needle)
+ }
+}
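A sketch of a call site (hypothetical, but matching the shape the macro
expects): an implementation that works on all inputs simply wraps its result
in `Some`.

    // Defines three quickcheck properties (prefix, suffix and
    // naive-equivalence) for the given forward searcher.
    define_substring_forward_quickcheck!(|h, n| Some(
        crate::tests::substring::naive::find(h, n)
    ));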
diff --git a/vendor/memchr/src/vector.rs b/vendor/memchr/src/vector.rs
new file mode 100644
index 000000000..f360176c3
--- /dev/null
+++ b/vendor/memchr/src/vector.rs
@@ -0,0 +1,515 @@
+/// A trait for describing vector operations used by vectorized searchers.
+///
+/// The trait is highly constrained to the low-level vector operations needed
+/// here. It was invented mostly to be generic over x86's __m128i and __m256i
+/// types. At the time of writing, it also supports the wasm32 and aarch64
+/// 128-bit vector types.
+///
+/// # Safety
+///
+/// All methods are unsafe since they are intended to be implemented using
+/// vendor intrinsics, which are themselves unsafe. Callers must ensure that the
+/// appropriate target features are enabled in the calling function, and that
+/// the current CPU supports them. All implementations should avoid marking the
+/// routines with #[target_feature] and instead mark them as #[inline(always)]
+/// to ensure they get appropriately inlined. (inline(always) cannot be used
+/// with target_feature.)
+pub(crate) trait Vector: Copy + core::fmt::Debug {
+ /// The number of bits in the vector.
+ const BITS: usize;
+ /// The number of bytes in the vector. That is, this is the size of the
+ /// vector in memory.
+ const BYTES: usize;
+ /// The bits that must be zero in order for a `*const u8` pointer to be
+ /// correctly aligned to read vector values.
+ const ALIGN: usize;
+
+ /// The type of the value returned by `Vector::movemask`.
+ ///
+ /// This supports abstracting over the specific representation used in
+ /// order to accommodate different representations in different ISAs.
+ type Mask: MoveMask;
+
+ /// Create a vector with 8-bit lanes with the given byte repeated into each
+ /// lane.
+ unsafe fn splat(byte: u8) -> Self;
+
+ /// Read a vector-size number of bytes from the given pointer. The pointer
+ /// must be aligned to the size of the vector.
+ ///
+ /// # Safety
+ ///
+ /// Callers must guarantee that at least `BYTES` bytes are readable from
+ /// `data` and that `data` is aligned to a `BYTES` boundary.
+ unsafe fn load_aligned(data: *const u8) -> Self;
+
+ /// Read a vector-size number of bytes from the given pointer. The pointer
+ /// does not need to be aligned.
+ ///
+ /// # Safety
+ ///
+ /// Callers must guarantee that at least `BYTES` bytes are readable from
+ /// `data`.
+ unsafe fn load_unaligned(data: *const u8) -> Self;
+
+ /// _mm_movemask_epi8 or _mm256_movemask_epi8
+ unsafe fn movemask(self) -> Self::Mask;
+ /// _mm_cmpeq_epi8 or _mm256_cmpeq_epi8
+ unsafe fn cmpeq(self, vector2: Self) -> Self;
+ /// _mm_and_si128 or _mm256_and_si256
+ unsafe fn and(self, vector2: Self) -> Self;
+ /// _mm_or_si128 or _mm256_or_si256
+ unsafe fn or(self, vector2: Self) -> Self;
+ /// Returns true if and only if `Self::movemask` would return a mask that
+ /// contains at least one non-zero bit.
+ unsafe fn movemask_will_have_non_zero(self) -> bool {
+ self.movemask().has_non_zero()
+ }
+}
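To see how these operations compose, here is a minimal sketch (not part of
this patch) of a one-chunk search kernel written against the trait; real
searchers add alignment handling and loop unrolling on top of this.

    // Find the first occurrence of `byte` in the `V::BYTES` bytes at `start`.
    //
    // Safety: `start` must point at at least `V::BYTES` readable bytes, and
    // the target features required by `V` must be enabled.
    unsafe fn find_in_chunk<V: Vector>(
        start: *const u8,
        byte: u8,
    ) -> Option<usize> {
        let chunk = V::load_unaligned(start);
        let mask = chunk.cmpeq(V::splat(byte)).movemask();
        if mask.has_non_zero() {
            Some(mask.first_offset())
        } else {
            None
        }
    }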
+
+/// A trait that abstracts over a vector-to-scalar operation called
+/// "move mask."
+///
+/// On x86-64, this is `_mm_movemask_epi8` for SSE2 and `_mm256_movemask_epi8`
+/// for AVX2. It takes a vector of `u8` lanes and returns a scalar where the
+/// `i`th bit is set if and only if the most significant bit in the `i`th lane
+/// of the vector is set. The simd128 ISA for wasm32 also supports this
+/// exact same operation natively.
+///
+/// ... But aarch64 doesn't. So we have to fake it with more instructions and
+/// a slightly different representation. We could do extra work to unify the
+/// representations, but that would impose additional costs in the hot path
+/// for `memchr` and `packedpair`. So instead, we abstract over the specific
+/// representation with this trait and define the operations we actually need.
+pub(crate) trait MoveMask: Copy + core::fmt::Debug {
+ /// Return a mask that is all zeros except for the least significant `n`
+ /// lanes in a corresponding vector.
+ fn all_zeros_except_least_significant(n: usize) -> Self;
+
+ /// Returns true if and only if this mask has a non-zero bit anywhere.
+ fn has_non_zero(self) -> bool;
+
+ /// Returns the number of bits set to 1 in this mask.
+ fn count_ones(self) -> usize;
+
+ /// Does a bitwise `and` operation between `self` and `other`.
+ fn and(self, other: Self) -> Self;
+
+ /// Does a bitwise `or` operation between `self` and `other`.
+ fn or(self, other: Self) -> Self;
+
+ /// Returns a mask that is equivalent to `self` but with the least
+ /// significant 1-bit set to 0.
+ fn clear_least_significant_bit(self) -> Self;
+
+ /// Returns the offset of the first non-zero lane this mask represents.
+ fn first_offset(self) -> usize;
+
+ /// Returns the offset of the last non-zero lane this mask represents.
+ fn last_offset(self) -> usize;
+}
+
+/// This is a "sensible" movemask implementation where each bit represents
+/// whether the most significant bit is set in each corresponding lane of a
+/// vector. This is used on x86-64 and wasm, but such a mask is more expensive
+/// to get on aarch64 so we use something a little different.
+///
+/// We call this "sensible" because this is what we get using native sse/avx
+/// movemask instructions. But neon has no such native equivalent.
+#[derive(Clone, Copy, Debug)]
+pub(crate) struct SensibleMoveMask(u32);
+
+impl SensibleMoveMask {
+ /// Get the mask in a form suitable for computing offsets.
+ ///
+ /// Basically, this normalizes to little endian. On big endian, this swaps
+ /// the bytes.
+ #[inline(always)]
+ fn get_for_offset(self) -> u32 {
+ #[cfg(target_endian = "big")]
+ {
+ self.0.swap_bytes()
+ }
+ #[cfg(target_endian = "little")]
+ {
+ self.0
+ }
+ }
+}
+
+impl MoveMask for SensibleMoveMask {
+ #[inline(always)]
+ fn all_zeros_except_least_significant(n: usize) -> SensibleMoveMask {
+ debug_assert!(n < 32);
+ SensibleMoveMask(!((1 << n) - 1))
+ }
+
+ #[inline(always)]
+ fn has_non_zero(self) -> bool {
+ self.0 != 0
+ }
+
+ #[inline(always)]
+ fn count_ones(self) -> usize {
+ self.0.count_ones() as usize
+ }
+
+ #[inline(always)]
+ fn and(self, other: SensibleMoveMask) -> SensibleMoveMask {
+ SensibleMoveMask(self.0 & other.0)
+ }
+
+ #[inline(always)]
+ fn or(self, other: SensibleMoveMask) -> SensibleMoveMask {
+ SensibleMoveMask(self.0 | other.0)
+ }
+
+ #[inline(always)]
+ fn clear_least_significant_bit(self) -> SensibleMoveMask {
+ SensibleMoveMask(self.0 & (self.0 - 1))
+ }
+
+ #[inline(always)]
+ fn first_offset(self) -> usize {
+ // We are dealing with little endian here (and if we aren't, we swap
+ // the bytes so we are in practice), where the most significant byte
+ // is at a higher address. That means the least significant bit that
+ // is set corresponds to the position of our first matching byte.
+ // That position corresponds to the number of zeros after the least
+ // significant bit.
+ self.get_for_offset().trailing_zeros() as usize
+ }
+
+ #[inline(always)]
+ fn last_offset(self) -> usize {
+ // We are dealing with little endian here (and if we aren't, we swap
+ // the bytes so we are in practice), where the most significant byte is
+ // at a higher address. That means the most significant bit that is set
+ // corresponds to the position of our last matching byte. The position
+ // from the end of the mask is therefore the number of leading zeros
+ // in a 32 bit integer, and the position from the start of the mask is
+ // therefore 32 - (leading zeros) - 1.
+ 32 - self.get_for_offset().leading_zeros() as usize - 1
+ }
+}
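A worked example (illustrative) of the offset math: if lanes 1 and 4 of a
16-lane vector matched, `movemask` produces `0b10010`.

    let m = SensibleMoveMask(0b10010);
    assert_eq!(m.first_offset(), 1); // 0b10010 has one trailing zero
    assert_eq!(m.last_offset(), 4);  // 32 - 27 leading zeros - 1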
+
+#[cfg(target_arch = "x86_64")]
+mod x86sse2 {
+ use core::arch::x86_64::*;
+
+ use super::{SensibleMoveMask, Vector};
+
+ impl Vector for __m128i {
+ const BITS: usize = 128;
+ const BYTES: usize = 16;
+ const ALIGN: usize = Self::BYTES - 1;
+
+ type Mask = SensibleMoveMask;
+
+ #[inline(always)]
+ unsafe fn splat(byte: u8) -> __m128i {
+ _mm_set1_epi8(byte as i8)
+ }
+
+ #[inline(always)]
+ unsafe fn load_aligned(data: *const u8) -> __m128i {
+ _mm_load_si128(data as *const __m128i)
+ }
+
+ #[inline(always)]
+ unsafe fn load_unaligned(data: *const u8) -> __m128i {
+ _mm_loadu_si128(data as *const __m128i)
+ }
+
+ #[inline(always)]
+ unsafe fn movemask(self) -> SensibleMoveMask {
+ SensibleMoveMask(_mm_movemask_epi8(self) as u32)
+ }
+
+ #[inline(always)]
+ unsafe fn cmpeq(self, vector2: Self) -> __m128i {
+ _mm_cmpeq_epi8(self, vector2)
+ }
+
+ #[inline(always)]
+ unsafe fn and(self, vector2: Self) -> __m128i {
+ _mm_and_si128(self, vector2)
+ }
+
+ #[inline(always)]
+ unsafe fn or(self, vector2: Self) -> __m128i {
+ _mm_or_si128(self, vector2)
+ }
+ }
+}
+
+#[cfg(target_arch = "x86_64")]
+mod x86avx2 {
+ use core::arch::x86_64::*;
+
+ use super::{SensibleMoveMask, Vector};
+
+ impl Vector for __m256i {
+ const BITS: usize = 256;
+ const BYTES: usize = 32;
+ const ALIGN: usize = Self::BYTES - 1;
+
+ type Mask = SensibleMoveMask;
+
+ #[inline(always)]
+ unsafe fn splat(byte: u8) -> __m256i {
+ _mm256_set1_epi8(byte as i8)
+ }
+
+ #[inline(always)]
+ unsafe fn load_aligned(data: *const u8) -> __m256i {
+ _mm256_load_si256(data as *const __m256i)
+ }
+
+ #[inline(always)]
+ unsafe fn load_unaligned(data: *const u8) -> __m256i {
+ _mm256_loadu_si256(data as *const __m256i)
+ }
+
+ #[inline(always)]
+ unsafe fn movemask(self) -> SensibleMoveMask {
+ SensibleMoveMask(_mm256_movemask_epi8(self) as u32)
+ }
+
+ #[inline(always)]
+ unsafe fn cmpeq(self, vector2: Self) -> __m256i {
+ _mm256_cmpeq_epi8(self, vector2)
+ }
+
+ #[inline(always)]
+ unsafe fn and(self, vector2: Self) -> __m256i {
+ _mm256_and_si256(self, vector2)
+ }
+
+ #[inline(always)]
+ unsafe fn or(self, vector2: Self) -> __m256i {
+ _mm256_or_si256(self, vector2)
+ }
+ }
+}
+
+#[cfg(target_arch = "aarch64")]
+mod aarch64neon {
+ use core::arch::aarch64::*;
+
+ use super::{MoveMask, Vector};
+
+ impl Vector for uint8x16_t {
+ const BITS: usize = 128;
+ const BYTES: usize = 16;
+ const ALIGN: usize = Self::BYTES - 1;
+
+ type Mask = NeonMoveMask;
+
+ #[inline(always)]
+ unsafe fn splat(byte: u8) -> uint8x16_t {
+ vdupq_n_u8(byte)
+ }
+
+ #[inline(always)]
+ unsafe fn load_aligned(data: *const u8) -> uint8x16_t {
+ // I've tried `data.cast::<uint8x16_t>().read()` instead, but
+ // couldn't observe any benchmark differences.
+ Self::load_unaligned(data)
+ }
+
+ #[inline(always)]
+ unsafe fn load_unaligned(data: *const u8) -> uint8x16_t {
+ vld1q_u8(data)
+ }
+
+ #[inline(always)]
+ unsafe fn movemask(self) -> NeonMoveMask {
+ let asu16s = vreinterpretq_u16_u8(self);
+ let mask = vshrn_n_u16(asu16s, 4);
+ let asu64 = vreinterpret_u64_u8(mask);
+ let scalar64 = vget_lane_u64(asu64, 0);
+ NeonMoveMask(scalar64 & 0x8888888888888888)
+ }
+
+ #[inline(always)]
+ unsafe fn cmpeq(self, vector2: Self) -> uint8x16_t {
+ vceqq_u8(self, vector2)
+ }
+
+ #[inline(always)]
+ unsafe fn and(self, vector2: Self) -> uint8x16_t {
+ vandq_u8(self, vector2)
+ }
+
+ #[inline(always)]
+ unsafe fn or(self, vector2: Self) -> uint8x16_t {
+ vorrq_u8(self, vector2)
+ }
+
+ /// This is the only interesting implementation of this routine.
+ /// Basically, instead of doing the "shift right narrow" dance, we use
+ /// adjacent folding max to determine whether there are any non-zero
+ /// bytes in our mask. If there are, *then* we'll do the "shift right
+ /// narrow" dance. In benchmarks, this does lead to slightly better
+ /// throughput, but the win doesn't appear huge.
+ #[inline(always)]
+ unsafe fn movemask_will_have_non_zero(self) -> bool {
+ let low = vreinterpretq_u64_u8(vpmaxq_u8(self, self));
+ vgetq_lane_u64(low, 0) != 0
+ }
+ }
+
+ /// Neon doesn't have a `movemask` that works like the one in x86-64, so we
+ /// wind up using a different method[1]. The different method also produces
+ /// a mask, but 4 bits are set in the neon case instead of a single bit set
+ /// in the x86-64 case. We do an extra step to zero out 3 of the 4 bits,
+ /// but we still wind up with at least 3 zeroes between each set bit. This
+ /// generally means that we need to do some division by 4 before extracting
+ /// offsets.
+ ///
+ /// In fact, the existence of this type is the entire reason that we have
+ /// the `MoveMask` trait in the first place. This basically lets us keep
+ /// the different representations of masks without being forced to unify
+ /// them into a single representation, which could result in extra and
+ /// unnecessary work.
+ ///
+ /// [1]: https://community.arm.com/arm-community-blogs/b/infrastructure-solutions-blog/posts/porting-x86-vector-bitmask-optimizations-to-arm-neon
+ #[derive(Clone, Copy, Debug)]
+ pub(crate) struct NeonMoveMask(u64);
+
+ impl NeonMoveMask {
+ /// Get the mask in a form suitable for computing offsets.
+ ///
+ /// Basically, this normalizes to little endian. On big endian, this
+ /// swaps the bytes.
+ #[inline(always)]
+ fn get_for_offset(self) -> u64 {
+ #[cfg(target_endian = "big")]
+ {
+ self.0.swap_bytes()
+ }
+ #[cfg(target_endian = "little")]
+ {
+ self.0
+ }
+ }
+ }
+
+ impl MoveMask for NeonMoveMask {
+ #[inline(always)]
+ fn all_zeros_except_least_significant(n: usize) -> NeonMoveMask {
+ debug_assert!(n < 16);
+ NeonMoveMask(!(((1 << n) << 2) - 1))
+ }
+
+ #[inline(always)]
+ fn has_non_zero(self) -> bool {
+ self.0 != 0
+ }
+
+ #[inline(always)]
+ fn count_ones(self) -> usize {
+ self.0.count_ones() as usize
+ }
+
+ #[inline(always)]
+ fn and(self, other: NeonMoveMask) -> NeonMoveMask {
+ NeonMoveMask(self.0 & other.0)
+ }
+
+ #[inline(always)]
+ fn or(self, other: NeonMoveMask) -> NeonMoveMask {
+ NeonMoveMask(self.0 | other.0)
+ }
+
+ #[inline(always)]
+ fn clear_least_significant_bit(self) -> NeonMoveMask {
+ NeonMoveMask(self.0 & (self.0 - 1))
+ }
+
+ #[inline(always)]
+ fn first_offset(self) -> usize {
+ // We are dealing with little endian here (and if we aren't,
+ // we swap the bytes so we are in practice), where the most
+ // significant byte is at a higher address. That means the least
+ // significant bit that is set corresponds to the position of our
+ // first matching byte. That position corresponds to the number of
+ // zeros after the least significant bit.
+ //
+ // Note that unlike `SensibleMoveMask`, this mask has its bits
+ // spread out over 64 bits instead of 16 bits (for a 128 bit
+ // vector). Namely, whereas x86-64 will turn
+ //
+ // 0x00 0xFF 0x00 0x00 0xFF
+ //
+ // into 10010, our neon approach will turn it into
+ //
+ // 10000000000010000000
+ //
+ // And this happens because neon doesn't have a native `movemask`
+ // instruction, so we kind of fake it[1]. Thus, we divide the
+ // number of trailing zeros by 4 to get the "real" offset.
+ //
+ // [1]: https://community.arm.com/arm-community-blogs/b/infrastructure-solutions-blog/posts/porting-x86-vector-bitmask-optimizations-to-arm-neon
+ (self.get_for_offset().trailing_zeros() >> 2) as usize
+ }
+
+ #[inline(always)]
+ fn last_offset(self) -> usize {
+ // See comment in `first_offset` above. This is basically the same,
+ // but coming from the other direction.
+ 16 - (self.get_for_offset().leading_zeros() >> 2) as usize - 1
+ }
+ }
+}
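For contrast with `SensibleMoveMask`, a worked example (illustrative) of the
spread-out neon representation: matches in lanes 1 and 4 set bits 4*1+3 = 7
and 4*4+3 = 19, and the lane offsets fall out after dividing by 4.

    let m = NeonMoveMask((1 << 7) | (1 << 19));
    assert_eq!(m.first_offset(), 1); // trailing_zeros() == 7, then >> 2
    assert_eq!(m.last_offset(), 4);  // 16 - (44 >> 2) - 1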
+
+#[cfg(target_arch = "wasm32")]
+mod wasm_simd128 {
+ use core::arch::wasm32::*;
+
+ use super::{SensibleMoveMask, Vector};
+
+ impl Vector for v128 {
+ const BITS: usize = 128;
+ const BYTES: usize = 16;
+ const ALIGN: usize = Self::BYTES - 1;
+
+ type Mask = SensibleMoveMask;
+
+ #[inline(always)]
+ unsafe fn splat(byte: u8) -> v128 {
+ u8x16_splat(byte)
+ }
+
+ #[inline(always)]
+ unsafe fn load_aligned(data: *const u8) -> v128 {
+ *data.cast()
+ }
+
+ #[inline(always)]
+ unsafe fn load_unaligned(data: *const u8) -> v128 {
+ v128_load(data.cast())
+ }
+
+ #[inline(always)]
+ unsafe fn movemask(self) -> SensibleMoveMask {
+ SensibleMoveMask(u8x16_bitmask(self).into())
+ }
+
+ #[inline(always)]
+ unsafe fn cmpeq(self, vector2: Self) -> v128 {
+ u8x16_eq(self, vector2)
+ }
+
+ #[inline(always)]
+ unsafe fn and(self, vector2: Self) -> v128 {
+ v128_and(self, vector2)
+ }
+
+ #[inline(always)]
+ unsafe fn or(self, vector2: Self) -> v128 {
+ v128_or(self, vector2)
+ }
+ }
+}