diff options
author | Daniel Baumann <daniel.baumann@progress-linux.org> | 2024-04-28 14:29:10 +0000 |
---|---|---|
committer | Daniel Baumann <daniel.baumann@progress-linux.org> | 2024-04-28 14:29:10 +0000 |
commit | 2aa4a82499d4becd2284cdb482213d541b8804dd (patch) | |
tree | b80bf8bf13c3766139fbacc530efd0dd9d54394c /third_party/rust/rkv | |
parent | Initial commit. (diff) | |
download | firefox-2aa4a82499d4becd2284cdb482213d541b8804dd.tar.xz firefox-2aa4a82499d4becd2284cdb482213d541b8804dd.zip |
Adding upstream version 86.0.1.upstream/86.0.1upstream
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'third_party/rust/rkv')
62 files changed, 11502 insertions, 0 deletions
diff --git a/third_party/rust/rkv/.cargo-checksum.json b/third_party/rust/rkv/.cargo-checksum.json new file mode 100644 index 0000000000..9f095c753a --- /dev/null +++ b/third_party/rust/rkv/.cargo-checksum.json @@ -0,0 +1 @@ +{"files":{"CODE_OF_CONDUCT.md":"902d5357af363426631d907e641e220b3ec89039164743f8442b3f120479b7cf","Cargo.lock":"9fadf20d1ee1a5bf5c567ce34e74be644f0a798bbe1e2e3845fca1f9aa9e6b98","Cargo.toml":"9e1d053b76252691624ea2f4e43f2bcebfe1ee7f0bfd34f44e28dfe21d5dc27c","LICENSE":"cfc7749b96f63bd31c3c42b5c471bf756814053e847c10f3eb003417bc523d30","README.md":"f80ce3b29b0cf54927d2bb4457e0b5691a2e6b56a5fc140c551c3cee68f242ae","examples/README.md":"143767fc145bf167ce269a65138cb3f7086cb715b8bc4f73626da82966e646f4","examples/iterator.rs":"844acdb7b81b95547502dddead0095dd98c4d6be63a7a765c97c354c02ee38d1","examples/simple-store.rs":"56c403307cd8a7644baa7831e7a400df0b2d2df25a854b2f3441abff78a54227","run-all-examples.sh":"7f9d11d01017f77e1c9d26e3e82dfca8c6930deaec85e864458e33a7fa267de0","src/backend.rs":"091d6c8aed4782f7e19079aaf2a25db0db02c969edc8058f882dde47137cc983","src/backend/common.rs":"3dd1b7fbe0c62b0731904358b11f82054c918044de6271c502fb0d38b813b67d","src/backend/impl_lmdb.rs":"2ad9749017613defe11d854480d6d7b3db04ec5d2f43f099be30d01da2c59a16","src/backend/impl_lmdb/arch_migrator.rs":"eeaafcb328f7b9e6deb24d5f8ac7c27d114a8e08bdfcc3f2df588178ee234f83","src/backend/impl_lmdb/arch_migrator_error.rs":"ee15d7bee9e12a037e6b7423acf05040672913e0d86ff632d826eaaa3188eecb","src/backend/impl_lmdb/cursor.rs":"77a7611f8638c5358f74f79cccde18581d3f6078f72c3a8419abde144791bfdf","src/backend/impl_lmdb/database.rs":"c52ab76a4389c525a998eef0302e709d57a22f2627a967b2246e98ae15f4a999","src/backend/impl_lmdb/environment.rs":"dac99f2341e2ca581bed013385b6a33f76df30497e15d78cb890e916000f547b","src/backend/impl_lmdb/error.rs":"856782cb1ffda0c0cdd0f0c783762412db35b8b6b6a0aefaf36974fe169ec5a2","src/backend/impl_lmdb/flags.rs":"861144973a397372cc572999ba8f87a538f48d3b9d4f8233ad78ce76cd2d40b1
","src/backend/impl_lmdb/info.rs":"e44d9100c0acc179263f41c70d2d139faa1b19efe6948c951958986c5fc90dbf","src/backend/impl_lmdb/iter.rs":"ba4fd8b287b785e2567dd819d52ce58cf5bd73096ac59675ac11b21a06885d8a","src/backend/impl_lmdb/stat.rs":"ec3100fee914cfe4805a7c588344c707c027bad7b37b43b7455baa0772cb64f9","src/backend/impl_lmdb/transaction.rs":"5ecf5c86148e7c2cc62f89abb1e571f6f355989d6bb48af44a3435e5222260e7","src/backend/impl_safe.rs":"df796cd3b43bd973f213f7616c396ac4f006759bef3be8fd5f1792a82e26baeb","src/backend/impl_safe/cursor.rs":"7fb0d39ee8b2ea69f9ac8b733c1a8f487dfa814486821bc6a8bb1b656539942c","src/backend/impl_safe/database.rs":"7b03bbe7ec8183af06f2376028049ad638e8e778b9686364e13f06a63f7102c4","src/backend/impl_safe/environment.rs":"64893437feff6e96b9e54d307ed5b676ed47c46fc4433db5f405806523bfb92a","src/backend/impl_safe/error.rs":"5a41b7b8cc059abd485c8f740593e35d2d3a4e90466995e954711113f79da200","src/backend/impl_safe/flags.rs":"8775cfab62a78466184310bffb7c0f16c51c4b6d941571348a1ac5ece76a6de0","src/backend/impl_safe/info.rs":"c9dc67d989394dd7a25e99681e4a25f19b4ca3992eb18b719fb89742fae635b2","src/backend/impl_safe/iter.rs":"b98b54b51b474cb1e31f90136b64871baff6c31261d60bd4f79faa329768f2e8","src/backend/impl_safe/snapshot.rs":"de83b5feffcb2603e64c4f53314c4b033fbc3289c88076be85cc33eec88b1d43","src/backend/impl_safe/stat.rs":"77ea9937c2ff839cba4ed5597b4804550915d5c50fce0fc86133bf23cff49d95","src/backend/impl_safe/transaction.rs":"32fbda2c7aeacb0c485cc91766558708c038181f3e8bc613c4b6ba1a55c0c8d1","src/backend/traits.rs":"da7e4440e6e59575a15899df6a4d6930f9107b180b95f24fe727b8e62dcefd65","src/bin/dump.rs":"78929424ec2e9d9f155e4eab9b118a6f478caae422db734302fa254a816e5be1","src/bin/rand.rs":"e3a2da9bb449aa9b54e8930c2a54c8a509300283a612360802f9182ae9db5ce4","src/env.rs":"80c5f175a2e7c6a278d59618d083010f0ef523b00b312ac1208db879ada5181b","src/error.rs":"53e81718c9295f6cc9417af69bf8c33d67e2761e1ddcc249a06f8b36e4c4e541","src/helpers.rs":"2565e271d6edac3e2551d9fdede00a4348c98ddd2df6
d95ef08112ced4397f28","src/lib.rs":"7490eaecffed4145eaa82d996de450521a7877babf91e28f7d4687bd7e4c6f41","src/manager.rs":"aca3092979841805c6b54c5a3b41b98e3708c5f8e5d0a4f35f6e488703ae83f4","src/migrator.rs":"f1b63a6e3172f3c939693ae3d2bcef993bce6468ae58f73ea8691c79647917ba","src/readwrite.rs":"d4296a27458119c47275b230e0d94740c249e05921b8ac7ecbae4c91c92bca0a","src/store.rs":"607eff19464b0deed5d2f61d989f4370f30fd0b16e20a454eeffa51cf2175868","src/store/integer.rs":"d72ffc052bc3f3d91987ae4afaae4fca819f0ffa7155c83c64b78eb6081055a3","src/store/integermulti.rs":"b807b896582dca59d341a99cd5c7539ea8cebbffadc85072c81dde1f15d0ee43","src/store/keys.rs":"584bf897df7a0a10dd2841cf31cb693124748757902374d215b4d7caf79baae5","src/store/keys/encodables.rs":"d8b5dd6f49cab4a44b6f638c473ad372f589669e7ef9bd6935aa960840b95063","src/store/keys/primitives.rs":"f714e54dd6507e048cf867deecd15a5a452298b8255575aa9eb1c67317fff5dd","src/store/multi.rs":"6337401b68ac61022e4f1668764cd7d4fa00357653db488f61eb7d3ed5424145","src/store/single.rs":"3dc8cab214af5169cb1e34072621e55e40043da0ef6609138a9df1d3f1415a3b","src/value.rs":"4ccf8de44934b8c1baaff29b7993e6c442ecfa2380e73ee37d2eca5aad310a60","tests/env-all.rs":"3ad08161ae79e793241180b0f716f2e9981504cf24a9f348de958944b9e54653","tests/env-lmdb.rs":"8f48bd097f1b18fc9e61518d82086246853ff9cd965241ccf765371e7e668273","tests/env-migration.rs":"55eb74d11f7e718d9ec3a39746e76156cd966d2fb5092a4edec14f486cbd14dc","tests/env-safe.rs":"4b768a29f68f6d832204e0130bcd058bf1d9f684641238e5f713c3fc7e908ddf","tests/integer-store.rs":"2deaeee18ea945f54ef5ba7d85917e13e56e46b7af5de03f678f1c4a99d67292","tests/manager.rs":"029bad15cf0f7f323eaa2a529c9c54c27e4738e6fead0bdd0c48899b7422e361","tests/multi-integer-store.rs":"e53f4753fa3fd8891404048aa533fafe3c1d58230adf1a1a23d31d3c421c82b2","tests/test_txn.rs":"4ff987baab7c29db32d472e47279c06832663aa10c67268b23639e96f76a4dcd"},"package":"917d7a01f8c1ae46226e9d8dd24314279be7b04dfd0b24340d420e6927c2e687"}
\ No newline at end of file diff --git a/third_party/rust/rkv/CODE_OF_CONDUCT.md b/third_party/rust/rkv/CODE_OF_CONDUCT.md new file mode 100644 index 0000000000..498baa3fb0 --- /dev/null +++ b/third_party/rust/rkv/CODE_OF_CONDUCT.md @@ -0,0 +1,15 @@ +# Community Participation Guidelines + +This repository is governed by Mozilla's code of conduct and etiquette guidelines. +For more details, please read the +[Mozilla Community Participation Guidelines](https://www.mozilla.org/about/governance/policies/participation/). + +## How to Report +For more information on how to report violations of the Community Participation Guidelines, please read our '[How to Report](https://www.mozilla.org/about/governance/policies/participation/reporting/)' page. + +<!-- +## Project Specific Etiquette + +In some cases, there will be additional project etiquette i.e.: (https://bugzilla.mozilla.org/page.cgi?id=etiquette.html). +Please update for your project. +--> diff --git a/third_party/rust/rkv/Cargo.lock b/third_party/rust/rkv/Cargo.lock new file mode 100644 index 0000000000..f1af591c95 --- /dev/null +++ b/third_party/rust/rkv/Cargo.lock @@ -0,0 +1,492 @@ +# This file is automatically @generated by Cargo. +# It is not intended for manual editing. 
+[[package]] +name = "addr2line" +version = "0.13.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1b6a2d3371669ab3ca9797670853d61402b03d0b4b9ebf33d677dfa720203072" +dependencies = [ + "gimli", +] + +[[package]] +name = "adler" +version = "0.2.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ee2a4ec343196209d6594e19543ae87a39f96d5534d7174822a3ad825dd6ed7e" + +[[package]] +name = "arrayref" +version = "0.3.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a4c527152e37cf757a3f78aae5a06fbeefdb07ccc535c980a3208ee3060dd544" + +[[package]] +name = "autocfg" +version = "1.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f8aac770f1885fd7e387acedd76065302551364496e46b3dd00860b2f8359b9d" + +[[package]] +name = "backtrace" +version = "0.3.50" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "46254cf2fdcdf1badb5934448c1bcbe046a56537b3987d96c51a7afc5d03f293" +dependencies = [ + "addr2line", + "cfg-if", + "libc", + "miniz_oxide", + "object", + "rustc-demangle", +] + +[[package]] +name = "bincode" +version = "1.3.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f30d3a39baa26f9651f17b375061f3233dde33424a8b72b0dbe93a68a0bc896d" +dependencies = [ + "byteorder", + "serde", +] + +[[package]] +name = "bitflags" +version = "1.2.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "cf1de2fe8c75bc145a2f577add951f8134889b4795d47466a54a5c846d691693" + +[[package]] +name = "byteorder" +version = "1.3.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "08c48aae112d48ed9f069b33538ea9e3e90aa263cfa3d1c24309612b1f7472de" + +[[package]] +name = "cc" +version = "1.0.58" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f9a06fb2e53271d7c279ec1efea6ab691c35a2ae67ec0d91d7acec0caf13b518" + +[[package]] +name = "cfg-if" 
+version = "0.1.10" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4785bdd1c96b2a846b2bd7cc02e86b6b3dbf14e7e53446c4f54c92a361040822" + +[[package]] +name = "failure" +version = "0.1.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d32e9bd16cc02eae7db7ef620b392808b89f6a5e16bb3497d159c6b92a0f4f86" +dependencies = [ + "backtrace", + "failure_derive", +] + +[[package]] +name = "failure_derive" +version = "0.1.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "aa4da3c766cd7a0db8242e326e9e4e081edd567072893ed320008189715366a4" +dependencies = [ + "proc-macro2", + "quote", + "syn", + "synstructure", +] + +[[package]] +name = "getrandom" +version = "0.1.14" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7abc8dd8451921606d809ba32e95b6111925cd2906060d2dcc29c070220503eb" +dependencies = [ + "cfg-if", + "libc", + "wasi", +] + +[[package]] +name = "gimli" +version = "0.22.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "aaf91faf136cb47367fa430cd46e37a788775e7fa104f8b4bcb3861dc389b724" + +[[package]] +name = "id-arena" +version = "2.2.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "25a2bc672d1148e28034f176e01fffebb08b35768468cc954630da77a1449005" + +[[package]] +name = "idna" +version = "0.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "02e2673c30ee86b5b96a9cb52ad15718aa1f966f5ab9ad54a8b95d5ca33120a9" +dependencies = [ + "matches", + "unicode-bidi", + "unicode-normalization", +] + +[[package]] +name = "lazy_static" +version = "1.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e2abad23fbc42b3700f2f279844dc832adb2b2eb069b2df918f455c4e18cc646" + +[[package]] +name = "libc" +version = "0.2.72" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"a9f8082297d534141b30c8d39e9b1773713ab50fdbe4ff30f750d063b3bfd701" + +[[package]] +name = "lmdb-rkv" +version = "0.14.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "447a296f7aca299cfbb50f4e4f3d49451549af655fb7215d7f8c0c3d64bad42b" +dependencies = [ + "bitflags", + "byteorder", + "libc", + "lmdb-rkv-sys", +] + +[[package]] +name = "lmdb-rkv-sys" +version = "0.11.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b27470ac25167b3afdfb6af8fcd3bc1be67de50ffbdaf4073378cfded6ae24a5" +dependencies = [ + "cc", + "libc", + "pkg-config", +] + +[[package]] +name = "log" +version = "0.4.11" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4fabed175da42fed1fa0746b0ea71f412aa9d35e76e95e59b192c64b9dc2bf8b" +dependencies = [ + "cfg-if", +] + +[[package]] +name = "matches" +version = "0.1.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7ffc5c5338469d4d3ea17d269fa8ea3512ad247247c30bd2df69e68309ed0a08" + +[[package]] +name = "miniz_oxide" +version = "0.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "be0f75932c1f6cfae3c04000e40114adf955636e19040f9c0a2c380702aa1c7f" +dependencies = [ + "adler", +] + +[[package]] +name = "num-traits" +version = "0.2.12" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ac267bcc07f48ee5f8935ab0d24f316fb722d7a1292e2913f0cc196b29ffd611" +dependencies = [ + "autocfg", +] + +[[package]] +name = "object" +version = "0.20.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1ab52be62400ca80aa00285d25253d7f7c437b7375c4de678f5405d3afe82ca5" + +[[package]] +name = "ordered-float" +version = "1.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3741934be594d77de1c8461ebcbbe866f585ea616a9753aa78f2bdc69f0e4579" +dependencies = [ + "num-traits", +] + +[[package]] +name = "paste" +version = "0.1.18" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "45ca20c77d80be666aef2b45486da86238fabe33e38306bd3118fe4af33fa880" +dependencies = [ + "paste-impl", + "proc-macro-hack", +] + +[[package]] +name = "paste-impl" +version = "0.1.18" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d95a7db200b97ef370c8e6de0088252f7e0dfff7d047a28528e47456c0fc98b6" +dependencies = [ + "proc-macro-hack", +] + +[[package]] +name = "percent-encoding" +version = "2.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d4fd5641d01c8f18a23da7b6fe29298ff4b55afcccdf78973b24cf3175fee32e" + +[[package]] +name = "pkg-config" +version = "0.3.18" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d36492546b6af1463394d46f0c834346f31548646f6ba10849802c9c9a27ac33" + +[[package]] +name = "ppv-lite86" +version = "0.2.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "237a5ed80e274dbc66f86bd59c1e25edc039660be53194b5fe0a482e0f2612ea" + +[[package]] +name = "proc-macro-hack" +version = "0.5.16" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7e0456befd48169b9f13ef0f0ad46d492cf9d2dbb918bcf38e01eed4ce3ec5e4" + +[[package]] +name = "proc-macro2" +version = "1.0.18" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "beae6331a816b1f65d04c45b078fd8e6c93e8071771f41b8163255bbd8d7c8fa" +dependencies = [ + "unicode-xid", +] + +[[package]] +name = "quote" +version = "1.0.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "aa563d17ecb180e500da1cfd2b028310ac758de548efdd203e18f283af693f37" +dependencies = [ + "proc-macro2", +] + +[[package]] +name = "rand" +version = "0.7.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6a6b1679d49b24bbfe0c803429aa1874472f50d9b363131f0e89fc356b544d03" +dependencies = [ + "getrandom", + "libc", + "rand_chacha", + "rand_core", + 
"rand_hc", +] + +[[package]] +name = "rand_chacha" +version = "0.2.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f4c8ed856279c9737206bf725bf36935d8666ead7aa69b52be55af369d193402" +dependencies = [ + "ppv-lite86", + "rand_core", +] + +[[package]] +name = "rand_core" +version = "0.5.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "90bde5296fc891b0cef12a6d03ddccc162ce7b2aff54160af9338f8d40df6d19" +dependencies = [ + "getrandom", +] + +[[package]] +name = "rand_hc" +version = "0.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ca3129af7b92a17112d59ad498c6f81eaf463253766b90396d39ea7a39d6613c" +dependencies = [ + "rand_core", +] + +[[package]] +name = "redox_syscall" +version = "0.1.57" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "41cc0f7e4d5d4544e8861606a285bb08d3e70712ccc7d2b84d7c0ccfaf4b05ce" + +[[package]] +name = "remove_dir_all" +version = "0.5.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3acd125665422973a33ac9d3dd2df85edad0f4ae9b00dafb1a05e43a9f5ef8e7" +dependencies = [ + "winapi", +] + +[[package]] +name = "rkv" +version = "0.16.1" +dependencies = [ + "arrayref", + "bincode", + "bitflags", + "byteorder", + "failure", + "id-arena", + "lazy_static", + "lmdb-rkv", + "log", + "ordered-float", + "paste", + "serde", + "serde_derive", + "tempfile", + "url", + "uuid", +] + +[[package]] +name = "rustc-demangle" +version = "0.1.16" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4c691c0e608126e00913e33f0ccf3727d5fc84573623b8d65b2df340b5201783" + +[[package]] +name = "serde" +version = "1.0.114" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5317f7588f0a5078ee60ef675ef96735a1442132dc645eb1d12c018620ed8cd3" +dependencies = [ + "serde_derive", +] + +[[package]] +name = "serde_derive" +version = "1.0.114" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "2a0be94b04690fbaed37cddffc5c134bf537c8e3329d53e982fe04c374978f8e" +dependencies = [ + "proc-macro2", + "quote", + "syn", +] + +[[package]] +name = "syn" +version = "1.0.34" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "936cae2873c940d92e697597c5eee105fb570cd5689c695806f672883653349b" +dependencies = [ + "proc-macro2", + "quote", + "unicode-xid", +] + +[[package]] +name = "synstructure" +version = "0.12.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b834f2d66f734cb897113e34aaff2f1ab4719ca946f9a7358dba8f8064148701" +dependencies = [ + "proc-macro2", + "quote", + "syn", + "unicode-xid", +] + +[[package]] +name = "tempfile" +version = "3.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7a6e24d9338a0a5be79593e2fa15a648add6138caa803e2d5bc782c371732ca9" +dependencies = [ + "cfg-if", + "libc", + "rand", + "redox_syscall", + "remove_dir_all", + "winapi", +] + +[[package]] +name = "tinyvec" +version = "0.3.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "53953d2d3a5ad81d9f844a32f14ebb121f50b650cd59d0ee2a07cf13c617efed" + +[[package]] +name = "unicode-bidi" +version = "0.3.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "49f2bd0c6468a8230e1db229cff8029217cf623c767ea5d60bfbd42729ea54d5" +dependencies = [ + "matches", +] + +[[package]] +name = "unicode-normalization" +version = "0.1.13" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6fb19cf769fa8c6a80a162df694621ebeb4dafb606470b2b2fce0be40a98a977" +dependencies = [ + "tinyvec", +] + +[[package]] +name = "unicode-xid" +version = "0.2.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f7fe0bb3479651439c9112f72b6c505038574c9fbb575ed1bf3b797fa39dd564" + +[[package]] +name = "url" +version = "2.1.1" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "829d4a8476c35c9bf0bbce5a3b23f4106f79728039b726d292bb93bc106787cb" +dependencies = [ + "idna", + "matches", + "percent-encoding", +] + +[[package]] +name = "uuid" +version = "0.8.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9fde2f6a4bea1d6e007c4ad38c6839fa71cbb63b6dbf5b595aa38dc9b1093c11" + +[[package]] +name = "wasi" +version = "0.9.0+wasi-snapshot-preview1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "cccddf32554fecc6acb585f82a32a72e28b48f8c4c1883ddfeeeaa96f7d8e519" + +[[package]] +name = "winapi" +version = "0.3.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5c839a674fcd7a98952e593242ea400abe93992746761e38641405d28b00f419" +dependencies = [ + "winapi-i686-pc-windows-gnu", + "winapi-x86_64-pc-windows-gnu", +] + +[[package]] +name = "winapi-i686-pc-windows-gnu" +version = "0.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ac3b87c63620426dd9b991e5ce0329eff545bccbbb34f3be09ff6fb6ab51b7b6" + +[[package]] +name = "winapi-x86_64-pc-windows-gnu" +version = "0.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "712e227841d057c1ee1cd2fb22fa7e5a5461ae8e48fa2ca79ec42cfc1931183f" diff --git a/third_party/rust/rkv/Cargo.toml b/third_party/rust/rkv/Cargo.toml new file mode 100644 index 0000000000..7ad63d705c --- /dev/null +++ b/third_party/rust/rkv/Cargo.toml @@ -0,0 +1,88 @@ +# THIS FILE IS AUTOMATICALLY GENERATED BY CARGO +# +# When uploading crates to the registry Cargo will automatically +# "normalize" Cargo.toml files for maximal compatibility +# with all versions of Cargo and also rewrite `path` dependencies +# to registry (e.g., crates.io) dependencies +# +# If you believe there's an error in this file please file an +# issue against the rust-lang/cargo repository. 
If you're +# editing this file be aware that the upstream Cargo.toml +# will likely look very different (and much more reasonable) + +[package] +edition = "2018" +name = "rkv" +version = "0.16.1" +authors = ["Richard Newman <rnewman@twinql.com>", "Nan Jiang <najiang@mozilla.com>", "Myk Melez <myk@mykzilla.org>", "Victor Porof <vporof@mozilla.com>"] +exclude = ["/tests/envs/*"] +description = "A simple, humane, typed key-value storage solution" +homepage = "https://github.com/mozilla/rkv" +documentation = "https://docs.rs/rkv" +readme = "README.md" +keywords = ["lmdb", "database", "storage"] +categories = ["database"] +license = "Apache-2.0" +repository = "https://github.com/mozilla/rkv" +[dependencies.arrayref] +version = "0.3" + +[dependencies.bincode] +version = "1.0" + +[dependencies.bitflags] +version = "1" + +[dependencies.byteorder] +version = "1" + +[dependencies.failure] +version = "0.1" +features = ["derive"] +default_features = false + +[dependencies.id-arena] +version = "2.2" + +[dependencies.lazy_static] +version = "1.0" + +[dependencies.lmdb-rkv] +version = "0.14" + +[dependencies.log] +version = "0.4" + +[dependencies.ordered-float] +version = "1.0" + +[dependencies.paste] +version = "0.1" + +[dependencies.serde] +version = "1.0" +features = ["derive", "rc"] + +[dependencies.serde_derive] +version = "1.0" + +[dependencies.url] +version = "2.0" + +[dependencies.uuid] +version = "0.8" +[dev-dependencies.byteorder] +version = "1" + +[dev-dependencies.tempfile] +version = "3" + +[features] +backtrace = ["failure/backtrace", "failure/std"] +db-dup-sort = [] +db-int-key = [] +default = ["db-dup-sort", "db-int-key"] +no-canonicalize-path = [] +with-asan = ["lmdb-rkv/with-asan"] +with-fuzzer = ["lmdb-rkv/with-fuzzer"] +with-fuzzer-no-link = ["lmdb-rkv/with-fuzzer-no-link"] diff --git a/third_party/rust/rkv/LICENSE b/third_party/rust/rkv/LICENSE new file mode 100644 index 0000000000..d645695673 --- /dev/null +++ b/third_party/rust/rkv/LICENSE @@ -0,0 +1,202 @@ 
+ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. 
For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of 
the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. 
Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/third_party/rust/rkv/README.md b/third_party/rust/rkv/README.md new file mode 100644 index 0000000000..59383a4d34 --- /dev/null +++ b/third_party/rust/rkv/README.md @@ -0,0 +1,80 @@ +# rkv + +[![Travis CI Build Status](https://travis-ci.org/mozilla/rkv.svg?branch=master)](https://travis-ci.org/mozilla/rkv) +[![Appveyor Build Status](https://ci.appveyor.com/api/projects/status/lk936u5y5bi6qafb/branch/master?svg=true)](https://ci.appveyor.com/project/mykmelez/rkv/branch/master) +[![Documentation](https://docs.rs/rkv/badge.svg)](https://docs.rs/rkv/) +[![Crate](https://img.shields.io/crates/v/rkv.svg)](https://crates.io/crates/rkv) + +The [rkv Rust crate](https://crates.io/crates/rkv) is a simple, humane, typed key-value storage solution. 
It supports multiple backend engines with varying guarantees, such as [LMDB](http://www.lmdb.tech/doc/) for performance, or "SafeMode" for reliability. + +## ⚠️ Warning ⚠️ + +To use rkv in production/release environments at Mozilla, you may do so with the "SafeMode" backend, for example: + +```rust +use rkv::{Manager, Rkv}; +use rkv::backend::{SafeMode, SafeModeEnvironment}; + +let mut manager = Manager::<SafeModeEnvironment>::singleton().write().unwrap(); +let shared_rkv = manager.get_or_create(path, Rkv::new::<SafeMode>).unwrap(); + +... +``` + +The "SafeMode" backend performs well, with two caveats: the entire database is stored in memory, and write transactions are synchronously written to disk (only on commit). + +In the future, it will be advisable to switch to a different backend with better performance guarantees. We're working on either fixing some LMDB crashes, or offering more choices of backend engines (e.g. SQLite). + +## Use + +Comprehensive information about using rkv is available in its [online documentation](https://docs.rs/rkv/), which can also be generated for local consumption: + +```sh +cargo doc --open +``` + +## Build + +Build this project as you would build other Rust crates: + +```sh +cargo build +``` + +### Features + +There are several features that you can opt-in and out of when using rkv: + +By default, `db-dup-sort` and `db-int-key` features offer high level database APIs which allow multiple values per key, and optimizations around integer-based keys respectively. Opt out of these default features when specifying the rkv dependency in your Cargo.toml file to disable them; doing so avoids a certain amount of overhead required to support them. + +If you specify the `backtrace` feature, backtraces will be enabled in "failure" errors. This feature is disabled by default. 
+ +To aid fuzzing efforts, `with-asan`, `with-fuzzer`, and `with-fuzzer-no-link` configure the build scripts responsible for compiling the underlying backing engines (e.g. LMDB) to build with these LLVM features enabled. Please refer to the official LLVM/Clang documentation on them for more information. These features are also disabled by default. + +## Test + +Test this project as you would test other Rust crates: + +```sh +cargo test +``` + +The project includes unit and doc tests embedded in the `src/` files, integration tests in the `tests/` subdirectory, and usage examples in the `examples/` subdirectory. To ensure your changes don't break examples, also run them via the run-all-examples.sh shell script: + +```sh +./run-all-examples.sh +``` + +Note: the test fixtures in the `tests/envs/` subdirectory aren't included in the package published to crates.io, so you must clone this repository in order to run the tests that depend on those fixtures or use the `rand` and `dump` executables to recreate them. + +## Contribute + +Of the various open source archetypes described in [A Framework for Purposeful Open Source](https://medium.com/mozilla-open-innovation/whats-your-open-source-strategy-here-are-10-answers-383221b3f9d3), the rkv project most closely resembles the Specialty Library, and we welcome contributions. Please report problems or ask questions using this repo's GitHub [issue tracker](https://github.com/mozilla/rkv/issues) and submit [pull requests](https://github.com/mozilla/rkv/pulls) for code and documentation changes. + +rkv relies on the latest [rustfmt](https://github.com/rust-lang-nursery/rustfmt) for code formatting, so please make sure your pull request passes the rustfmt before submitting it for review. See rustfmt's [quick start](https://github.com/rust-lang-nursery/rustfmt#quick-start) for installation details. 
+ +We follow Mozilla's [Community Participation Guidelines](https://www.mozilla.org/en-US/about/governance/policies/participation/) while contributing to this project. + +## License + +The rkv source code is licensed under the Apache License, Version 2.0, as described in the [LICENSE](https://github.com/mozilla/rkv/blob/master/LICENSE) file. diff --git a/third_party/rust/rkv/examples/README.md b/third_party/rust/rkv/examples/README.md new file mode 100644 index 0000000000..0e5a3e6d67 --- /dev/null +++ b/third_party/rust/rkv/examples/README.md @@ -0,0 +1,11 @@ +## Examples of how to use rkv + +All examples can be executed with: + +``` +cargo run --example $name +``` + +* [`simple-store`](simple-store.rs) - a simple key/value store that showcases the basic usage of rkv. + +* [`iterator`](iterator.rs) - a demo that showcases the basic usage of iterators in rkv. diff --git a/third_party/rust/rkv/examples/iterator.rs b/third_party/rust/rkv/examples/iterator.rs new file mode 100644 index 0000000000..6ae060588f --- /dev/null +++ b/third_party/rust/rkv/examples/iterator.rs @@ -0,0 +1,84 @@ +// Any copyright is dedicated to the Public Domain. +// http://creativecommons.org/publicdomain/zero/1.0/ + +//! A demo that showcases the basic usage of iterators in rkv. +//! +//! You can test this out by running: +//! +//! 
cargo run --example iterator + +use std::{ + fs, + str, +}; + +use tempfile::Builder; + +use rkv::{ + backend::{ + Lmdb, + LmdbDatabase, + LmdbEnvironment, + }, + Manager, + Rkv, + SingleStore, + StoreError, + StoreOptions, + Value, +}; + +fn main() { + let root = Builder::new().prefix("iterator").tempdir().unwrap(); + fs::create_dir_all(root.path()).unwrap(); + let p = root.path(); + + let mut manager = Manager::<LmdbEnvironment>::singleton().write().unwrap(); + let created_arc = manager.get_or_create(p, Rkv::new::<Lmdb>).unwrap(); + let k = created_arc.read().unwrap(); + let store = k.open_single("store", StoreOptions::create()).unwrap(); + + populate_store(&k, store).unwrap(); + + let reader = k.read().unwrap(); + + println!("Iterating from the beginning..."); + // Reader::iter_start() iterates from the first item in the store, and + // returns the (key, value) tuples in order. + let mut iter = store.iter_start(&reader).unwrap(); + while let Some(Ok((country, city))) = iter.next() { + println!("{}, {:?}", str::from_utf8(country).unwrap(), city); + } + + println!(); + println!("Iterating from the given key..."); + // Reader::iter_from() iterates from the first key equal to or greater + // than the given key. 
+ let mut iter = store.iter_from(&reader, "Japan").unwrap(); + while let Some(Ok((country, city))) = iter.next() { + println!("{}, {:?}", str::from_utf8(country).unwrap(), city); + } + + println!(); + println!("Iterating from the given prefix..."); + let mut iter = store.iter_from(&reader, "Un").unwrap(); + while let Some(Ok((country, city))) = iter.next() { + println!("{}, {:?}", str::from_utf8(country).unwrap(), city); + } +} + +fn populate_store(k: &Rkv<LmdbEnvironment>, store: SingleStore<LmdbDatabase>) -> Result<(), StoreError> { + let mut writer = k.write()?; + for (country, city) in vec![ + ("Canada", Value::Str("Ottawa")), + ("United States of America", Value::Str("Washington")), + ("Germany", Value::Str("Berlin")), + ("France", Value::Str("Paris")), + ("Italy", Value::Str("Rome")), + ("United Kingdom", Value::Str("London")), + ("Japan", Value::Str("Tokyo")), + ] { + store.put(&mut writer, country, &city)?; + } + writer.commit() +} diff --git a/third_party/rust/rkv/examples/simple-store.rs b/third_party/rust/rkv/examples/simple-store.rs new file mode 100644 index 0000000000..620181d25a --- /dev/null +++ b/third_party/rust/rkv/examples/simple-store.rs @@ -0,0 +1,194 @@ +// Any copyright is dedicated to the Public Domain. +// http://creativecommons.org/publicdomain/zero/1.0/ + +//! A simple rkv demo that showcases the basic usage (put/get/delete) of rkv. +//! +//! You can test this out by running: +//! +//! 
cargo run --example simple-store + +use std::fs; + +use tempfile::Builder; + +use rkv::{ + backend::{ + BackendStat, + Lmdb, + LmdbDatabase, + LmdbEnvironment, + LmdbRwTransaction, + }, + Manager, + Rkv, + StoreOptions, + Value, +}; + +type MultiStore = rkv::MultiStore<LmdbDatabase>; +type Writer<'w> = rkv::Writer<LmdbRwTransaction<'w>>; + +fn getput<'w, 's>(store: MultiStore, writer: &'w mut Writer, ids: &'s mut Vec<String>) { + let keys = vec!["str1", "str2", "str3"]; + // we convert the writer into a cursor so that we can safely read + for k in keys.iter() { + // this is a multi-valued database, so get returns an iterator + let mut iter = store.get(writer, k).unwrap(); + while let Some(Ok((_key, val))) = iter.next() { + if let Value::Str(s) = val { + ids.push(s.to_owned()); + } else { + panic!("didn't get a string back!"); + } + } + } + for id in ids { + store.put(writer, &id, &Value::Blob(b"weeeeeee")).unwrap(); + } +} + +fn delete(store: MultiStore, writer: &mut Writer) { + let keys = vec!["str1", "str2", "str3"]; + let vals = vec!["string uno", "string quatro", "string siete"]; + // we convert the writer into a cursor so that we can safely read + for i in 0..keys.len() { + store.delete(writer, &keys[i], &Value::Str(vals[i])).unwrap(); + } +} + +fn main() { + let root = Builder::new().prefix("simple-db").tempdir().unwrap(); + fs::create_dir_all(root.path()).unwrap(); + let p = root.path(); + + // The manager enforces that each process opens the same lmdb environment at most once + let mut manager = Manager::<LmdbEnvironment>::singleton().write().unwrap(); + let created_arc = manager.get_or_create(p, Rkv::new::<Lmdb>).unwrap(); + let k = created_arc.read().unwrap(); + + // Creates a store called "store" + let store = k.open_single("store", StoreOptions::create()).unwrap(); + let multistore = k.open_multi("multistore", StoreOptions::create()).unwrap(); + + println!("Inserting data..."); + { + // Use a writer to mutate the store + let mut writer = 
k.write().unwrap(); + store.put(&mut writer, "int", &Value::I64(1234)).unwrap(); + store.put(&mut writer, "uint", &Value::U64(1234_u64)).unwrap(); + store.put(&mut writer, "float", &Value::F64(1234.0.into())).unwrap(); + store.put(&mut writer, "instant", &Value::Instant(1_528_318_073_700)).unwrap(); + store.put(&mut writer, "boolean", &Value::Bool(true)).unwrap(); + store.put(&mut writer, "string", &Value::Str("héllo, yöu")).unwrap(); + store.put(&mut writer, "json", &Value::Json(r#"{"foo":"bar", "number": 1}"#)).unwrap(); + store.put(&mut writer, "blob", &Value::Blob(b"blob")).unwrap(); + writer.commit().unwrap(); + } + + println!("Testing getput"); + { + let mut ids = Vec::new(); + let mut writer = k.write().unwrap(); + multistore.put(&mut writer, "str1", &Value::Str("string uno")).unwrap(); + multistore.put(&mut writer, "str1", &Value::Str("string dos")).unwrap(); + multistore.put(&mut writer, "str1", &Value::Str("string tres")).unwrap(); + multistore.put(&mut writer, "str2", &Value::Str("string quatro")).unwrap(); + multistore.put(&mut writer, "str2", &Value::Str("string cinco")).unwrap(); + multistore.put(&mut writer, "str2", &Value::Str("string seis")).unwrap(); + multistore.put(&mut writer, "str3", &Value::Str("string siete")).unwrap(); + multistore.put(&mut writer, "str3", &Value::Str("string ocho")).unwrap(); + multistore.put(&mut writer, "str3", &Value::Str("string nueve")).unwrap(); + getput(multistore, &mut writer, &mut ids); + writer.commit().unwrap(); + let mut writer = k.write().unwrap(); + delete(multistore, &mut writer); + writer.commit().unwrap(); + } + + println!("Looking up keys..."); + { + // Use a reader to query the store + let reader = k.read().unwrap(); + println!("Get int {:?}", store.get(&reader, "int").unwrap()); + println!("Get uint {:?}", store.get(&reader, "uint").unwrap()); + println!("Get float {:?}", store.get(&reader, "float").unwrap()); + println!("Get instant {:?}", store.get(&reader, "instant").unwrap()); + println!("Get 
boolean {:?}", store.get(&reader, "boolean").unwrap()); + println!("Get string {:?}", store.get(&reader, "string").unwrap()); + println!("Get json {:?}", store.get(&reader, "json").unwrap()); + println!("Get blob {:?}", store.get(&reader, "blob").unwrap()); + println!("Get non-existent {:?}", store.get(&reader, "non-existent").unwrap()); + } + + println!("Looking up keys via Writer.get()..."); + { + let mut writer = k.write().unwrap(); + store.put(&mut writer, "foo", &Value::Str("bar")).unwrap(); + store.put(&mut writer, "bar", &Value::Str("baz")).unwrap(); + store.delete(&mut writer, "foo").unwrap(); + println!("It should be None! ({:?})", store.get(&writer, "foo").unwrap()); + println!("Get bar ({:?})", store.get(&writer, "bar").unwrap()); + writer.commit().unwrap(); + let reader = k.read().expect("reader"); + println!("It should be None! ({:?})", store.get(&reader, "foo").unwrap()); + println!("Get bar {:?}", store.get(&reader, "bar").unwrap()); + } + + println!("Aborting transaction..."); + { + // Aborting a write transaction rollbacks the change(s) + let mut writer = k.write().unwrap(); + store.put(&mut writer, "foo", &Value::Str("bar")).unwrap(); + writer.abort(); + + let reader = k.read().expect("reader"); + println!("It should be None! ({:?})", store.get(&reader, "foo").unwrap()); + // Explicitly aborting a transaction is not required unless an early + // abort is desired, since both read and write transactions will + // implicitly be aborted once they go out of scope. + } + + println!("Deleting keys..."); + { + // Deleting a key/value also requires a write transaction + let mut writer = k.write().unwrap(); + store.put(&mut writer, "foo", &Value::Str("bar")).unwrap(); + store.delete(&mut writer, "foo").unwrap(); + println!("It should be None! ({:?})", store.get(&writer, "foo").unwrap()); + writer.commit().unwrap(); + + // Committing a transaction consumes the writer, preventing you + // from reusing it by failing and reporting a compile-time error. 
+ // This line would report error[E0382]: use of moved value: `writer`. + // store.put(&mut writer, "baz", &Value::Str("buz")).unwrap(); + } + + println!("Clearing store..."); + { + // Clearing a store deletes all the entries in that store + let mut writer = k.write().unwrap(); + store.put(&mut writer, "foo", &Value::Str("bar")).unwrap(); + store.put(&mut writer, "bar", &Value::Str("baz")).unwrap(); + store.clear(&mut writer).unwrap(); + writer.commit().unwrap(); + + let reader = k.read().expect("reader"); + println!("It should be None! ({:?})", store.get(&reader, "foo").unwrap()); + println!("It should be None! ({:?})", store.get(&reader, "bar").unwrap()); + } + + println!("Write and read on multiple stores..."); + { + let another_store = k.open_single("another_store", StoreOptions::create()).unwrap(); + let mut writer = k.write().unwrap(); + store.put(&mut writer, "foo", &Value::Str("bar")).unwrap(); + another_store.put(&mut writer, "foo", &Value::Str("baz")).unwrap(); + writer.commit().unwrap(); + + let reader = k.read().unwrap(); + println!("Get from store value: {:?}", store.get(&reader, "foo").unwrap()); + println!("Get from another store value: {:?}", another_store.get(&reader, "foo").unwrap()); + } + + println!("Environment statistics: btree depth = {}", k.stat().unwrap().depth()); +} diff --git a/third_party/rust/rkv/run-all-examples.sh b/third_party/rust/rkv/run-all-examples.sh new file mode 100755 index 0000000000..fa7eb959d6 --- /dev/null +++ b/third_party/rust/rkv/run-all-examples.sh @@ -0,0 +1,14 @@ +#!/bin/bash + +set -e + +cargo build --examples + +for file in examples/*; do + filename=$(basename ${file}) + extension=${filename##*.} + example_name=${filename%.*} + if [[ "${extension}" = "rs" ]]; then + cargo run --example ${example_name} + fi +done diff --git a/third_party/rust/rkv/src/backend.rs b/third_party/rust/rkv/src/backend.rs new file mode 100644 index 0000000000..46ee7a2d3a --- /dev/null +++ b/third_party/rust/rkv/src/backend.rs @@ -0,0 
+1,54 @@ +// Copyright 2018-2019 Mozilla +// +// Licensed under the Apache License, Version 2.0 (the "License"); you may not use +// this file except in compliance with the License. You may obtain a copy of the +// License at http://www.apache.org/licenses/LICENSE-2.0 +// Unless required by applicable law or agreed to in writing, software distributed +// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR +// CONDITIONS OF ANY KIND, either express or implied. See the License for the +// specific language governing permissions and limitations under the License. + +mod common; +mod impl_lmdb; +mod impl_safe; +mod traits; + +pub use common::*; +pub use traits::*; + +pub use impl_lmdb::{ + ArchMigrateError as LmdbArchMigrateError, + ArchMigrateResult as LmdbArchMigrateResult, + ArchMigrator as LmdbArchMigrator, + DatabaseFlagsImpl as LmdbDatabaseFlags, + DatabaseImpl as LmdbDatabase, + EnvironmentBuilderImpl as Lmdb, + EnvironmentFlagsImpl as LmdbEnvironmentFlags, + EnvironmentImpl as LmdbEnvironment, + ErrorImpl as LmdbError, + InfoImpl as LmdbInfo, + IterImpl as LmdbIter, + RoCursorImpl as LmdbRoCursor, + RoTransactionImpl as LmdbRoTransaction, + RwCursorImpl as LmdbRwCursor, + RwTransactionImpl as LmdbRwTransaction, + StatImpl as LmdbStat, + WriteFlagsImpl as LmdbWriteFlags, +}; + +pub use impl_safe::{ + DatabaseFlagsImpl as SafeModeDatabaseFlags, + DatabaseImpl as SafeModeDatabase, + EnvironmentBuilderImpl as SafeMode, + EnvironmentFlagsImpl as SafeModeEnvironmentFlags, + EnvironmentImpl as SafeModeEnvironment, + ErrorImpl as SafeModeError, + InfoImpl as SafeModeInfo, + IterImpl as SafeModeIter, + RoCursorImpl as SafeModeRoCursor, + RoTransactionImpl as SafeModeRoTransaction, + RwCursorImpl as SafeModeRwCursor, + RwTransactionImpl as SafeModeRwTransaction, + StatImpl as SafeModeStat, + WriteFlagsImpl as SafeModeWriteFlags, +}; diff --git a/third_party/rust/rkv/src/backend/common.rs b/third_party/rust/rkv/src/backend/common.rs new file mode 
100644 index 0000000000..bea3839d03 --- /dev/null +++ b/third_party/rust/rkv/src/backend/common.rs @@ -0,0 +1,44 @@ +// Copyright 2018-2019 Mozilla +// +// Licensed under the Apache License, Version 2.0 (the "License"); you may not use +// this file except in compliance with the License. You may obtain a copy of the +// License at http://www.apache.org/licenses/LICENSE-2.0 +// Unless required by applicable law or agreed to in writing, software distributed +// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR +// CONDITIONS OF ANY KIND, either express or implied. See the License for the +// specific language governing permissions and limitations under the License. +#![allow(non_camel_case_types)] + +pub enum EnvironmentFlags { + FIXED_MAP, + NO_SUB_DIR, + WRITE_MAP, + READ_ONLY, + NO_META_SYNC, + NO_SYNC, + MAP_ASYNC, + NO_TLS, + NO_LOCK, + NO_READAHEAD, + NO_MEM_INIT, +} + +pub enum DatabaseFlags { + REVERSE_KEY, + #[cfg(feature = "db-dup-sort")] + DUP_SORT, + #[cfg(feature = "db-dup-sort")] + DUP_FIXED, + #[cfg(feature = "db-int-key")] + INTEGER_KEY, + INTEGER_DUP, + REVERSE_DUP, +} + +pub enum WriteFlags { + NO_OVERWRITE, + NO_DUP_DATA, + CURRENT, + APPEND, + APPEND_DUP, +} diff --git a/third_party/rust/rkv/src/backend/impl_lmdb.rs b/third_party/rust/rkv/src/backend/impl_lmdb.rs new file mode 100644 index 0000000000..8f99206928 --- /dev/null +++ b/third_party/rust/rkv/src/backend/impl_lmdb.rs @@ -0,0 +1,49 @@ +// Copyright 2018-2019 Mozilla +// +// Licensed under the Apache License, Version 2.0 (the "License"); you may not use +// this file except in compliance with the License. You may obtain a copy of the +// License at http://www.apache.org/licenses/LICENSE-2.0 +// Unless required by applicable law or agreed to in writing, software distributed +// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR +// CONDITIONS OF ANY KIND, either express or implied. 
See the License for the +// specific language governing permissions and limitations under the License. + +mod arch_migrator; +mod arch_migrator_error; +mod cursor; +mod database; +mod environment; +mod error; +mod flags; +mod info; +mod iter; +mod stat; +mod transaction; + +pub use arch_migrator::{ + MigrateError as ArchMigrateError, + MigrateResult as ArchMigrateResult, + Migrator as ArchMigrator, +}; +pub use cursor::{ + RoCursorImpl, + RwCursorImpl, +}; +pub use database::DatabaseImpl; +pub use environment::{ + EnvironmentBuilderImpl, + EnvironmentImpl, +}; +pub use error::ErrorImpl; +pub use flags::{ + DatabaseFlagsImpl, + EnvironmentFlagsImpl, + WriteFlagsImpl, +}; +pub use info::InfoImpl; +pub use iter::IterImpl; +pub use stat::StatImpl; +pub use transaction::{ + RoTransactionImpl, + RwTransactionImpl, +}; diff --git a/third_party/rust/rkv/src/backend/impl_lmdb/arch_migrator.rs b/third_party/rust/rkv/src/backend/impl_lmdb/arch_migrator.rs new file mode 100644 index 0000000000..447d5c1af6 --- /dev/null +++ b/third_party/rust/rkv/src/backend/impl_lmdb/arch_migrator.rs @@ -0,0 +1,1009 @@ +// Copyright 2018-2019 Mozilla +// +// Licensed under the Apache License, Version 2.0 (the "License"); you may not use +// this file except in compliance with the License. You may obtain a copy of the +// License at http://www.apache.org/licenses/LICENSE-2.0 +// Unless required by applicable law or agreed to in writing, software distributed +// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR +// CONDITIONS OF ANY KIND, either express or implied. See the License for the +// specific language governing permissions and limitations under the License. + +//! A utility for migrating data from one LMDB environment to another. Notably, this tool +//! can migrate data from an enviroment created with a different bit-depth than the +//! current rkv consumer, which enables the consumer to retrieve data from an environment +//! 
that can't be read directly using the rkv APIs. +//! +//! The utility supports both 32-bit and 64-bit LMDB source environments, and it +//! automatically migrates data in both the default database and any named (sub) +//! databases. It also migrates the source environment's "map size" and "max DBs" +//! configuration options to the destination environment. +//! +//! The destination environment must be at the rkv consumer's bit depth and should be +//! empty of data. It can be an empty directory, in which case the utility will create a +//! new LMDB environment within the directory. +//! +//! The tool currently has these limitations: +//! +//! 1. It doesn't support migration from environments created with +//! `EnvironmentFlags::NO_SUB_DIR`. To migrate such an environment, create a +//! temporary directory, copy the environment's data file to a file called data.mdb in +//! the temporary directory, then migrate the temporary directory as the source +//! environment. +//! 2. It doesn't support migration from databases created with DatabaseFlags::DUP_SORT` +//! (with or without `DatabaseFlags::DUP_FIXED`). +//! 3. It doesn't account for existing data in the destination environment, which means +//! that it can overwrite data (causing data loss) or fail to migrate data if the +//! destination environment contains existing data. +//! +//! ## Basic Usage +//! +//! Call `Migrator::new()` with the path to the source environment to create a `Migrator` +//! instance; then call the instance's `migrate()` method with the path to the destination +//! environment to migrate data from the source to the destination environment. For +//! example, this snippet migrates data from the tests/envs/ref_env_32 environment to a +//! new environment in a temporary directory: +//! +//! ``` +//! use rkv::migrator::LmdbArchMigrator as Migrator; +//! use std::path::Path; +//! use tempfile::tempdir; +//! let mut migrator = Migrator::new(Path::new("tests/envs/ref_env_32")).unwrap(); +//! 
migrator.migrate(&tempdir().unwrap().path()).unwrap(); +//! ``` +//! +//! Both `Migrator::new()` and `migrate()` return a `MigrateResult` that is either an +//! `Ok()` result or an `Err<MigrateError>`, where `MigrateError` is an enum whose +//! variants identify specific kinds of migration failures. + +use std::{ + collections::{ + BTreeMap, + HashMap, + }, + convert::TryFrom, + fs::File, + io::{ + Cursor, + Read, + Seek, + SeekFrom, + Write, + }, + path::{ + Path, + PathBuf, + }, + rc::Rc, + str, +}; + +use bitflags::bitflags; +use byteorder::{ + LittleEndian, + ReadBytesExt, +}; +use lmdb::{ + DatabaseFlags, + Environment, + Transaction, + WriteFlags, +}; + +pub use super::arch_migrator_error::MigrateError; + +const PAGESIZE: u16 = 4096; + +// The magic number is 0xBEEFC0DE, which is 0xDEC0EFBE in little-endian. It appears at +// offset 12 on 32-bit systems and 16 on 64-bit systems. We don't support big-endian +// migration, but presumably we could do so by detecting the order of the bytes. +const MAGIC: [u8; 4] = [0xDE, 0xC0, 0xEF, 0xBE]; + +pub type MigrateResult<T> = Result<T, MigrateError>; + +bitflags! { + #[derive(Default)] + struct PageFlags: u16 { + const BRANCH = 0x01; + const LEAF = 0x02; + const OVERFLOW = 0x04; + const META = 0x08; + const DIRTY = 0x10; + const LEAF2 = 0x20; + const SUBP = 0x40; + const LOOSE = 0x4000; + const KEEP = 0x8000; + } +} + +bitflags! { + #[derive(Default)] + struct NodeFlags: u16 { + const BIGDATA = 0x01; + const SUBDATA = 0x02; + const DUPDATA = 0x04; + } +} + +// The bit depth of the executable that created an LMDB environment. The Migrator +// determines this automatically based on the location of the magic number in data.mdb. +#[derive(Clone, Copy, PartialEq)] +enum Bits { + U32, + U64, +} + +impl Bits { + // The size of usize for the bit-depth represented by the enum variant. 
+ fn size(self) -> usize { + match self { + Bits::U32 => 4, + Bits::U64 => 8, + } + } +} + +// The equivalent of PAGEHDRSZ in LMDB, except that this one varies by bits. +fn page_header_size(bits: Bits) -> u64 { + match bits { + Bits::U32 => 12, + Bits::U64 => 16, + } +} + +// The equivalent of P_INVALID in LMDB, except that this one varies by bits. +fn validate_page_num(page_num: u64, bits: Bits) -> MigrateResult<()> { + let invalid_page_num = match bits { + Bits::U32 => u64::from(!0u32), + Bits::U64 => !0u64, + }; + + if page_num == invalid_page_num { + return Err(MigrateError::InvalidPageNum); + } + + Ok(()) +} + +#[derive(Clone, Debug, Default)] +struct Database { + md_pad: u32, + md_flags: DatabaseFlags, + md_depth: u16, + md_branch_pages: u64, + md_leaf_pages: u64, + md_overflow_pages: u64, + md_entries: u64, + md_root: u64, +} + +impl Database { + fn new(cursor: &mut Cursor<&[u8]>, bits: Bits) -> MigrateResult<Database> { + Ok(Database { + md_pad: cursor.read_u32::<LittleEndian>()?, + md_flags: DatabaseFlags::from_bits(cursor.read_u16::<LittleEndian>()?.into()) + .ok_or(MigrateError::InvalidDatabaseBits)?, + md_depth: cursor.read_u16::<LittleEndian>()?, + md_branch_pages: cursor.read_uint::<LittleEndian>(bits.size())?, + md_leaf_pages: cursor.read_uint::<LittleEndian>(bits.size())?, + md_overflow_pages: cursor.read_uint::<LittleEndian>(bits.size())?, + md_entries: cursor.read_uint::<LittleEndian>(bits.size())?, + md_root: cursor.read_uint::<LittleEndian>(bits.size())?, + }) + } +} + +#[derive(Debug, Default)] +struct Databases { + free: Database, + main: Database, +} + +#[derive(Debug, Default)] +struct MetaData { + mm_magic: u32, + mm_version: u32, + mm_address: u64, + mm_mapsize: u64, + mm_dbs: Databases, + mm_last_pg: u64, + mm_txnid: u64, +} + +#[derive(Debug)] +enum LeafNode { + Regular { + mn_lo: u16, + mn_hi: u16, + mn_flags: NodeFlags, + mn_ksize: u16, + mv_size: u32, + key: Vec<u8>, + value: Vec<u8>, + }, + BigData { + mn_lo: u16, + mn_hi: u16, + 
mn_flags: NodeFlags, + mn_ksize: u16, + mv_size: u32, + key: Vec<u8>, + overflow_pgno: u64, + }, + SubData { + mn_lo: u16, + mn_hi: u16, + mn_flags: NodeFlags, + mn_ksize: u16, + mv_size: u32, + key: Vec<u8>, + value: Vec<u8>, + db: Database, + }, +} + +#[derive(Debug, Default)] +struct BranchNode { + mp_pgno: u64, + mn_ksize: u16, + mn_data: Vec<u8>, +} + +#[derive(Debug)] +enum PageHeader { + Regular { + mp_pgno: u64, + mp_flags: PageFlags, + pb_lower: u16, + pb_upper: u16, + }, + Overflow { + mp_pgno: u64, + mp_flags: PageFlags, + pb_pages: u32, + }, +} + +#[derive(Debug)] +enum Page { + META(MetaData), + LEAF(Vec<LeafNode>), + BRANCH(Vec<BranchNode>), +} + +impl Page { + fn new(buf: Vec<u8>, bits: Bits) -> MigrateResult<Page> { + let mut cursor = std::io::Cursor::new(&buf[..]); + + match Self::parse_page_header(&mut cursor, bits)? { + PageHeader::Regular { + mp_flags, + pb_lower, + .. + } => { + if mp_flags.contains(PageFlags::LEAF2) || mp_flags.contains(PageFlags::SUBP) { + // We don't yet support DUPFIXED and DUPSORT databases. + return Err(MigrateError::UnsupportedPageHeaderVariant); + } + + if mp_flags.contains(PageFlags::META) { + let meta_data = Self::parse_meta_data(&mut cursor, bits)?; + Ok(Page::META(meta_data)) + } else if mp_flags.contains(PageFlags::LEAF) { + let nodes = Self::parse_leaf_nodes(&mut cursor, pb_lower, bits)?; + Ok(Page::LEAF(nodes)) + } else if mp_flags.contains(PageFlags::BRANCH) { + let nodes = Self::parse_branch_nodes(&mut cursor, pb_lower, bits)?; + Ok(Page::BRANCH(nodes)) + } else { + Err(MigrateError::UnexpectedPageHeaderVariant) + } + }, + PageHeader::Overflow { + .. + } => { + // There isn't anything to do, nor should we try to instantiate + // a page of this type, as we only access them when reading + // a value that is too large to fit into a leaf node. 
+ Err(MigrateError::UnexpectedPageHeaderVariant) + }, + } + } + + fn parse_page_header(cursor: &mut Cursor<&[u8]>, bits: Bits) -> MigrateResult<PageHeader> { + let mp_pgno = cursor.read_uint::<LittleEndian>(bits.size())?; + let _mp_pad = cursor.read_u16::<LittleEndian>()?; + let mp_flags = PageFlags::from_bits(cursor.read_u16::<LittleEndian>()?).ok_or(MigrateError::InvalidPageBits)?; + + if mp_flags.contains(PageFlags::OVERFLOW) { + let pb_pages = cursor.read_u32::<LittleEndian>()?; + Ok(PageHeader::Overflow { + mp_pgno, + mp_flags, + pb_pages, + }) + } else { + let pb_lower = cursor.read_u16::<LittleEndian>()?; + let pb_upper = cursor.read_u16::<LittleEndian>()?; + Ok(PageHeader::Regular { + mp_pgno, + mp_flags, + pb_lower, + pb_upper, + }) + } + } + + fn parse_meta_data(mut cursor: &mut Cursor<&[u8]>, bits: Bits) -> MigrateResult<MetaData> { + cursor.seek(SeekFrom::Start(page_header_size(bits)))?; + + Ok(MetaData { + mm_magic: cursor.read_u32::<LittleEndian>()?, + mm_version: cursor.read_u32::<LittleEndian>()?, + mm_address: cursor.read_uint::<LittleEndian>(bits.size())?, + mm_mapsize: cursor.read_uint::<LittleEndian>(bits.size())?, + mm_dbs: Databases { + free: Database::new(&mut cursor, bits)?, + main: Database::new(&mut cursor, bits)?, + }, + mm_last_pg: cursor.read_uint::<LittleEndian>(bits.size())?, + mm_txnid: cursor.read_uint::<LittleEndian>(bits.size())?, + }) + } + + fn parse_leaf_nodes(cursor: &mut Cursor<&[u8]>, pb_lower: u16, bits: Bits) -> MigrateResult<Vec<LeafNode>> { + cursor.set_position(page_header_size(bits)); + let num_keys = Self::num_keys(pb_lower, bits); + let mp_ptrs = Self::parse_mp_ptrs(cursor, num_keys)?; + + let mut leaf_nodes = Vec::with_capacity(num_keys as usize); + + for mp_ptr in mp_ptrs { + cursor.set_position(u64::from(mp_ptr)); + leaf_nodes.push(Self::parse_leaf_node(cursor, bits)?); + } + + Ok(leaf_nodes) + } + + fn parse_leaf_node(cursor: &mut Cursor<&[u8]>, bits: Bits) -> MigrateResult<LeafNode> { + // The order of the mn_lo 
and mn_hi fields is endian-dependent and would be + // reversed in an LMDB environment created on a big-endian system. + let mn_lo = cursor.read_u16::<LittleEndian>()?; + let mn_hi = cursor.read_u16::<LittleEndian>()?; + + let mn_flags = NodeFlags::from_bits(cursor.read_u16::<LittleEndian>()?).ok_or(MigrateError::InvalidNodeBits)?; + let mn_ksize = cursor.read_u16::<LittleEndian>()?; + + let start = usize::try_from(cursor.position())?; + let end = usize::try_from(cursor.position() + u64::from(mn_ksize))?; + let key = cursor.get_ref()[start..end].to_vec(); + cursor.set_position(end as u64); + + let mv_size = Self::leaf_node_size(mn_lo, mn_hi); + if mn_flags.contains(NodeFlags::BIGDATA) { + let overflow_pgno = cursor.read_uint::<LittleEndian>(bits.size())?; + Ok(LeafNode::BigData { + mn_lo, + mn_hi, + mn_flags, + mn_ksize, + mv_size, + key, + overflow_pgno, + }) + } else if mn_flags.contains(NodeFlags::SUBDATA) { + let start = usize::try_from(cursor.position())?; + let end = usize::try_from(cursor.position() + u64::from(mv_size))?; + let value = cursor.get_ref()[start..end].to_vec(); + let mut cursor = std::io::Cursor::new(&value[..]); + let db = Database::new(&mut cursor, bits)?; + validate_page_num(db.md_root, bits)?; + Ok(LeafNode::SubData { + mn_lo, + mn_hi, + mn_flags, + mn_ksize, + mv_size, + key, + value, + db, + }) + } else { + let start = usize::try_from(cursor.position())?; + let end = usize::try_from(cursor.position() + u64::from(mv_size))?; + let value = cursor.get_ref()[start..end].to_vec(); + Ok(LeafNode::Regular { + mn_lo, + mn_hi, + mn_flags, + mn_ksize, + mv_size, + key, + value, + }) + } + } + + fn leaf_node_size(mn_lo: u16, mn_hi: u16) -> u32 { + u32::from(mn_lo) + ((u32::from(mn_hi)) << 16) + } + + fn parse_branch_nodes(cursor: &mut Cursor<&[u8]>, pb_lower: u16, bits: Bits) -> MigrateResult<Vec<BranchNode>> { + let num_keys = Self::num_keys(pb_lower, bits); + let mp_ptrs = Self::parse_mp_ptrs(cursor, num_keys)?; + + let mut branch_nodes = 
Vec::with_capacity(num_keys as usize); + + for mp_ptr in mp_ptrs { + cursor.set_position(u64::from(mp_ptr)); + branch_nodes.push(Self::parse_branch_node(cursor, bits)?) + } + + Ok(branch_nodes) + } + + fn parse_branch_node(cursor: &mut Cursor<&[u8]>, bits: Bits) -> MigrateResult<BranchNode> { + // The order of the mn_lo and mn_hi fields is endian-dependent and would be + // reversed in an LMDB environment created on a big-endian system. + let mn_lo = cursor.read_u16::<LittleEndian>()?; + let mn_hi = cursor.read_u16::<LittleEndian>()?; + + let mn_flags = cursor.read_u16::<LittleEndian>()?; + + // Branch nodes overload the mn_lo, mn_hi, and mn_flags fields to store the page + // number, so we derive the number from those fields. + let mp_pgno = Self::branch_node_page_num(mn_lo, mn_hi, mn_flags, bits); + + let mn_ksize = cursor.read_u16::<LittleEndian>()?; + + let position = cursor.position(); + let start = usize::try_from(position)?; + let end = usize::try_from(position + u64::from(mn_ksize))?; + let mn_data = cursor.get_ref()[start..end].to_vec(); + cursor.set_position(end as u64); + + Ok(BranchNode { + mp_pgno, + mn_ksize, + mn_data, + }) + } + + fn branch_node_page_num(mn_lo: u16, mn_hi: u16, mn_flags: u16, bits: Bits) -> u64 { + let mut page_num = u64::from(u32::from(mn_lo) + (u32::from(mn_hi) << 16)); + if bits == Bits::U64 { + page_num += u64::from(mn_flags) << 32; + } + page_num + } + + fn parse_mp_ptrs(cursor: &mut Cursor<&[u8]>, num_keys: u64) -> MigrateResult<Vec<u16>> { + let mut mp_ptrs = Vec::with_capacity(num_keys as usize); + for _ in 0..num_keys { + mp_ptrs.push(cursor.read_u16::<LittleEndian>()?); + } + Ok(mp_ptrs) + } + + fn num_keys(pb_lower: u16, bits: Bits) -> u64 { + (u64::from(pb_lower) - page_header_size(bits)) >> 1 + } +} + +pub struct Migrator { + file: File, + bits: Bits, +} + +impl Migrator { + /// Create a new Migrator for the LMDB environment at the given path. 
This tries to + /// open the data.mdb file in the environment and determine the bit depth of the + /// executable that created it, so it can fail and return an Err if the file can't be + /// opened or the depth determined. + pub fn new(path: &Path) -> MigrateResult<Migrator> { + let mut path = PathBuf::from(path); + path.push("data.mdb"); + let mut file = File::open(&path)?; + + file.seek(SeekFrom::Start(page_header_size(Bits::U32)))?; + let mut buf = [0; 4]; + file.read_exact(&mut buf)?; + + let bits = if buf == MAGIC { + Bits::U32 + } else { + file.seek(SeekFrom::Start(page_header_size(Bits::U64)))?; + file.read_exact(&mut buf)?; + if buf == MAGIC { + Bits::U64 + } else { + return Err(MigrateError::IndeterminateBitDepth); + } + }; + + Ok(Migrator { + file, + bits, + }) + } + + /// Dump the data in one of the databases in the LMDB environment. If the `database` + /// paremeter is None, then we dump the data in the main database. If it's the name + /// of a subdatabase, then we dump the data in that subdatabase. + /// + /// Note that the output isn't identical to that of the `mdb_dump` utility, since + /// `mdb_dump` includes subdatabase key/value pairs when dumping the main database, + /// and those values are architecture-dependent, since they contain pointer-sized + /// data. + /// + /// If we wanted to support identical output, we could parameterize inclusion of + /// subdatabase pairs in get_pairs() and include them when dumping data, while + /// continuing to exclude them when migrating data. 
+ pub fn dump<T: Write>(&mut self, database: Option<&str>, mut out: T) -> MigrateResult<()> { + let meta_data = self.get_meta_data()?; + let root_page_num = meta_data.mm_dbs.main.md_root; + let root_page = Rc::new(self.get_page(root_page_num)?); + + let pairs; + if let Some(database) = database { + let subdbs = self.get_subdbs(root_page)?; + let database = + subdbs.get(database.as_bytes()).ok_or_else(|| MigrateError::DatabaseNotFound(database.to_string()))?; + let root_page_num = database.md_root; + let root_page = Rc::new(self.get_page(root_page_num)?); + pairs = self.get_pairs(root_page)?; + } else { + pairs = self.get_pairs(root_page)?; + } + + out.write_all(b"VERSION=3\n")?; + out.write_all(b"format=bytevalue\n")?; + if let Some(database) = database { + writeln!(out, "database={}", database)?; + } + out.write_all(b"type=btree\n")?; + writeln!(out, "mapsize={}", meta_data.mm_mapsize)?; + out.write_all(b"maxreaders=126\n")?; + out.write_all(b"db_pagesize=4096\n")?; + out.write_all(b"HEADER=END\n")?; + + for (key, value) in pairs { + out.write_all(b" ")?; + for byte in key { + write!(out, "{:02x}", byte)?; + } + out.write_all(b"\n")?; + out.write_all(b" ")?; + for byte in value { + write!(out, "{:02x}", byte)?; + } + out.write_all(b"\n")?; + } + + out.write_all(b"DATA=END\n")?; + + Ok(()) + } + + /// Migrate all data in all of databases in the existing LMDB environment to a new + /// environment. This includes all key/value pairs in the main database that aren't + /// metadata about subdatabases and all key/value pairs in all subdatabases. + /// + /// We also set the map size and maximum databases of the new environment to their + /// values for the existing environment. But we don't set other metadata, and we + /// don't check that the new environment is empty before migrating data. + /// + /// Thus it's possible for this to overwrite existing data or fail to migrate data if + /// the new environment isn't empty. 
It's the consumer's responsibility to ensure + /// that data can be safely migrated to the new environment. In general, this means + /// that environment should be empty. + pub fn migrate(&mut self, dest: &Path) -> MigrateResult<()> { + let meta_data = self.get_meta_data()?; + let root_page_num = meta_data.mm_dbs.main.md_root; + validate_page_num(root_page_num, self.bits)?; + let root_page = Rc::new(self.get_page(root_page_num)?); + let subdbs = self.get_subdbs(Rc::clone(&root_page))?; + + let env = Environment::new() + .set_map_size(meta_data.mm_mapsize as usize) + .set_max_dbs(subdbs.len() as u32) + .open(dest)?; + + // Create the databases before we open a read-write transaction, since database + // creation requires its own read-write transaction, which would hang while + // awaiting completion of an existing one. + env.create_db(None, meta_data.mm_dbs.main.md_flags)?; + for (subdb_name, subdb_info) in &subdbs { + env.create_db(Some(str::from_utf8(&subdb_name)?), subdb_info.md_flags)?; + } + + // Now open the read-write transaction that we'll use to migrate all the data. + let mut txn = env.begin_rw_txn()?; + + // Migrate the main database. + let pairs = self.get_pairs(root_page)?; + let db = env.open_db(None)?; + for (key, value) in pairs { + // If we knew that the target database was empty, we could specify + // WriteFlags::APPEND to speed up the migration. + txn.put(db, &key, &value, WriteFlags::empty())?; + } + + // Migrate subdatabases. + for (subdb_name, subdb_info) in &subdbs { + let root_page = Rc::new(self.get_page(subdb_info.md_root)?); + let pairs = self.get_pairs(root_page)?; + let db = env.open_db(Some(str::from_utf8(&subdb_name)?))?; + for (key, value) in pairs { + // If we knew that the target database was empty, we could specify + // WriteFlags::APPEND to speed up the migration. 
+ txn.put(db, &key, &value, WriteFlags::empty())?; + } + } + + txn.commit()?; + + Ok(()) + } + + fn get_subdbs(&mut self, root_page: Rc<Page>) -> MigrateResult<HashMap<Vec<u8>, Database>> { + let mut subdbs = HashMap::new(); + let mut pages = vec![root_page]; + + while let Some(page) = pages.pop() { + match &*page { + Page::BRANCH(nodes) => { + for branch in nodes { + pages.push(Rc::new(self.get_page(branch.mp_pgno)?)); + } + }, + Page::LEAF(nodes) => { + for leaf in nodes { + if let LeafNode::SubData { + key, + db, + .. + } = leaf + { + subdbs.insert(key.to_vec(), db.clone()); + }; + } + }, + _ => { + return Err(MigrateError::UnexpectedPageVariant); + }, + } + } + + Ok(subdbs) + } + + fn get_pairs(&mut self, root_page: Rc<Page>) -> MigrateResult<BTreeMap<Vec<u8>, Vec<u8>>> { + let mut pairs = BTreeMap::new(); + let mut pages = vec![root_page]; + + while let Some(page) = pages.pop() { + match &*page { + Page::BRANCH(nodes) => { + for branch in nodes { + pages.push(Rc::new(self.get_page(branch.mp_pgno)?)); + } + }, + Page::LEAF(nodes) => { + for leaf in nodes { + match leaf { + LeafNode::Regular { + key, + value, + .. + } => { + pairs.insert(key.to_vec(), value.to_vec()); + }, + LeafNode::BigData { + mv_size, + key, + overflow_pgno, + .. + } => { + // Perhaps we could reduce memory consumption during a + // migration by waiting to read big data until it's time + // to write it to the new database. + let value = self.read_data( + *overflow_pgno * u64::from(PAGESIZE) + page_header_size(self.bits), + *mv_size as usize, + )?; + pairs.insert(key.to_vec(), value); + }, + LeafNode::SubData { + .. + } => { + // We don't include subdatabase leaves in pairs, since + // there's no architecture-neutral representation of them, + // and in any case they're meta-data that should get + // recreated when we migrate the subdatabases themselves. 
+ // + // If we wanted to create identical dumps to those + // produced by `mdb_dump`, however, we could allow + // consumers to specify that they'd like to include these + // records. + }, + }; + } + }, + _ => { + return Err(MigrateError::UnexpectedPageVariant); + }, + } + } + + Ok(pairs) + } + + fn read_data(&mut self, offset: u64, size: usize) -> MigrateResult<Vec<u8>> { + self.file.seek(SeekFrom::Start(offset))?; + let mut buf: Vec<u8> = vec![0; size]; + self.file.read_exact(&mut buf[0..size])?; + Ok(buf.to_vec()) + } + + fn get_page(&mut self, page_no: u64) -> MigrateResult<Page> { + Page::new(self.read_data(page_no * u64::from(PAGESIZE), usize::from(PAGESIZE))?, self.bits) + } + + fn get_meta_data(&mut self) -> MigrateResult<MetaData> { + let (page0, page1) = (self.get_page(0)?, self.get_page(1)?); + + match (page0, page1) { + (Page::META(meta0), Page::META(meta1)) => { + let meta = if meta1.mm_txnid > meta0.mm_txnid { + meta1 + } else { + meta0 + }; + if meta.mm_magic != 0xBE_EF_C0_DE { + return Err(MigrateError::InvalidMagicNum); + } + if meta.mm_version != 1 && meta.mm_version != 999 { + return Err(MigrateError::InvalidDataVersion); + } + Ok(meta) + }, + _ => Err(MigrateError::UnexpectedPageVariant), + } + } +} + +#[cfg(test)] +mod tests { + use super::*; + + use std::{ + env, + fs, + mem::size_of, + }; + + use lmdb::{ + Environment, + Error as LmdbError, + }; + use tempfile::{ + tempdir, + tempfile, + }; + + fn compare_files(ref_file: &mut File, new_file: &mut File) -> MigrateResult<()> { + ref_file.seek(SeekFrom::Start(0))?; + new_file.seek(SeekFrom::Start(0))?; + + let ref_buf = &mut [0; 1024]; + let new_buf = &mut [0; 1024]; + + loop { + match ref_file.read(ref_buf) { + Err(err) => panic!(err), + Ok(ref_len) => { + match new_file.read(new_buf) { + Err(err) => panic!(err), + Ok(new_len) => { + assert_eq!(ref_len, new_len); + if ref_len == 0 { + break; + }; + assert_eq!(ref_buf[0..ref_len], new_buf[0..new_len]); + }, + } + }, + } + } + + Ok(()) + } + + 
#[test] + fn test_dump_32() -> MigrateResult<()> { + let cwd = env::current_dir()?; + let cwd = cwd.to_str().ok_or(MigrateError::StringConversionError)?; + let test_env_path: PathBuf = [cwd, "tests", "envs", "ref_env_32"].iter().collect(); + + // Dump data from the test env to a new dump file. + let mut migrator = Migrator::new(&test_env_path)?; + let mut new_dump_file = tempfile()?; + migrator.dump(None, &new_dump_file)?; + + // Open the reference dump file. + let ref_dump_file_path: PathBuf = [cwd, "tests", "envs", "ref_dump.txt"].iter().collect(); + let mut ref_dump_file = File::open(ref_dump_file_path)?; + + // Compare the new dump file to the reference dump file. + compare_files(&mut ref_dump_file, &mut new_dump_file)?; + + Ok(()) + } + + #[test] + fn test_dump_32_subdb() -> MigrateResult<()> { + let cwd = env::current_dir()?; + let cwd = cwd.to_str().ok_or(MigrateError::StringConversionError)?; + let test_env_path: PathBuf = [cwd, "tests", "envs", "ref_env_32"].iter().collect(); + + // Dump data from the test env to a new dump file. + let mut migrator = Migrator::new(&test_env_path)?; + let mut new_dump_file = tempfile()?; + migrator.dump(Some("subdb"), &new_dump_file)?; + + // Open the reference dump file. + let ref_dump_file_path: PathBuf = [cwd, "tests", "envs", "ref_dump_subdb.txt"].iter().collect(); + let mut ref_dump_file = File::open(ref_dump_file_path)?; + + // Compare the new dump file to the reference dump file. + compare_files(&mut ref_dump_file, &mut new_dump_file)?; + + Ok(()) + } + + #[test] + fn test_dump_64() -> MigrateResult<()> { + let cwd = env::current_dir()?; + let cwd = cwd.to_str().ok_or(MigrateError::StringConversionError)?; + let test_env_path: PathBuf = [cwd, "tests", "envs", "ref_env_64"].iter().collect(); + + // Dump data from the test env to a new dump file. + let mut migrator = Migrator::new(&test_env_path)?; + let mut new_dump_file = tempfile()?; + migrator.dump(None, &new_dump_file)?; + + // Open the reference dump file. 
+ let ref_dump_file_path: PathBuf = [cwd, "tests", "envs", "ref_dump.txt"].iter().collect(); + let mut ref_dump_file = File::open(ref_dump_file_path)?; + + // Compare the new dump file to the reference dump file. + compare_files(&mut ref_dump_file, &mut new_dump_file)?; + + Ok(()) + } + + #[test] + fn test_dump_64_subdb() -> MigrateResult<()> { + let cwd = env::current_dir()?; + let cwd = cwd.to_str().ok_or(MigrateError::StringConversionError)?; + let test_env_path: PathBuf = [cwd, "tests", "envs", "ref_env_64"].iter().collect(); + + // Dump data from the test env to a new dump file. + let mut migrator = Migrator::new(&test_env_path)?; + let mut new_dump_file = tempfile()?; + migrator.dump(Some("subdb"), &new_dump_file)?; + + // Open the reference dump file. + let ref_dump_file_path: PathBuf = [cwd, "tests", "envs", "ref_dump_subdb.txt"].iter().collect(); + let mut ref_dump_file = File::open(ref_dump_file_path)?; + + // Compare the new dump file to the reference dump file. + compare_files(&mut ref_dump_file, &mut new_dump_file)?; + + Ok(()) + } + + #[test] + fn test_migrate_64() -> MigrateResult<()> { + let cwd = env::current_dir()?; + let cwd = cwd.to_str().ok_or(MigrateError::StringConversionError)?; + let test_env_path: PathBuf = [cwd, "tests", "envs", "ref_env_64"].iter().collect(); + + // Migrate data from the old env to a new one. + let new_env = tempdir()?; + let mut migrator = Migrator::new(&test_env_path)?; + migrator.migrate(new_env.path())?; + + // Dump data from the new env to a new dump file. + let mut migrator = Migrator::new(&new_env.path())?; + let mut new_dump_file = tempfile()?; + migrator.dump(Some("subdb"), &new_dump_file)?; + + // Open the reference dump file. + let ref_dump_file_path: PathBuf = [cwd, "tests", "envs", "ref_dump_subdb.txt"].iter().collect(); + let mut ref_dump_file = File::open(ref_dump_file_path)?; + + // Compare the new dump file to the reference dump file. 
+ compare_files(&mut ref_dump_file, &mut new_dump_file)?; + + Ok(()) + } + + #[test] + fn test_migrate_32() -> MigrateResult<()> { + let cwd = env::current_dir()?; + let cwd = cwd.to_str().ok_or(MigrateError::StringConversionError)?; + let test_env_path: PathBuf = [cwd, "tests", "envs", "ref_env_32"].iter().collect(); + + // Migrate data from the old env to a new one. + let new_env = tempdir()?; + let mut migrator = Migrator::new(&test_env_path)?; + migrator.migrate(new_env.path())?; + + // Dump data from the new env to a new dump file. + let mut migrator = Migrator::new(&new_env.path())?; + let mut new_dump_file = tempfile()?; + migrator.dump(Some("subdb"), &new_dump_file)?; + + // Open the reference dump file. + let ref_dump_file_path: PathBuf = [cwd, "tests", "envs", "ref_dump_subdb.txt"].iter().collect(); + let mut ref_dump_file = File::open(ref_dump_file_path)?; + + // Compare the new dump file to the reference dump file. + compare_files(&mut ref_dump_file, &mut new_dump_file)?; + + Ok(()) + } + + #[test] + fn test_migrate_and_replace() -> MigrateResult<()> { + let test_env_name = match size_of::<usize>() { + 4 => "ref_env_64", + 8 => "ref_env_32", + _ => panic!("only 32- and 64-bit depths are supported"), + }; + + let cwd = env::current_dir()?; + let cwd = cwd.to_str().ok_or(MigrateError::StringConversionError)?; + let test_env_path: PathBuf = [cwd, "tests", "envs", test_env_name].iter().collect(); + + let old_env = tempdir()?; + fs::copy(test_env_path.join("data.mdb"), old_env.path().join("data.mdb"))?; + fs::copy(test_env_path.join("lock.mdb"), old_env.path().join("lock.mdb"))?; + + // Confirm that it isn't possible to open the old environment with LMDB. + assert_eq!( + match Environment::new().open(&old_env.path()) { + Err(err) => err, + _ => panic!("opening the environment should have failed"), + }, + LmdbError::Invalid + ); + + // Migrate data from the old env to a new one. 
+ let new_env = tempdir()?; + let mut migrator = Migrator::new(&old_env.path())?; + migrator.migrate(new_env.path())?; + + // Dump data from the new env to a new dump file. + let mut migrator = Migrator::new(&new_env.path())?; + let mut new_dump_file = tempfile()?; + migrator.dump(Some("subdb"), &new_dump_file)?; + + // Open the reference dump file. + let ref_dump_file_path: PathBuf = [cwd, "tests", "envs", "ref_dump_subdb.txt"].iter().collect(); + let mut ref_dump_file = File::open(ref_dump_file_path)?; + + // Compare the new dump file to the reference dump file. + compare_files(&mut ref_dump_file, &mut new_dump_file)?; + + // Overwrite the old env's files with the new env's files and confirm that it's now + // possible to open the old env with LMDB. + fs::copy(new_env.path().join("data.mdb"), old_env.path().join("data.mdb"))?; + fs::copy(new_env.path().join("lock.mdb"), old_env.path().join("lock.mdb"))?; + assert!(Environment::new().open(&old_env.path()).is_ok()); + + Ok(()) + } +} diff --git a/third_party/rust/rkv/src/backend/impl_lmdb/arch_migrator_error.rs b/third_party/rust/rkv/src/backend/impl_lmdb/arch_migrator_error.rs new file mode 100644 index 0000000000..7b56f5e96b --- /dev/null +++ b/third_party/rust/rkv/src/backend/impl_lmdb/arch_migrator_error.rs @@ -0,0 +1,107 @@ +// Copyright 2018-2019 Mozilla +// +// Licensed under the Apache License, Version 2.0 (the "License"); you may not use +// this file except in compliance with the License. You may obtain a copy of the +// License at http://www.apache.org/licenses/LICENSE-2.0 +// Unless required by applicable law or agreed to in writing, software distributed +// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR +// CONDITIONS OF ANY KIND, either express or implied. See the License for the +// specific language governing permissions and limitations under the License. 
+ +use std::{ + io, + num, + str, +}; + +use failure::Fail; + +#[derive(Debug, Fail)] +pub enum MigrateError { + #[fail(display = "database not found: {:?}", _0)] + DatabaseNotFound(String), + + #[fail(display = "{}", _0)] + FromString(String), + + #[fail(display = "couldn't determine bit depth")] + IndeterminateBitDepth, + + #[fail(display = "I/O error: {:?}", _0)] + IoError(io::Error), + + #[fail(display = "invalid DatabaseFlags bits")] + InvalidDatabaseBits, + + #[fail(display = "invalid data version")] + InvalidDataVersion, + + #[fail(display = "invalid magic number")] + InvalidMagicNum, + + #[fail(display = "invalid NodeFlags bits")] + InvalidNodeBits, + + #[fail(display = "invalid PageFlags bits")] + InvalidPageBits, + + #[fail(display = "invalid page number")] + InvalidPageNum, + + #[fail(display = "lmdb backend error: {}", _0)] + LmdbError(lmdb::Error), + + #[fail(display = "string conversion error")] + StringConversionError, + + #[fail(display = "TryFromInt error: {:?}", _0)] + TryFromIntError(num::TryFromIntError), + + #[fail(display = "unexpected Page variant")] + UnexpectedPageVariant, + + #[fail(display = "unexpected PageHeader variant")] + UnexpectedPageHeaderVariant, + + #[fail(display = "unsupported PageHeader variant")] + UnsupportedPageHeaderVariant, + + #[fail(display = "UTF8 error: {:?}", _0)] + Utf8Error(str::Utf8Error), +} + +impl From<io::Error> for MigrateError { + fn from(e: io::Error) -> MigrateError { + MigrateError::IoError(e) + } +} + +impl From<str::Utf8Error> for MigrateError { + fn from(e: str::Utf8Error) -> MigrateError { + MigrateError::Utf8Error(e) + } +} + +impl From<num::TryFromIntError> for MigrateError { + fn from(e: num::TryFromIntError) -> MigrateError { + MigrateError::TryFromIntError(e) + } +} + +impl From<&str> for MigrateError { + fn from(e: &str) -> MigrateError { + MigrateError::FromString(e.to_string()) + } +} + +impl From<String> for MigrateError { + fn from(e: String) -> MigrateError { + MigrateError::FromString(e) 
+ } +} + +impl From<lmdb::Error> for MigrateError { + fn from(e: lmdb::Error) -> MigrateError { + MigrateError::LmdbError(e) + } +} diff --git a/third_party/rust/rkv/src/backend/impl_lmdb/cursor.rs b/third_party/rust/rkv/src/backend/impl_lmdb/cursor.rs new file mode 100644 index 0000000000..760abce451 --- /dev/null +++ b/third_party/rust/rkv/src/backend/impl_lmdb/cursor.rs @@ -0,0 +1,69 @@ +// Copyright 2018-2019 Mozilla +// +// Licensed under the Apache License, Version 2.0 (the "License"); you may not use +// this file except in compliance with the License. You may obtain a copy of the +// License at http://www.apache.org/licenses/LICENSE-2.0 +// Unless required by applicable law or agreed to in writing, software distributed +// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR +// CONDITIONS OF ANY KIND, either express or implied. See the License for the +// specific language governing permissions and limitations under the License. + +use lmdb::Cursor; + +use super::IterImpl; +use crate::backend::traits::BackendRoCursor; + +#[derive(Debug)] +pub struct RoCursorImpl<'c>(pub(crate) lmdb::RoCursor<'c>); + +impl<'c> BackendRoCursor<'c> for RoCursorImpl<'c> { + type Iter = IterImpl<'c, lmdb::RoCursor<'c>>; + + fn into_iter(self) -> Self::Iter { + // We call RoCursor.iter() instead of RoCursor.iter_start() because + // the latter panics when there are no items in the store, whereas the + // former returns an iterator that yields no items. And since we create + // the Cursor and don't change its position, we can be sure that a call + // to Cursor.iter() will start at the beginning. 
+ IterImpl::new(self.0, lmdb::RoCursor::iter) + } + + fn into_iter_from<K>(self, key: K) -> Self::Iter + where + K: AsRef<[u8]> + 'c, + { + IterImpl::new(self.0, |cursor| cursor.iter_from(key)) + } + + fn into_iter_dup_of<K>(self, key: K) -> Self::Iter + where + K: AsRef<[u8]> + 'c, + { + IterImpl::new(self.0, |cursor| cursor.iter_dup_of(key)) + } +} + +#[derive(Debug)] +pub struct RwCursorImpl<'c>(pub(crate) lmdb::RwCursor<'c>); + +impl<'c> BackendRoCursor<'c> for RwCursorImpl<'c> { + type Iter = IterImpl<'c, lmdb::RwCursor<'c>>; + + fn into_iter(self) -> Self::Iter { + IterImpl::new(self.0, lmdb::RwCursor::iter) + } + + fn into_iter_from<K>(self, key: K) -> Self::Iter + where + K: AsRef<[u8]> + 'c, + { + IterImpl::new(self.0, |cursor| cursor.iter_from(key)) + } + + fn into_iter_dup_of<K>(self, key: K) -> Self::Iter + where + K: AsRef<[u8]> + 'c, + { + IterImpl::new(self.0, |cursor| cursor.iter_dup_of(key)) + } +} diff --git a/third_party/rust/rkv/src/backend/impl_lmdb/database.rs b/third_party/rust/rkv/src/backend/impl_lmdb/database.rs new file mode 100644 index 0000000000..8edee5c2c3 --- /dev/null +++ b/third_party/rust/rkv/src/backend/impl_lmdb/database.rs @@ -0,0 +1,16 @@ +// Copyright 2018-2019 Mozilla +// +// Licensed under the Apache License, Version 2.0 (the "License"); you may not use +// this file except in compliance with the License. You may obtain a copy of the +// License at http://www.apache.org/licenses/LICENSE-2.0 +// Unless required by applicable law or agreed to in writing, software distributed +// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR +// CONDITIONS OF ANY KIND, either express or implied. See the License for the +// specific language governing permissions and limitations under the License. 
+ +use crate::backend::traits::BackendDatabase; + +#[derive(Debug, Eq, PartialEq, Copy, Clone)] +pub struct DatabaseImpl(pub(crate) lmdb::Database); + +impl BackendDatabase for DatabaseImpl {} diff --git a/third_party/rust/rkv/src/backend/impl_lmdb/environment.rs b/third_party/rust/rkv/src/backend/impl_lmdb/environment.rs new file mode 100644 index 0000000000..608df5c311 --- /dev/null +++ b/third_party/rust/rkv/src/backend/impl_lmdb/environment.rs @@ -0,0 +1,275 @@ +// Copyright 2018-2019 Mozilla +// +// Licensed under the Apache License, Version 2.0 (the "License"); you may not use +// this file except in compliance with the License. You may obtain a copy of the +// License at http://www.apache.org/licenses/LICENSE-2.0 +// Unless required by applicable law or agreed to in writing, software distributed +// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR +// CONDITIONS OF ANY KIND, either express or implied. See the License for the +// specific language governing permissions and limitations under the License. 
+ +use std::{ + fs, + path::{ + Path, + PathBuf, + }, +}; + +use lmdb::Error as LmdbError; + +use super::{ + DatabaseFlagsImpl, + DatabaseImpl, + EnvironmentFlagsImpl, + ErrorImpl, + InfoImpl, + RoTransactionImpl, + RwTransactionImpl, + StatImpl, +}; +use crate::backend::traits::{ + BackendEnvironment, + BackendEnvironmentBuilder, + BackendInfo, + BackendIter, + BackendRoCursor, + BackendRoCursorTransaction, + BackendStat, +}; + +#[derive(Debug, PartialEq, Eq, Copy, Clone)] +pub struct EnvironmentBuilderImpl { + builder: lmdb::EnvironmentBuilder, + env_path_type: EnvironmentPathType, + env_lock_type: EnvironmentLockType, + env_db_type: EnvironmentDefaultDbType, + make_dir_if_needed: bool, +} + +impl<'b> BackendEnvironmentBuilder<'b> for EnvironmentBuilderImpl { + type Environment = EnvironmentImpl; + type Error = ErrorImpl; + type Flags = EnvironmentFlagsImpl; + + fn new() -> EnvironmentBuilderImpl { + EnvironmentBuilderImpl { + builder: lmdb::Environment::new(), + env_path_type: EnvironmentPathType::SubDir, + env_lock_type: EnvironmentLockType::Lockfile, + env_db_type: EnvironmentDefaultDbType::SingleDatabase, + make_dir_if_needed: false, + } + } + + fn set_flags<T>(&mut self, flags: T) -> &mut Self + where + T: Into<Self::Flags>, + { + let flags = flags.into(); + if flags.0 == lmdb::EnvironmentFlags::NO_SUB_DIR { + self.env_path_type = EnvironmentPathType::NoSubDir; + } + if flags.0 == lmdb::EnvironmentFlags::NO_LOCK { + self.env_lock_type = EnvironmentLockType::NoLockfile; + } + self.builder.set_flags(flags.0); + self + } + + fn set_max_readers(&mut self, max_readers: u32) -> &mut Self { + self.builder.set_max_readers(max_readers); + self + } + + fn set_max_dbs(&mut self, max_dbs: u32) -> &mut Self { + if max_dbs > 0 { + self.env_db_type = EnvironmentDefaultDbType::MultipleNamedDatabases + } + self.builder.set_max_dbs(max_dbs); + self + } + + fn set_map_size(&mut self, size: usize) -> &mut Self { + self.builder.set_map_size(size); + self + } + + fn 
set_make_dir_if_needed(&mut self, make_dir_if_needed: bool) -> &mut Self { + self.make_dir_if_needed = make_dir_if_needed; + self + } + + fn set_discard_if_corrupted(&mut self, _discard_if_corrupted: bool) -> &mut Self { + // Unfortunately, when opening a database, LMDB doesn't handle all the ways it could have + // been corrupted. Prefer using the `SafeMode` backend if this is important. + unimplemented!(); + } + + fn open(&self, path: &Path) -> Result<Self::Environment, Self::Error> { + match self.env_path_type { + EnvironmentPathType::NoSubDir => { + if !path.is_file() { + return Err(ErrorImpl::UnsuitableEnvironmentPath(path.into())); + } + }, + EnvironmentPathType::SubDir => { + if !path.is_dir() { + if !self.make_dir_if_needed { + return Err(ErrorImpl::UnsuitableEnvironmentPath(path.into())); + } + fs::create_dir_all(path)?; + } + }, + } + + self.builder.open(path).map_err(ErrorImpl::LmdbError).and_then(|lmdbenv| { + EnvironmentImpl::new(path, self.env_path_type, self.env_lock_type, self.env_db_type, lmdbenv) + }) + } +} + +#[derive(Debug, PartialEq, Eq, Copy, Clone)] +pub enum EnvironmentPathType { + SubDir, + NoSubDir, +} + +#[derive(Debug, PartialEq, Eq, Copy, Clone)] +pub enum EnvironmentLockType { + Lockfile, + NoLockfile, +} + +#[derive(Debug, PartialEq, Eq, Copy, Clone)] +pub enum EnvironmentDefaultDbType { + SingleDatabase, + MultipleNamedDatabases, +} + +#[derive(Debug)] +pub struct EnvironmentImpl { + path: PathBuf, + env_path_type: EnvironmentPathType, + env_lock_type: EnvironmentLockType, + env_db_type: EnvironmentDefaultDbType, + lmdbenv: lmdb::Environment, +} + +impl EnvironmentImpl { + pub(crate) fn new( + path: &Path, + env_path_type: EnvironmentPathType, + env_lock_type: EnvironmentLockType, + env_db_type: EnvironmentDefaultDbType, + lmdbenv: lmdb::Environment, + ) -> Result<EnvironmentImpl, ErrorImpl> { + Ok(EnvironmentImpl { + path: path.to_path_buf(), + env_path_type, + env_lock_type, + env_db_type, + lmdbenv, + }) + } +} + +impl<'e> 
BackendEnvironment<'e> for EnvironmentImpl { + type Database = DatabaseImpl; + type Error = ErrorImpl; + type Flags = DatabaseFlagsImpl; + type Info = InfoImpl; + type RoTransaction = RoTransactionImpl<'e>; + type RwTransaction = RwTransactionImpl<'e>; + type Stat = StatImpl; + + fn get_dbs(&self) -> Result<Vec<Option<String>>, Self::Error> { + if self.env_db_type == EnvironmentDefaultDbType::SingleDatabase { + return Ok(vec![None]); + } + let db = self.lmdbenv.open_db(None).map(DatabaseImpl).map_err(ErrorImpl::LmdbError)?; + let reader = self.begin_ro_txn()?; + let cursor = reader.open_ro_cursor(&db)?; + let mut iter = cursor.into_iter(); + let mut store = vec![]; + while let Some(result) = iter.next() { + let (key, _) = result?; + let name = String::from_utf8(key.to_owned()).map_err(|_| ErrorImpl::LmdbError(lmdb::Error::Corrupted))?; + store.push(Some(name)); + } + Ok(store) + } + + fn open_db(&self, name: Option<&str>) -> Result<Self::Database, Self::Error> { + self.lmdbenv.open_db(name).map(DatabaseImpl).map_err(ErrorImpl::LmdbError) + } + + fn create_db(&self, name: Option<&str>, flags: Self::Flags) -> Result<Self::Database, Self::Error> { + self.lmdbenv.create_db(name, flags.0).map(DatabaseImpl).map_err(ErrorImpl::LmdbError) + } + + fn begin_ro_txn(&'e self) -> Result<Self::RoTransaction, Self::Error> { + self.lmdbenv.begin_ro_txn().map(RoTransactionImpl).map_err(ErrorImpl::LmdbError) + } + + fn begin_rw_txn(&'e self) -> Result<Self::RwTransaction, Self::Error> { + self.lmdbenv.begin_rw_txn().map(RwTransactionImpl).map_err(ErrorImpl::LmdbError) + } + + fn sync(&self, force: bool) -> Result<(), Self::Error> { + self.lmdbenv.sync(force).map_err(ErrorImpl::LmdbError) + } + + fn stat(&self) -> Result<Self::Stat, Self::Error> { + self.lmdbenv.stat().map(StatImpl).map_err(ErrorImpl::LmdbError) + } + + fn info(&self) -> Result<Self::Info, Self::Error> { + self.lmdbenv.info().map(InfoImpl).map_err(ErrorImpl::LmdbError) + } + + fn freelist(&self) -> Result<usize, 
Self::Error> { + self.lmdbenv.freelist().map_err(ErrorImpl::LmdbError) + } + + fn load_ratio(&self) -> Result<Option<f32>, Self::Error> { + let stat = self.stat()?; + let info = self.info()?; + let freelist = self.freelist()?; + + let last_pgno = info.last_pgno() + 1; // pgno is 0 based. + let total_pgs = info.map_size() / stat.page_size(); + if freelist > last_pgno { + return Err(ErrorImpl::LmdbError(LmdbError::Corrupted)); + } + let used_pgs = last_pgno - freelist; + Ok(Some(used_pgs as f32 / total_pgs as f32)) + } + + fn set_map_size(&self, size: usize) -> Result<(), Self::Error> { + self.lmdbenv.set_map_size(size).map_err(ErrorImpl::LmdbError) + } + + fn get_files_on_disk(&self) -> Vec<PathBuf> { + let mut store = vec![]; + + if self.env_path_type == EnvironmentPathType::NoSubDir { + // The option NO_SUB_DIR could change the default directory layout; therefore this should + // probably return the path used to create environment, along with the custom lockfile + // when available. + unimplemented!(); + } + + let mut db_filename = self.path.clone(); + db_filename.push("data.mdb"); + store.push(db_filename); + + if self.env_lock_type == EnvironmentLockType::Lockfile { + let mut lock_filename = self.path.clone(); + lock_filename.push("lock.mdb"); + store.push(lock_filename); + } + + store + } +} diff --git a/third_party/rust/rkv/src/backend/impl_lmdb/error.rs b/third_party/rust/rkv/src/backend/impl_lmdb/error.rs new file mode 100644 index 0000000000..646e8f3fe3 --- /dev/null +++ b/third_party/rust/rkv/src/backend/impl_lmdb/error.rs @@ -0,0 +1,62 @@ +// Copyright 2018-2019 Mozilla +// +// Licensed under the Apache License, Version 2.0 (the "License"); you may not use +// this file except in compliance with the License. 
You may obtain a copy of the +// License at http://www.apache.org/licenses/LICENSE-2.0 +// Unless required by applicable law or agreed to in writing, software distributed +// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR +// CONDITIONS OF ANY KIND, either express or implied. See the License for the +// specific language governing permissions and limitations under the License. + +use std::{ + fmt, + io, + path::PathBuf, +}; + +use crate::{ + backend::traits::BackendError, + error::StoreError, +}; + +#[derive(Debug)] +pub enum ErrorImpl { + LmdbError(lmdb::Error), + UnsuitableEnvironmentPath(PathBuf), + IoError(io::Error), +} + +impl BackendError for ErrorImpl {} + +impl fmt::Display for ErrorImpl { + fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result { + match self { + ErrorImpl::LmdbError(e) => e.fmt(fmt), + ErrorImpl::UnsuitableEnvironmentPath(_) => write!(fmt, "UnsuitableEnvironmentPath"), + ErrorImpl::IoError(e) => e.fmt(fmt), + } + } +} + +impl Into<StoreError> for ErrorImpl { + fn into(self) -> StoreError { + match self { + ErrorImpl::LmdbError(lmdb::Error::Corrupted) => StoreError::DatabaseCorrupted, + ErrorImpl::LmdbError(lmdb::Error::NotFound) => StoreError::KeyValuePairNotFound, + ErrorImpl::LmdbError(lmdb::Error::BadValSize) => StoreError::KeyValuePairBadSize, + ErrorImpl::LmdbError(lmdb::Error::Invalid) => StoreError::FileInvalid, + ErrorImpl::LmdbError(lmdb::Error::MapFull) => StoreError::MapFull, + ErrorImpl::LmdbError(lmdb::Error::DbsFull) => StoreError::DbsFull, + ErrorImpl::LmdbError(lmdb::Error::ReadersFull) => StoreError::ReadersFull, + ErrorImpl::LmdbError(error) => StoreError::LmdbError(error), + ErrorImpl::UnsuitableEnvironmentPath(path) => StoreError::UnsuitableEnvironmentPath(path), + ErrorImpl::IoError(error) => StoreError::IoError(error), + } + } +} + +impl From<io::Error> for ErrorImpl { + fn from(e: io::Error) -> ErrorImpl { + ErrorImpl::IoError(e) + } +} diff --git 
a/third_party/rust/rkv/src/backend/impl_lmdb/flags.rs b/third_party/rust/rkv/src/backend/impl_lmdb/flags.rs new file mode 100644 index 0000000000..d4f19c8c9f --- /dev/null +++ b/third_party/rust/rkv/src/backend/impl_lmdb/flags.rs @@ -0,0 +1,132 @@ +// Copyright 2018-2019 Mozilla +// +// Licensed under the Apache License, Version 2.0 (the "License"); you may not use +// this file except in compliance with the License. You may obtain a copy of the +// License at http://www.apache.org/licenses/LICENSE-2.0 +// Unless required by applicable law or agreed to in writing, software distributed +// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR +// CONDITIONS OF ANY KIND, either express or implied. See the License for the +// specific language governing permissions and limitations under the License. + +use crate::backend::{ + common::{ + DatabaseFlags, + EnvironmentFlags, + WriteFlags, + }, + traits::{ + BackendDatabaseFlags, + BackendEnvironmentFlags, + BackendFlags, + BackendWriteFlags, + }, +}; + +#[derive(Debug, Eq, PartialEq, Copy, Clone, Default)] +pub struct EnvironmentFlagsImpl(pub(crate) lmdb::EnvironmentFlags); + +impl BackendFlags for EnvironmentFlagsImpl { + fn empty() -> EnvironmentFlagsImpl { + EnvironmentFlagsImpl(lmdb::EnvironmentFlags::empty()) + } +} + +impl BackendEnvironmentFlags for EnvironmentFlagsImpl { + fn set(&mut self, flag: EnvironmentFlags, value: bool) { + self.0.set(flag.into(), value) + } +} + +impl Into<EnvironmentFlagsImpl> for EnvironmentFlags { + fn into(self) -> EnvironmentFlagsImpl { + EnvironmentFlagsImpl(self.into()) + } +} + +impl Into<lmdb::EnvironmentFlags> for EnvironmentFlags { + fn into(self) -> lmdb::EnvironmentFlags { + match self { + EnvironmentFlags::FIXED_MAP => lmdb::EnvironmentFlags::FIXED_MAP, + EnvironmentFlags::NO_SUB_DIR => lmdb::EnvironmentFlags::NO_SUB_DIR, + EnvironmentFlags::WRITE_MAP => lmdb::EnvironmentFlags::WRITE_MAP, + EnvironmentFlags::READ_ONLY => 
lmdb::EnvironmentFlags::READ_ONLY, + EnvironmentFlags::NO_META_SYNC => lmdb::EnvironmentFlags::NO_META_SYNC, + EnvironmentFlags::NO_SYNC => lmdb::EnvironmentFlags::NO_SYNC, + EnvironmentFlags::MAP_ASYNC => lmdb::EnvironmentFlags::MAP_ASYNC, + EnvironmentFlags::NO_TLS => lmdb::EnvironmentFlags::NO_TLS, + EnvironmentFlags::NO_LOCK => lmdb::EnvironmentFlags::NO_LOCK, + EnvironmentFlags::NO_READAHEAD => lmdb::EnvironmentFlags::NO_READAHEAD, + EnvironmentFlags::NO_MEM_INIT => lmdb::EnvironmentFlags::NO_MEM_INIT, + } + } +} + +#[derive(Debug, Eq, PartialEq, Copy, Clone, Default)] +pub struct DatabaseFlagsImpl(pub(crate) lmdb::DatabaseFlags); + +impl BackendFlags for DatabaseFlagsImpl { + fn empty() -> DatabaseFlagsImpl { + DatabaseFlagsImpl(lmdb::DatabaseFlags::empty()) + } +} + +impl BackendDatabaseFlags for DatabaseFlagsImpl { + fn set(&mut self, flag: DatabaseFlags, value: bool) { + self.0.set(flag.into(), value) + } +} + +impl Into<DatabaseFlagsImpl> for DatabaseFlags { + fn into(self) -> DatabaseFlagsImpl { + DatabaseFlagsImpl(self.into()) + } +} + +impl Into<lmdb::DatabaseFlags> for DatabaseFlags { + fn into(self) -> lmdb::DatabaseFlags { + match self { + DatabaseFlags::REVERSE_KEY => lmdb::DatabaseFlags::REVERSE_KEY, + #[cfg(feature = "db-dup-sort")] + DatabaseFlags::DUP_SORT => lmdb::DatabaseFlags::DUP_SORT, + #[cfg(feature = "db-dup-sort")] + DatabaseFlags::DUP_FIXED => lmdb::DatabaseFlags::DUP_FIXED, + #[cfg(feature = "db-int-key")] + DatabaseFlags::INTEGER_KEY => lmdb::DatabaseFlags::INTEGER_KEY, + DatabaseFlags::INTEGER_DUP => lmdb::DatabaseFlags::INTEGER_DUP, + DatabaseFlags::REVERSE_DUP => lmdb::DatabaseFlags::REVERSE_DUP, + } + } +} + +#[derive(Debug, Eq, PartialEq, Copy, Clone, Default)] +pub struct WriteFlagsImpl(pub(crate) lmdb::WriteFlags); + +impl BackendFlags for WriteFlagsImpl { + fn empty() -> WriteFlagsImpl { + WriteFlagsImpl(lmdb::WriteFlags::empty()) + } +} + +impl BackendWriteFlags for WriteFlagsImpl { + fn set(&mut self, flag: WriteFlags, 
value: bool) { + self.0.set(flag.into(), value) + } +} + +impl Into<WriteFlagsImpl> for WriteFlags { + fn into(self) -> WriteFlagsImpl { + WriteFlagsImpl(self.into()) + } +} + +impl Into<lmdb::WriteFlags> for WriteFlags { + fn into(self) -> lmdb::WriteFlags { + match self { + WriteFlags::NO_OVERWRITE => lmdb::WriteFlags::NO_OVERWRITE, + WriteFlags::NO_DUP_DATA => lmdb::WriteFlags::NO_DUP_DATA, + WriteFlags::CURRENT => lmdb::WriteFlags::CURRENT, + WriteFlags::APPEND => lmdb::WriteFlags::APPEND, + WriteFlags::APPEND_DUP => lmdb::WriteFlags::APPEND_DUP, + } + } +} diff --git a/third_party/rust/rkv/src/backend/impl_lmdb/info.rs b/third_party/rust/rkv/src/backend/impl_lmdb/info.rs new file mode 100644 index 0000000000..6188065c07 --- /dev/null +++ b/third_party/rust/rkv/src/backend/impl_lmdb/info.rs @@ -0,0 +1,35 @@ +// Copyright 2018-2019 Mozilla +// +// Licensed under the Apache License, Version 2.0 (the "License"); you may not use +// this file except in compliance with the License. You may obtain a copy of the +// License at http://www.apache.org/licenses/LICENSE-2.0 +// Unless required by applicable law or agreed to in writing, software distributed +// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR +// CONDITIONS OF ANY KIND, either express or implied. See the License for the +// specific language governing permissions and limitations under the License. 
+ +use crate::backend::traits::BackendInfo; + +pub struct InfoImpl(pub(crate) lmdb::Info); + +impl BackendInfo for InfoImpl { + fn map_size(&self) -> usize { + self.0.map_size() + } + + fn last_pgno(&self) -> usize { + self.0.last_pgno() + } + + fn last_txnid(&self) -> usize { + self.0.last_txnid() + } + + fn max_readers(&self) -> usize { + self.0.max_readers() as usize + } + + fn num_readers(&self) -> usize { + self.0.num_readers() as usize + } +} diff --git a/third_party/rust/rkv/src/backend/impl_lmdb/iter.rs b/third_party/rust/rkv/src/backend/impl_lmdb/iter.rs new file mode 100644 index 0000000000..c7df66b0bb --- /dev/null +++ b/third_party/rust/rkv/src/backend/impl_lmdb/iter.rs @@ -0,0 +1,41 @@ +// Copyright 2018-2019 Mozilla +// +// Licensed under the Apache License, Version 2.0 (the "License"); you may not use +// this file except in compliance with the License. You may obtain a copy of the +// License at http://www.apache.org/licenses/LICENSE-2.0 +// Unless required by applicable law or agreed to in writing, software distributed +// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR +// CONDITIONS OF ANY KIND, either express or implied. See the License for the +// specific language governing permissions and limitations under the License. + +use super::ErrorImpl; +use crate::backend::traits::BackendIter; + +pub struct IterImpl<'i, C> { + // LMDB semantics dictate that a cursor must be valid for the entire lifetime + // of an iterator. In other words, cursors must not be dropped while an + // iterator built from it is alive. Unfortunately, the LMDB crate API does + // not express this through the type system, so we must enforce it somehow. 
+ #[allow(dead_code)] + cursor: C, + iter: lmdb::Iter<'i>, +} + +impl<'i, C> IterImpl<'i, C> { + pub(crate) fn new(mut cursor: C, to_iter: impl FnOnce(&mut C) -> lmdb::Iter<'i>) -> IterImpl<'i, C> { + let iter = to_iter(&mut cursor); + IterImpl { + cursor, + iter, + } + } +} + +impl<'i, C> BackendIter<'i> for IterImpl<'i, C> { + type Error = ErrorImpl; + + #[allow(clippy::type_complexity)] + fn next(&mut self) -> Option<Result<(&'i [u8], &'i [u8]), Self::Error>> { + self.iter.next().map(|e| e.map_err(ErrorImpl::LmdbError)) + } +} diff --git a/third_party/rust/rkv/src/backend/impl_lmdb/stat.rs b/third_party/rust/rkv/src/backend/impl_lmdb/stat.rs new file mode 100644 index 0000000000..b0de8c5051 --- /dev/null +++ b/third_party/rust/rkv/src/backend/impl_lmdb/stat.rs @@ -0,0 +1,39 @@ +// Copyright 2018-2019 Mozilla +// +// Licensed under the Apache License, Version 2.0 (the "License"); you may not use +// this file except in compliance with the License. You may obtain a copy of the +// License at http://www.apache.org/licenses/LICENSE-2.0 +// Unless required by applicable law or agreed to in writing, software distributed +// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR +// CONDITIONS OF ANY KIND, either express or implied. See the License for the +// specific language governing permissions and limitations under the License. 
+ +use crate::backend::traits::BackendStat; + +pub struct StatImpl(pub(crate) lmdb::Stat); + +impl BackendStat for StatImpl { + fn page_size(&self) -> usize { + self.0.page_size() as usize + } + + fn depth(&self) -> usize { + self.0.depth() as usize + } + + fn branch_pages(&self) -> usize { + self.0.branch_pages() + } + + fn leaf_pages(&self) -> usize { + self.0.leaf_pages() + } + + fn overflow_pages(&self) -> usize { + self.0.overflow_pages() + } + + fn entries(&self) -> usize { + self.0.entries() + } +} diff --git a/third_party/rust/rkv/src/backend/impl_lmdb/transaction.rs b/third_party/rust/rkv/src/backend/impl_lmdb/transaction.rs new file mode 100644 index 0000000000..d63c5cb4c5 --- /dev/null +++ b/third_party/rust/rkv/src/backend/impl_lmdb/transaction.rs @@ -0,0 +1,95 @@ +// Copyright 2018-2019 Mozilla +// +// Licensed under the Apache License, Version 2.0 (the "License"); you may not use +// this file except in compliance with the License. You may obtain a copy of the +// License at http://www.apache.org/licenses/LICENSE-2.0 +// Unless required by applicable law or agreed to in writing, software distributed +// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR +// CONDITIONS OF ANY KIND, either express or implied. See the License for the +// specific language governing permissions and limitations under the License. 
+ +use lmdb::Transaction; + +use super::{ + DatabaseImpl, + ErrorImpl, + RoCursorImpl, + WriteFlagsImpl, +}; +use crate::backend::traits::{ + BackendRoCursorTransaction, + BackendRoTransaction, + BackendRwCursorTransaction, + BackendRwTransaction, +}; + +#[derive(Debug)] +pub struct RoTransactionImpl<'t>(pub(crate) lmdb::RoTransaction<'t>); + +impl<'t> BackendRoTransaction for RoTransactionImpl<'t> { + type Database = DatabaseImpl; + type Error = ErrorImpl; + + fn get(&self, db: &Self::Database, key: &[u8]) -> Result<&[u8], Self::Error> { + self.0.get(db.0, &key).map_err(ErrorImpl::LmdbError) + } + + fn abort(self) { + self.0.abort() + } +} + +impl<'t> BackendRoCursorTransaction<'t> for RoTransactionImpl<'t> { + type RoCursor = RoCursorImpl<'t>; + + fn open_ro_cursor(&'t self, db: &Self::Database) -> Result<Self::RoCursor, Self::Error> { + self.0.open_ro_cursor(db.0).map(RoCursorImpl).map_err(ErrorImpl::LmdbError) + } +} + +#[derive(Debug)] +pub struct RwTransactionImpl<'t>(pub(crate) lmdb::RwTransaction<'t>); + +impl<'t> BackendRwTransaction for RwTransactionImpl<'t> { + type Database = DatabaseImpl; + type Error = ErrorImpl; + type Flags = WriteFlagsImpl; + + fn get(&self, db: &Self::Database, key: &[u8]) -> Result<&[u8], Self::Error> { + self.0.get(db.0, &key).map_err(ErrorImpl::LmdbError) + } + + fn put(&mut self, db: &Self::Database, key: &[u8], value: &[u8], flags: Self::Flags) -> Result<(), Self::Error> { + self.0.put(db.0, &key, &value, flags.0).map_err(ErrorImpl::LmdbError) + } + + #[cfg(not(feature = "db-dup-sort"))] + fn del(&mut self, db: &Self::Database, key: &[u8]) -> Result<(), Self::Error> { + self.0.del(db.0, &key, None).map_err(ErrorImpl::LmdbError) + } + + #[cfg(feature = "db-dup-sort")] + fn del(&mut self, db: &Self::Database, key: &[u8], value: Option<&[u8]>) -> Result<(), Self::Error> { + self.0.del(db.0, &key, value).map_err(ErrorImpl::LmdbError) + } + + fn clear_db(&mut self, db: &Self::Database) -> Result<(), Self::Error> { + 
self.0.clear_db(db.0).map_err(ErrorImpl::LmdbError) + } + + fn commit(self) -> Result<(), Self::Error> { + self.0.commit().map_err(ErrorImpl::LmdbError) + } + + fn abort(self) { + self.0.abort() + } +} + +impl<'t> BackendRwCursorTransaction<'t> for RwTransactionImpl<'t> { + type RoCursor = RoCursorImpl<'t>; + + fn open_ro_cursor(&'t self, db: &Self::Database) -> Result<Self::RoCursor, Self::Error> { + self.0.open_ro_cursor(db.0).map(RoCursorImpl).map_err(ErrorImpl::LmdbError) + } +} diff --git a/third_party/rust/rkv/src/backend/impl_safe.rs b/third_party/rust/rkv/src/backend/impl_safe.rs new file mode 100644 index 0000000000..fc75ce3c01 --- /dev/null +++ b/third_party/rust/rkv/src/backend/impl_safe.rs @@ -0,0 +1,43 @@ +// Copyright 2018-2019 Mozilla +// +// Licensed under the Apache License, Version 2.0 (the "License"); you may not use +// this file except in compliance with the License. You may obtain a copy of the +// License at http://www.apache.org/licenses/LICENSE-2.0 +// Unless required by applicable law or agreed to in writing, software distributed +// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR +// CONDITIONS OF ANY KIND, either express or implied. See the License for the +// specific language governing permissions and limitations under the License. 
+ +mod cursor; +mod database; +mod environment; +mod error; +mod flags; +mod info; +mod iter; +mod snapshot; +mod stat; +mod transaction; + +pub use cursor::{ + RoCursorImpl, + RwCursorImpl, +}; +pub use database::DatabaseImpl; +pub use environment::{ + EnvironmentBuilderImpl, + EnvironmentImpl, +}; +pub use error::ErrorImpl; +pub use flags::{ + DatabaseFlagsImpl, + EnvironmentFlagsImpl, + WriteFlagsImpl, +}; +pub use info::InfoImpl; +pub use iter::IterImpl; +pub use stat::StatImpl; +pub use transaction::{ + RoTransactionImpl, + RwTransactionImpl, +}; diff --git a/third_party/rust/rkv/src/backend/impl_safe/cursor.rs b/third_party/rust/rkv/src/backend/impl_safe/cursor.rs new file mode 100644 index 0000000000..0daa84f7fe --- /dev/null +++ b/third_party/rust/rkv/src/backend/impl_safe/cursor.rs @@ -0,0 +1,94 @@ +// Copyright 2018-2019 Mozilla +// +// Licensed under the Apache License, Version 2.0 (the "License"); you may not use +// this file except in compliance with the License. You may obtain a copy of the +// License at http://www.apache.org/licenses/LICENSE-2.0 +// Unless required by applicable law or agreed to in writing, software distributed +// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR +// CONDITIONS OF ANY KIND, either express or implied. See the License for the +// specific language governing permissions and limitations under the License. 
+ +use super::{ + snapshot::Snapshot, + IterImpl, +}; +use crate::backend::traits::BackendRoCursor; + +#[derive(Debug)] +pub struct RoCursorImpl<'c>(pub(crate) &'c Snapshot); + +#[cfg(not(feature = "db-dup-sort"))] +impl<'c> BackendRoCursor<'c> for RoCursorImpl<'c> { + type Iter = IterImpl<'c>; + + fn into_iter(self) -> Self::Iter { + IterImpl(Box::new(self.0.iter())) + } + + fn into_iter_from<K>(self, key: K) -> Self::Iter + where + K: AsRef<[u8]> + 'c, + { + IterImpl(Box::new(self.0.iter().skip_while(move |&(k, _)| k < key.as_ref()))) + } + + fn into_iter_dup_of<K>(self, key: K) -> Self::Iter + where + K: AsRef<[u8]> + 'c, + { + IterImpl(Box::new(self.0.iter().filter(move |&(k, _)| k == key.as_ref()))) + } +} + +#[cfg(feature = "db-dup-sort")] +impl<'c> BackendRoCursor<'c> for RoCursorImpl<'c> { + type Iter = IterImpl<'c>; + + fn into_iter(self) -> Self::Iter { + let flattened = self.0.iter().flat_map(|(key, values)| values.map(move |value| (key, value))); + IterImpl(Box::new(flattened)) + } + + fn into_iter_from<K>(self, key: K) -> Self::Iter + where + K: AsRef<[u8]> + 'c, + { + let skipped = self.0.iter().skip_while(move |&(k, _)| k < key.as_ref()); + let flattened = skipped.flat_map(|(key, values)| values.map(move |value| (key, value))); + IterImpl(Box::new(flattened)) + } + + fn into_iter_dup_of<K>(self, key: K) -> Self::Iter + where + K: AsRef<[u8]> + 'c, + { + let filtered = self.0.iter().filter(move |&(k, _)| k == key.as_ref()); + let flattened = filtered.flat_map(|(key, values)| values.map(move |value| (key, value))); + IterImpl(Box::new(flattened)) + } +} + +#[derive(Debug)] +pub struct RwCursorImpl<'c>(&'c mut Snapshot); + +impl<'c> BackendRoCursor<'c> for RwCursorImpl<'c> { + type Iter = IterImpl<'c>; + + fn into_iter(self) -> Self::Iter { + unimplemented!() + } + + fn into_iter_from<K>(self, _key: K) -> Self::Iter + where + K: AsRef<[u8]> + 'c, + { + unimplemented!() + } + + fn into_iter_dup_of<K>(self, _key: K) -> Self::Iter + where + K: AsRef<[u8]> 
+ 'c, + { + unimplemented!() + } +} diff --git a/third_party/rust/rkv/src/backend/impl_safe/database.rs b/third_party/rust/rkv/src/backend/impl_safe/database.rs new file mode 100644 index 0000000000..9e883d3cfd --- /dev/null +++ b/third_party/rust/rkv/src/backend/impl_safe/database.rs @@ -0,0 +1,47 @@ +// Copyright 2018-2019 Mozilla +// +// Licensed under the Apache License, Version 2.0 (the "License"); you may not use +// this file except in compliance with the License. You may obtain a copy of the +// License at http://www.apache.org/licenses/LICENSE-2.0 +// Unless required by applicable law or agreed to in writing, software distributed +// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR +// CONDITIONS OF ANY KIND, either express or implied. See the License for the +// specific language governing permissions and limitations under the License. + +use id_arena::Id; +use serde_derive::{ + Deserialize, + Serialize, +}; + +use super::{ + snapshot::Snapshot, + DatabaseFlagsImpl, +}; +use crate::backend::traits::BackendDatabase; + +#[derive(Debug, Eq, PartialEq, Copy, Clone, Hash)] +pub struct DatabaseImpl(pub(crate) Id<Database>); + +impl BackendDatabase for DatabaseImpl {} + +#[derive(Debug, Serialize, Deserialize)] +pub struct Database { + snapshot: Snapshot, +} + +impl Database { + pub(crate) fn new(flags: Option<DatabaseFlagsImpl>, snapshot: Option<Snapshot>) -> Database { + Database { + snapshot: snapshot.unwrap_or_else(|| Snapshot::new(flags)), + } + } + + pub(crate) fn snapshot(&self) -> Snapshot { + self.snapshot.clone() + } + + pub(crate) fn replace(&mut self, snapshot: Snapshot) -> Snapshot { + std::mem::replace(&mut self.snapshot, snapshot) + } +} diff --git a/third_party/rust/rkv/src/backend/impl_safe/environment.rs b/third_party/rust/rkv/src/backend/impl_safe/environment.rs new file mode 100644 index 0000000000..3fff98cdc3 --- /dev/null +++ b/third_party/rust/rkv/src/backend/impl_safe/environment.rs @@ -0,0 +1,325 @@ +// 
Copyright 2018-2019 Mozilla +// +// Licensed under the Apache License, Version 2.0 (the "License"); you may not use +// this file except in compliance with the License. You may obtain a copy of the +// License at http://www.apache.org/licenses/LICENSE-2.0 +// Unless required by applicable law or agreed to in writing, software distributed +// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR +// CONDITIONS OF ANY KIND, either express or implied. See the License for the +// specific language governing permissions and limitations under the License. + +use std::{ + borrow::Cow, + collections::HashMap, + fs, + ops::DerefMut, + path::{ + Path, + PathBuf, + }, + sync::{ + Arc, + RwLock, + RwLockReadGuard, + RwLockWriteGuard, + }, +}; + +use id_arena::Arena; +use log::warn; + +use super::{ + database::Database, + DatabaseFlagsImpl, + DatabaseImpl, + EnvironmentFlagsImpl, + ErrorImpl, + InfoImpl, + RoTransactionImpl, + RwTransactionImpl, + StatImpl, +}; +use crate::backend::traits::{ + BackendEnvironment, + BackendEnvironmentBuilder, +}; + +const DEFAULT_DB_FILENAME: &str = "data.safe.bin"; + +type DatabaseArena = Arena<Database>; +type DatabaseNameMap = HashMap<Option<String>, DatabaseImpl>; + +#[derive(Debug, PartialEq, Eq, Copy, Clone)] +pub struct EnvironmentBuilderImpl { + flags: EnvironmentFlagsImpl, + max_readers: Option<usize>, + max_dbs: Option<usize>, + map_size: Option<usize>, + make_dir_if_needed: bool, + discard_if_corrupted: bool, +} + +impl<'b> BackendEnvironmentBuilder<'b> for EnvironmentBuilderImpl { + type Environment = EnvironmentImpl; + type Error = ErrorImpl; + type Flags = EnvironmentFlagsImpl; + + fn new() -> EnvironmentBuilderImpl { + EnvironmentBuilderImpl { + flags: EnvironmentFlagsImpl::empty(), + max_readers: None, + max_dbs: None, + map_size: None, + make_dir_if_needed: false, + discard_if_corrupted: false, + } + } + + fn set_flags<T>(&mut self, flags: T) -> &mut Self + where + T: Into<Self::Flags>, + { + self.flags = 
flags.into(); + self + } + + fn set_max_readers(&mut self, max_readers: u32) -> &mut Self { + self.max_readers = Some(max_readers as usize); + self + } + + fn set_max_dbs(&mut self, max_dbs: u32) -> &mut Self { + self.max_dbs = Some(max_dbs as usize); + self + } + + fn set_map_size(&mut self, map_size: usize) -> &mut Self { + self.map_size = Some(map_size); + self + } + + fn set_make_dir_if_needed(&mut self, make_dir_if_needed: bool) -> &mut Self { + self.make_dir_if_needed = make_dir_if_needed; + self + } + + fn set_discard_if_corrupted(&mut self, discard_if_corrupted: bool) -> &mut Self { + self.discard_if_corrupted = discard_if_corrupted; + self + } + + fn open(&self, path: &Path) -> Result<Self::Environment, Self::Error> { + // Technically NO_SUB_DIR should change these checks here, but they're both currently + // unimplemented with this storage backend. + if !path.is_dir() { + if !self.make_dir_if_needed { + return Err(ErrorImpl::UnsuitableEnvironmentPath(path.into())); + } + fs::create_dir_all(path)?; + } + let mut env = EnvironmentImpl::new(path, self.flags, self.max_readers, self.max_dbs, self.map_size)?; + env.read_from_disk(self.discard_if_corrupted)?; + Ok(env) + } +} + +#[derive(Debug)] +pub(crate) struct EnvironmentDbs { + pub(crate) arena: DatabaseArena, + pub(crate) name_map: DatabaseNameMap, +} + +#[derive(Debug)] +pub(crate) struct EnvironmentDbsRefMut<'a> { + pub(crate) arena: &'a mut DatabaseArena, + pub(crate) name_map: &'a mut DatabaseNameMap, +} + +impl<'a> From<&'a mut EnvironmentDbs> for EnvironmentDbsRefMut<'a> { + fn from(dbs: &mut EnvironmentDbs) -> EnvironmentDbsRefMut { + EnvironmentDbsRefMut { + arena: &mut dbs.arena, + name_map: &mut dbs.name_map, + } + } +} + +#[derive(Debug)] +pub struct EnvironmentImpl { + path: PathBuf, + max_dbs: usize, + dbs: RwLock<EnvironmentDbs>, + ro_txns: Arc<()>, + rw_txns: Arc<()>, +} + +impl EnvironmentImpl { + fn serialize(&self) -> Result<Vec<u8>, ErrorImpl> { + let dbs = self.dbs.read().map_err(|_| 
ErrorImpl::EnvPoisonError)?; + let data: HashMap<_, _> = dbs.name_map.iter().map(|(name, id)| (name, &dbs.arena[id.0])).collect(); + Ok(bincode::serialize(&data)?) + } + + fn deserialize(bytes: &[u8], discard_if_corrupted: bool) -> Result<(DatabaseArena, DatabaseNameMap), ErrorImpl> { + let mut arena = DatabaseArena::new(); + let mut name_map = HashMap::new(); + let data: HashMap<_, _> = match bincode::deserialize(&bytes) { + Err(_) if discard_if_corrupted => Ok(HashMap::new()), + result => result, + }?; + for (name, db) in data { + name_map.insert(name, DatabaseImpl(arena.alloc(db))); + } + Ok((arena, name_map)) + } +} + +impl EnvironmentImpl { + pub(crate) fn new( + path: &Path, + flags: EnvironmentFlagsImpl, + max_readers: Option<usize>, + max_dbs: Option<usize>, + map_size: Option<usize>, + ) -> Result<EnvironmentImpl, ErrorImpl> { + if !flags.is_empty() { + warn!("Ignoring `flags={:?}`", flags); + } + if let Some(max_readers) = max_readers { + warn!("Ignoring `max_readers={}`", max_readers); + } + if let Some(map_size) = map_size { + warn!("Ignoring `map_size={}`", map_size); + } + + Ok(EnvironmentImpl { + path: path.to_path_buf(), + max_dbs: max_dbs.unwrap_or(std::usize::MAX), + dbs: RwLock::new(EnvironmentDbs { + arena: DatabaseArena::new(), + name_map: HashMap::new(), + }), + ro_txns: Arc::new(()), + rw_txns: Arc::new(()), + }) + } + + pub(crate) fn read_from_disk(&mut self, discard_if_corrupted: bool) -> Result<(), ErrorImpl> { + let mut path = Cow::from(&self.path); + if fs::metadata(&path)?.is_dir() { + path.to_mut().push(DEFAULT_DB_FILENAME); + }; + if fs::metadata(&path).is_err() { + return Ok(()); + }; + let (arena, name_map) = Self::deserialize(&fs::read(&path)?, discard_if_corrupted)?; + self.dbs = RwLock::new(EnvironmentDbs { + arena, + name_map, + }); + Ok(()) + } + + pub(crate) fn write_to_disk(&self) -> Result<(), ErrorImpl> { + let mut path = Cow::from(&self.path); + if fs::metadata(&path)?.is_dir() { + path.to_mut().push(DEFAULT_DB_FILENAME); 
+ }; + fs::write(&path, self.serialize()?)?; + Ok(()) + } + + pub(crate) fn dbs(&self) -> Result<RwLockReadGuard<EnvironmentDbs>, ErrorImpl> { + self.dbs.read().map_err(|_| ErrorImpl::EnvPoisonError) + } + + pub(crate) fn dbs_mut(&self) -> Result<RwLockWriteGuard<EnvironmentDbs>, ErrorImpl> { + self.dbs.write().map_err(|_| ErrorImpl::EnvPoisonError) + } +} + +impl<'e> BackendEnvironment<'e> for EnvironmentImpl { + type Database = DatabaseImpl; + type Error = ErrorImpl; + type Flags = DatabaseFlagsImpl; + type Info = InfoImpl; + type RoTransaction = RoTransactionImpl<'e>; + type RwTransaction = RwTransactionImpl<'e>; + type Stat = StatImpl; + + fn get_dbs(&self) -> Result<Vec<Option<String>>, Self::Error> { + let dbs = self.dbs.read().map_err(|_| ErrorImpl::EnvPoisonError)?; + Ok(dbs.name_map.keys().map(|key| key.to_owned()).collect()) + } + + fn open_db(&self, name: Option<&str>) -> Result<Self::Database, Self::Error> { + if Arc::strong_count(&self.ro_txns) > 1 { + return Err(ErrorImpl::DbsIllegalOpen); + } + // TOOD: don't reallocate `name`. + let key = name.map(String::from); + let dbs = self.dbs.read().map_err(|_| ErrorImpl::EnvPoisonError)?; + let db = dbs.name_map.get(&key).ok_or(ErrorImpl::DbNotFoundError)?; + Ok(*db) + } + + fn create_db(&self, name: Option<&str>, flags: Self::Flags) -> Result<Self::Database, Self::Error> { + if Arc::strong_count(&self.ro_txns) > 1 { + return Err(ErrorImpl::DbsIllegalOpen); + } + // TOOD: don't reallocate `name`. 
+ let key = name.map(String::from); + let mut dbs = self.dbs.write().map_err(|_| ErrorImpl::EnvPoisonError)?; + if dbs.name_map.keys().filter_map(|k| k.as_ref()).count() >= self.max_dbs && name != None { + return Err(ErrorImpl::DbsFull); + } + let parts = EnvironmentDbsRefMut::from(dbs.deref_mut()); + let arena = parts.arena; + let name_map = parts.name_map; + let id = name_map.entry(key).or_insert_with(|| DatabaseImpl(arena.alloc(Database::new(Some(flags), None)))); + Ok(*id) + } + + fn begin_ro_txn(&'e self) -> Result<Self::RoTransaction, Self::Error> { + RoTransactionImpl::new(self, self.ro_txns.clone()) + } + + fn begin_rw_txn(&'e self) -> Result<Self::RwTransaction, Self::Error> { + RwTransactionImpl::new(self, self.rw_txns.clone()) + } + + fn sync(&self, force: bool) -> Result<(), Self::Error> { + warn!("Ignoring `force={}`", force); + self.write_to_disk() + } + + fn stat(&self) -> Result<Self::Stat, Self::Error> { + Ok(StatImpl) + } + + fn info(&self) -> Result<Self::Info, Self::Error> { + Ok(InfoImpl) + } + + fn freelist(&self) -> Result<usize, Self::Error> { + unimplemented!() + } + + fn load_ratio(&self) -> Result<Option<f32>, Self::Error> { + warn!("`load_ratio()` is irrelevant for this storage backend."); + Ok(None) + } + + fn set_map_size(&self, size: usize) -> Result<(), Self::Error> { + warn!("`set_map_size({})` is ignored by this storage backend.", size); + Ok(()) + } + + fn get_files_on_disk(&self) -> Vec<PathBuf> { + // Technically NO_SUB_DIR and NO_LOCK should change this output, but + // they're both currently unimplemented with this storage backend. 
+ let mut db_filename = self.path.clone(); + db_filename.push(DEFAULT_DB_FILENAME); + return vec![db_filename]; + } +} diff --git a/third_party/rust/rkv/src/backend/impl_safe/error.rs b/third_party/rust/rkv/src/backend/impl_safe/error.rs new file mode 100644 index 0000000000..df48d59046 --- /dev/null +++ b/third_party/rust/rkv/src/backend/impl_safe/error.rs @@ -0,0 +1,82 @@ +// Copyright 2018-2019 Mozilla +// +// Licensed under the Apache License, Version 2.0 (the "License"); you may not use +// this file except in compliance with the License. You may obtain a copy of the +// License at http://www.apache.org/licenses/LICENSE-2.0 +// Unless required by applicable law or agreed to in writing, software distributed +// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR +// CONDITIONS OF ANY KIND, either express or implied. See the License for the +// specific language governing permissions and limitations under the License. + +use std::{ + fmt, + io, + path::PathBuf, +}; + +use bincode::Error as BincodeError; + +use crate::{ + backend::traits::BackendError, + error::StoreError, +}; + +#[derive(Debug)] +pub enum ErrorImpl { + KeyValuePairNotFound, + EnvPoisonError, + DbsFull, + DbsIllegalOpen, + DbNotFoundError, + DbIsForeignError, + UnsuitableEnvironmentPath(PathBuf), + IoError(io::Error), + BincodeError(BincodeError), +} + +impl BackendError for ErrorImpl {} + +impl fmt::Display for ErrorImpl { + fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result { + match self { + ErrorImpl::KeyValuePairNotFound => write!(fmt, "KeyValuePairNotFound (safe mode)"), + ErrorImpl::EnvPoisonError => write!(fmt, "EnvPoisonError (safe mode)"), + ErrorImpl::DbsFull => write!(fmt, "DbsFull (safe mode)"), + ErrorImpl::DbsIllegalOpen => write!(fmt, "DbIllegalOpen (safe mode)"), + ErrorImpl::DbNotFoundError => write!(fmt, "DbNotFoundError (safe mode)"), + ErrorImpl::DbIsForeignError => write!(fmt, "DbIsForeignError (safe mode)"), + 
ErrorImpl::UnsuitableEnvironmentPath(_) => write!(fmt, "UnsuitableEnvironmentPath (safe mode)"), + ErrorImpl::IoError(e) => e.fmt(fmt), + ErrorImpl::BincodeError(e) => e.fmt(fmt), + } + } +} + +impl Into<StoreError> for ErrorImpl { + fn into(self) -> StoreError { + // The `StoreError::KeyValuePairBadSize` error is unused, because this + // backend supports keys and values of arbitrary sizes. + // The `StoreError::MapFull` and `StoreError::ReadersFull` are + // unimplemented yet, but they should be in the future. + match self { + ErrorImpl::KeyValuePairNotFound => StoreError::KeyValuePairNotFound, + ErrorImpl::BincodeError(_) => StoreError::FileInvalid, + ErrorImpl::DbsFull => StoreError::DbsFull, + ErrorImpl::UnsuitableEnvironmentPath(path) => StoreError::UnsuitableEnvironmentPath(path), + ErrorImpl::IoError(error) => StoreError::IoError(error), + _ => StoreError::SafeModeError(self), + } + } +} + +impl From<io::Error> for ErrorImpl { + fn from(e: io::Error) -> ErrorImpl { + ErrorImpl::IoError(e) + } +} + +impl From<BincodeError> for ErrorImpl { + fn from(e: BincodeError) -> ErrorImpl { + ErrorImpl::BincodeError(e) + } +} diff --git a/third_party/rust/rkv/src/backend/impl_safe/flags.rs b/third_party/rust/rkv/src/backend/impl_safe/flags.rs new file mode 100644 index 0000000000..e3fde1522b --- /dev/null +++ b/third_party/rust/rkv/src/backend/impl_safe/flags.rs @@ -0,0 +1,136 @@ +// Copyright 2018-2019 Mozilla +// +// Licensed under the Apache License, Version 2.0 (the "License"); you may not use +// this file except in compliance with the License. You may obtain a copy of the +// License at http://www.apache.org/licenses/LICENSE-2.0 +// Unless required by applicable law or agreed to in writing, software distributed +// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR +// CONDITIONS OF ANY KIND, either express or implied. See the License for the +// specific language governing permissions and limitations under the License. 
+ +use bitflags::bitflags; +use serde_derive::{ + Deserialize, + Serialize, +}; + +use crate::backend::{ + common::{ + DatabaseFlags, + EnvironmentFlags, + WriteFlags, + }, + traits::{ + BackendDatabaseFlags, + BackendEnvironmentFlags, + BackendFlags, + BackendWriteFlags, + }, +}; + +bitflags! { + #[derive(Default, Serialize, Deserialize)] + pub struct EnvironmentFlagsImpl: u32 { + const NIL = 0b0000_0000; + } +} + +impl BackendFlags for EnvironmentFlagsImpl { + fn empty() -> EnvironmentFlagsImpl { + EnvironmentFlagsImpl::empty() + } +} + +impl BackendEnvironmentFlags for EnvironmentFlagsImpl { + fn set(&mut self, flag: EnvironmentFlags, value: bool) { + self.set(flag.into(), value) + } +} + +impl Into<EnvironmentFlagsImpl> for EnvironmentFlags { + fn into(self) -> EnvironmentFlagsImpl { + match self { + EnvironmentFlags::FIXED_MAP => unimplemented!(), + EnvironmentFlags::NO_SUB_DIR => unimplemented!(), + EnvironmentFlags::WRITE_MAP => unimplemented!(), + EnvironmentFlags::READ_ONLY => unimplemented!(), + EnvironmentFlags::NO_META_SYNC => unimplemented!(), + EnvironmentFlags::NO_SYNC => unimplemented!(), + EnvironmentFlags::MAP_ASYNC => unimplemented!(), + EnvironmentFlags::NO_TLS => unimplemented!(), + EnvironmentFlags::NO_LOCK => unimplemented!(), + EnvironmentFlags::NO_READAHEAD => unimplemented!(), + EnvironmentFlags::NO_MEM_INIT => unimplemented!(), + } + } +} + +bitflags! 
{ + #[derive(Default, Serialize, Deserialize)] + pub struct DatabaseFlagsImpl: u32 { + const NIL = 0b0000_0000; + #[cfg(feature = "db-dup-sort")] + const DUP_SORT = 0b0000_0001; + #[cfg(feature = "db-int-key")] + const INTEGER_KEY = 0b0000_0010; + } +} + +impl BackendFlags for DatabaseFlagsImpl { + fn empty() -> DatabaseFlagsImpl { + DatabaseFlagsImpl::empty() + } +} + +impl BackendDatabaseFlags for DatabaseFlagsImpl { + fn set(&mut self, flag: DatabaseFlags, value: bool) { + self.set(flag.into(), value) + } +} + +impl Into<DatabaseFlagsImpl> for DatabaseFlags { + fn into(self) -> DatabaseFlagsImpl { + match self { + DatabaseFlags::REVERSE_KEY => unimplemented!(), + #[cfg(feature = "db-dup-sort")] + DatabaseFlags::DUP_SORT => DatabaseFlagsImpl::DUP_SORT, + #[cfg(feature = "db-dup-sort")] + DatabaseFlags::DUP_FIXED => unimplemented!(), + #[cfg(feature = "db-int-key")] + DatabaseFlags::INTEGER_KEY => DatabaseFlagsImpl::INTEGER_KEY, + DatabaseFlags::INTEGER_DUP => unimplemented!(), + DatabaseFlags::REVERSE_DUP => unimplemented!(), + } + } +} + +bitflags! 
{ + #[derive(Default, Serialize, Deserialize)] + pub struct WriteFlagsImpl: u32 { + const NIL = 0b0000_0000; + } +} + +impl BackendFlags for WriteFlagsImpl { + fn empty() -> WriteFlagsImpl { + WriteFlagsImpl::empty() + } +} + +impl BackendWriteFlags for WriteFlagsImpl { + fn set(&mut self, flag: WriteFlags, value: bool) { + self.set(flag.into(), value) + } +} + +impl Into<WriteFlagsImpl> for WriteFlags { + fn into(self) -> WriteFlagsImpl { + match self { + WriteFlags::NO_OVERWRITE => unimplemented!(), + WriteFlags::NO_DUP_DATA => unimplemented!(), + WriteFlags::CURRENT => unimplemented!(), + WriteFlags::APPEND => unimplemented!(), + WriteFlags::APPEND_DUP => unimplemented!(), + } + } +} diff --git a/third_party/rust/rkv/src/backend/impl_safe/info.rs b/third_party/rust/rkv/src/backend/impl_safe/info.rs new file mode 100644 index 0000000000..18f0f51da3 --- /dev/null +++ b/third_party/rust/rkv/src/backend/impl_safe/info.rs @@ -0,0 +1,35 @@ +// Copyright 2018-2019 Mozilla +// +// Licensed under the Apache License, Version 2.0 (the "License"); you may not use +// this file except in compliance with the License. You may obtain a copy of the +// License at http://www.apache.org/licenses/LICENSE-2.0 +// Unless required by applicable law or agreed to in writing, software distributed +// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR +// CONDITIONS OF ANY KIND, either express or implied. See the License for the +// specific language governing permissions and limitations under the License. 
+ +use crate::backend::traits::BackendInfo; + +pub struct InfoImpl; + +impl BackendInfo for InfoImpl { + fn map_size(&self) -> usize { + unimplemented!() + } + + fn last_pgno(&self) -> usize { + unimplemented!() + } + + fn last_txnid(&self) -> usize { + unimplemented!() + } + + fn max_readers(&self) -> usize { + unimplemented!() + } + + fn num_readers(&self) -> usize { + unimplemented!() + } +} diff --git a/third_party/rust/rkv/src/backend/impl_safe/iter.rs b/third_party/rust/rkv/src/backend/impl_safe/iter.rs new file mode 100644 index 0000000000..a784c00a5b --- /dev/null +++ b/third_party/rust/rkv/src/backend/impl_safe/iter.rs @@ -0,0 +1,24 @@ +// Copyright 2018-2019 Mozilla +// +// Licensed under the Apache License, Version 2.0 (the "License"); you may not use +// this file except in compliance with the License. You may obtain a copy of the +// License at http://www.apache.org/licenses/LICENSE-2.0 +// Unless required by applicable law or agreed to in writing, software distributed +// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR +// CONDITIONS OF ANY KIND, either express or implied. See the License for the +// specific language governing permissions and limitations under the License. + +use super::ErrorImpl; +use crate::backend::traits::BackendIter; + +// FIXME: Use generics instead. 
+pub struct IterImpl<'i>(pub(crate) Box<dyn Iterator<Item = (&'i [u8], &'i [u8])> + 'i>); + +impl<'i> BackendIter<'i> for IterImpl<'i> { + type Error = ErrorImpl; + + #[allow(clippy::type_complexity)] + fn next(&mut self) -> Option<Result<(&'i [u8], &'i [u8]), Self::Error>> { + self.0.next().map(Ok) + } +} diff --git a/third_party/rust/rkv/src/backend/impl_safe/snapshot.rs b/third_party/rust/rkv/src/backend/impl_safe/snapshot.rs new file mode 100644 index 0000000000..938d5886b5 --- /dev/null +++ b/third_party/rust/rkv/src/backend/impl_safe/snapshot.rs @@ -0,0 +1,140 @@ +// Copyright 2018-2019 Mozilla +// +// Licensed under the Apache License, Version 2.0 (the "License"); you may not use +// this file except in compliance with the License. You may obtain a copy of the +// License at http://www.apache.org/licenses/LICENSE-2.0 +// Unless required by applicable law or agreed to in writing, software distributed +// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR +// CONDITIONS OF ANY KIND, either express or implied. See the License for the +// specific language governing permissions and limitations under the License. 
+ +use std::{ + collections::{ + BTreeMap, + BTreeSet, + }, + sync::Arc, +}; + +use serde_derive::{ + Deserialize, + Serialize, +}; + +use super::DatabaseFlagsImpl; + +type Key = Box<[u8]>; +type Value = Box<[u8]>; + +#[derive(Debug, Clone, Serialize, Deserialize)] +pub struct Snapshot { + flags: DatabaseFlagsImpl, + #[cfg(not(feature = "db-dup-sort"))] + map: Arc<BTreeMap<Key, Value>>, + #[cfg(feature = "db-dup-sort")] + map: Arc<BTreeMap<Key, BTreeSet<Value>>>, +} + +impl Snapshot { + pub(crate) fn new(flags: Option<DatabaseFlagsImpl>) -> Snapshot { + Snapshot { + flags: flags.unwrap_or_else(DatabaseFlagsImpl::default), + map: Default::default(), + } + } + + pub(crate) fn flags(&self) -> &DatabaseFlagsImpl { + &self.flags + } + + pub(crate) fn clear(&mut self) { + self.map = Default::default(); + } +} + +#[cfg(not(feature = "db-dup-sort"))] +impl Snapshot { + pub(crate) fn get(&self, key: &[u8]) -> Option<&[u8]> { + self.map.get(key).map(|value| value.as_ref()) + } + + pub(crate) fn put(&mut self, key: &[u8], value: &[u8]) { + let map = Arc::make_mut(&mut self.map); + map.insert(Box::from(key), Box::from(value)); + } + + pub(crate) fn del(&mut self, key: &[u8]) -> Option<()> { + let map = Arc::make_mut(&mut self.map); + map.remove(key).map(|_| ()) + } + + pub(crate) fn iter(&self) -> impl Iterator<Item = (&[u8], &[u8])> { + self.map.iter().map(|(key, value)| (key.as_ref(), value.as_ref())) + } +} + +#[cfg(feature = "db-dup-sort")] +impl Snapshot { + pub(crate) fn get(&self, key: &[u8]) -> Option<&[u8]> { + self.map.get(key).and_then(|v| v.iter().next()).map(|v| v.as_ref()) + } + + pub(crate) fn put(&mut self, key: &[u8], value: &[u8]) { + let map = Arc::make_mut(&mut self.map); + match map.get_mut(key) { + None => { + let mut values = BTreeSet::new(); + values.insert(Box::from(value)); + map.insert(Box::from(key), values); + }, + Some(values) => { + values.clear(); + values.insert(Box::from(value)); + }, + } + } + + pub(crate) fn del(&mut self, key: &[u8]) -> 
Option<()> { + let map = Arc::make_mut(&mut self.map); + match map.get_mut(key) { + None => None, + Some(values) => { + let was_empty = values.is_empty(); + values.clear(); + Some(()).filter(|_| !was_empty) + }, + } + } + + pub(crate) fn iter(&self) -> impl Iterator<Item = (&[u8], impl Iterator<Item = &[u8]>)> { + self.map.iter().map(|(key, values)| (key.as_ref(), values.iter().map(|value| value.as_ref()))) + } +} + +#[cfg(feature = "db-dup-sort")] +impl Snapshot { + pub(crate) fn put_dup(&mut self, key: &[u8], value: &[u8]) { + let map = Arc::make_mut(&mut self.map); + match map.get_mut(key) { + None => { + let mut values = BTreeSet::new(); + values.insert(Box::from(value)); + map.insert(Box::from(key), values); + }, + Some(values) => { + values.insert(Box::from(value)); + }, + } + } + + pub(crate) fn del_exact(&mut self, key: &[u8], value: &[u8]) -> Option<()> { + let map = Arc::make_mut(&mut self.map); + match map.get_mut(key) { + None => None, + Some(values) => { + let was_removed = values.remove(value); + Some(()).filter(|_| was_removed) + }, + } + } +} diff --git a/third_party/rust/rkv/src/backend/impl_safe/stat.rs b/third_party/rust/rkv/src/backend/impl_safe/stat.rs new file mode 100644 index 0000000000..c117b56833 --- /dev/null +++ b/third_party/rust/rkv/src/backend/impl_safe/stat.rs @@ -0,0 +1,39 @@ +// Copyright 2018-2019 Mozilla +// +// Licensed under the Apache License, Version 2.0 (the "License"); you may not use +// this file except in compliance with the License. You may obtain a copy of the +// License at http://www.apache.org/licenses/LICENSE-2.0 +// Unless required by applicable law or agreed to in writing, software distributed +// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR +// CONDITIONS OF ANY KIND, either express or implied. See the License for the +// specific language governing permissions and limitations under the License. 
+ +use crate::backend::traits::BackendStat; + +pub struct StatImpl; + +impl BackendStat for StatImpl { + fn page_size(&self) -> usize { + unimplemented!() + } + + fn depth(&self) -> usize { + unimplemented!() + } + + fn branch_pages(&self) -> usize { + unimplemented!() + } + + fn leaf_pages(&self) -> usize { + unimplemented!() + } + + fn overflow_pages(&self) -> usize { + unimplemented!() + } + + fn entries(&self) -> usize { + unimplemented!() + } +} diff --git a/third_party/rust/rkv/src/backend/impl_safe/transaction.rs b/third_party/rust/rkv/src/backend/impl_safe/transaction.rs new file mode 100644 index 0000000000..cd3d55db7d --- /dev/null +++ b/third_party/rust/rkv/src/backend/impl_safe/transaction.rs @@ -0,0 +1,167 @@ +// Copyright 2018-2019 Mozilla +// +// Licensed under the Apache License, Version 2.0 (the "License"); you may not use +// this file except in compliance with the License. You may obtain a copy of the +// License at http://www.apache.org/licenses/LICENSE-2.0 +// Unless required by applicable law or agreed to in writing, software distributed +// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR +// CONDITIONS OF ANY KIND, either express or implied. See the License for the +// specific language governing permissions and limitations under the License. 
+ +use std::{ + collections::HashMap, + sync::Arc, +}; + +use super::{ + snapshot::Snapshot, + DatabaseImpl, + EnvironmentImpl, + ErrorImpl, + RoCursorImpl, + WriteFlagsImpl, +}; +use crate::backend::traits::{ + BackendRoCursorTransaction, + BackendRoTransaction, + BackendRwCursorTransaction, + BackendRwTransaction, +}; + +#[derive(Debug)] +pub struct RoTransactionImpl<'t> { + env: &'t EnvironmentImpl, + snapshots: HashMap<DatabaseImpl, Snapshot>, + idx: Arc<()>, +} + +impl<'t> RoTransactionImpl<'t> { + pub(crate) fn new(env: &'t EnvironmentImpl, idx: Arc<()>) -> Result<RoTransactionImpl<'t>, ErrorImpl> { + let snapshots = env.dbs()?.arena.iter().map(|(id, db)| (DatabaseImpl(id), db.snapshot())).collect(); + Ok(RoTransactionImpl { + env, + snapshots, + idx, + }) + } +} + +impl<'t> BackendRoTransaction for RoTransactionImpl<'t> { + type Database = DatabaseImpl; + type Error = ErrorImpl; + + fn get(&self, db: &Self::Database, key: &[u8]) -> Result<&[u8], Self::Error> { + let snapshot = self.snapshots.get(db).ok_or_else(|| ErrorImpl::DbIsForeignError)?; + snapshot.get(key).ok_or_else(|| ErrorImpl::KeyValuePairNotFound) + } + + fn abort(self) { + // noop + } +} + +impl<'t> BackendRoCursorTransaction<'t> for RoTransactionImpl<'t> { + type RoCursor = RoCursorImpl<'t>; + + fn open_ro_cursor(&'t self, db: &Self::Database) -> Result<Self::RoCursor, Self::Error> { + let snapshot = self.snapshots.get(db).ok_or_else(|| ErrorImpl::DbIsForeignError)?; + Ok(RoCursorImpl(snapshot)) + } +} + +#[derive(Debug)] +pub struct RwTransactionImpl<'t> { + env: &'t EnvironmentImpl, + snapshots: HashMap<DatabaseImpl, Snapshot>, + idx: Arc<()>, +} + +impl<'t> RwTransactionImpl<'t> { + pub(crate) fn new(env: &'t EnvironmentImpl, idx: Arc<()>) -> Result<RwTransactionImpl<'t>, ErrorImpl> { + let snapshots = env.dbs()?.arena.iter().map(|(id, db)| (DatabaseImpl(id), db.snapshot())).collect(); + Ok(RwTransactionImpl { + env, + snapshots, + idx, + }) + } +} + +impl<'t> BackendRwTransaction for 
RwTransactionImpl<'t> { + type Database = DatabaseImpl; + type Error = ErrorImpl; + type Flags = WriteFlagsImpl; + + fn get(&self, db: &Self::Database, key: &[u8]) -> Result<&[u8], Self::Error> { + let snapshot = self.snapshots.get(db).ok_or_else(|| ErrorImpl::DbIsForeignError)?; + snapshot.get(key).ok_or_else(|| ErrorImpl::KeyValuePairNotFound) + } + + #[cfg(not(feature = "db-dup-sort"))] + fn put(&mut self, db: &Self::Database, key: &[u8], value: &[u8], _flags: Self::Flags) -> Result<(), Self::Error> { + let snapshot = self.snapshots.get_mut(db).ok_or_else(|| ErrorImpl::DbIsForeignError)?; + snapshot.put(key, value); + Ok(()) + } + + #[cfg(feature = "db-dup-sort")] + fn put(&mut self, db: &Self::Database, key: &[u8], value: &[u8], _flags: Self::Flags) -> Result<(), Self::Error> { + use super::DatabaseFlagsImpl; + let snapshot = self.snapshots.get_mut(db).ok_or_else(|| ErrorImpl::DbIsForeignError)?; + if snapshot.flags().contains(DatabaseFlagsImpl::DUP_SORT) { + snapshot.put_dup(key, value); + } else { + snapshot.put(key, value); + } + Ok(()) + } + + #[cfg(not(feature = "db-dup-sort"))] + fn del(&mut self, db: &Self::Database, key: &[u8]) -> Result<(), Self::Error> { + let snapshot = self.snapshots.get_mut(db).ok_or_else(|| ErrorImpl::DbIsForeignError)?; + let deleted = snapshot.del(key); + Ok(deleted.ok_or_else(|| ErrorImpl::KeyValuePairNotFound)?) + } + + #[cfg(feature = "db-dup-sort")] + fn del(&mut self, db: &Self::Database, key: &[u8], value: Option<&[u8]>) -> Result<(), Self::Error> { + use super::DatabaseFlagsImpl; + let snapshot = self.snapshots.get_mut(db).ok_or_else(|| ErrorImpl::DbIsForeignError)?; + let deleted = match (value, snapshot.flags()) { + (Some(value), flags) if flags.contains(DatabaseFlagsImpl::DUP_SORT) => snapshot.del_exact(key, value), + _ => snapshot.del(key), + }; + Ok(deleted.ok_or_else(|| ErrorImpl::KeyValuePairNotFound)?) 
+ } + + fn clear_db(&mut self, db: &Self::Database) -> Result<(), Self::Error> { + let snapshot = self.snapshots.get_mut(db).ok_or_else(|| ErrorImpl::DbIsForeignError)?; + snapshot.clear(); + Ok(()) + } + + fn commit(self) -> Result<(), Self::Error> { + let mut dbs = self.env.dbs_mut()?; + + for (id, snapshot) in self.snapshots { + let db = dbs.arena.get_mut(id.0).ok_or_else(|| ErrorImpl::DbIsForeignError)?; + db.replace(snapshot); + } + + drop(dbs); + self.env.write_to_disk() + } + + fn abort(self) { + // noop + } +} + +impl<'t> BackendRwCursorTransaction<'t> for RwTransactionImpl<'t> { + type RoCursor = RoCursorImpl<'t>; + + fn open_ro_cursor(&'t self, db: &Self::Database) -> Result<Self::RoCursor, Self::Error> { + let snapshot = self.snapshots.get(db).ok_or_else(|| ErrorImpl::DbIsForeignError)?; + Ok(RoCursorImpl(snapshot)) + } +} diff --git a/third_party/rust/rkv/src/backend/traits.rs b/third_party/rust/rkv/src/backend/traits.rs new file mode 100644 index 0000000000..d589a3c5fa --- /dev/null +++ b/third_party/rust/rkv/src/backend/traits.rs @@ -0,0 +1,197 @@ +// Copyright 2018-2019 Mozilla +// +// Licensed under the Apache License, Version 2.0 (the "License"); you may not use +// this file except in compliance with the License. You may obtain a copy of the +// License at http://www.apache.org/licenses/LICENSE-2.0 +// Unless required by applicable law or agreed to in writing, software distributed +// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR +// CONDITIONS OF ANY KIND, either express or implied. See the License for the +// specific language governing permissions and limitations under the License. 
+ +use std::{ + fmt::{ + Debug, + Display, + }, + path::{ + Path, + PathBuf, + }, +}; + +use crate::{ + backend::common::{ + DatabaseFlags, + EnvironmentFlags, + WriteFlags, + }, + error::StoreError, +}; + +pub trait BackendError: Debug + Display + Into<StoreError> {} + +pub trait BackendDatabase: Debug + Eq + PartialEq + Copy + Clone {} + +pub trait BackendFlags: Debug + Eq + PartialEq + Copy + Clone + Default { + fn empty() -> Self; +} + +pub trait BackendEnvironmentFlags: BackendFlags { + fn set(&mut self, flag: EnvironmentFlags, value: bool); +} + +pub trait BackendDatabaseFlags: BackendFlags { + fn set(&mut self, flag: DatabaseFlags, value: bool); +} + +pub trait BackendWriteFlags: BackendFlags { + fn set(&mut self, flag: WriteFlags, value: bool); +} + +pub trait BackendStat { + fn page_size(&self) -> usize; + + fn depth(&self) -> usize; + + fn branch_pages(&self) -> usize; + + fn leaf_pages(&self) -> usize; + + fn overflow_pages(&self) -> usize; + + fn entries(&self) -> usize; +} + +pub trait BackendInfo { + fn map_size(&self) -> usize; + + fn last_pgno(&self) -> usize; + + fn last_txnid(&self) -> usize; + + fn max_readers(&self) -> usize; + + fn num_readers(&self) -> usize; +} + +pub trait BackendEnvironmentBuilder<'b>: Debug + Eq + PartialEq + Copy + Clone { + type Error: BackendError; + type Environment: BackendEnvironment<'b>; + type Flags: BackendEnvironmentFlags; + + fn new() -> Self; + + fn set_flags<T>(&mut self, flags: T) -> &mut Self + where + T: Into<Self::Flags>; + + fn set_max_dbs(&mut self, max_dbs: u32) -> &mut Self; + + fn set_max_readers(&mut self, max_readers: u32) -> &mut Self; + + fn set_map_size(&mut self, size: usize) -> &mut Self; + + fn set_make_dir_if_needed(&mut self, make_dir_if_needed: bool) -> &mut Self; + + fn set_discard_if_corrupted(&mut self, discard_if_corrupted: bool) -> &mut Self; + + fn open(&self, path: &Path) -> Result<Self::Environment, Self::Error>; +} + +pub trait BackendEnvironment<'e>: Debug { + type Error: 
BackendError; + type Database: BackendDatabase; + type Flags: BackendDatabaseFlags; + type Stat: BackendStat; + type Info: BackendInfo; + type RoTransaction: BackendRoCursorTransaction<'e, Database = Self::Database>; + type RwTransaction: BackendRwCursorTransaction<'e, Database = Self::Database>; + + fn get_dbs(&self) -> Result<Vec<Option<String>>, Self::Error>; + + fn open_db(&self, name: Option<&str>) -> Result<Self::Database, Self::Error>; + + fn create_db(&self, name: Option<&str>, flags: Self::Flags) -> Result<Self::Database, Self::Error>; + + fn begin_ro_txn(&'e self) -> Result<Self::RoTransaction, Self::Error>; + + fn begin_rw_txn(&'e self) -> Result<Self::RwTransaction, Self::Error>; + + fn sync(&self, force: bool) -> Result<(), Self::Error>; + + fn stat(&self) -> Result<Self::Stat, Self::Error>; + + fn info(&self) -> Result<Self::Info, Self::Error>; + + fn freelist(&self) -> Result<usize, Self::Error>; + + fn load_ratio(&self) -> Result<Option<f32>, Self::Error>; + + fn set_map_size(&self, size: usize) -> Result<(), Self::Error>; + + fn get_files_on_disk(&self) -> Vec<PathBuf>; +} + +pub trait BackendRoTransaction: Debug { + type Error: BackendError; + type Database: BackendDatabase; + + fn get(&self, db: &Self::Database, key: &[u8]) -> Result<&[u8], Self::Error>; + + fn abort(self); +} + +pub trait BackendRwTransaction: Debug { + type Error: BackendError; + type Database: BackendDatabase; + type Flags: BackendWriteFlags; + + fn get(&self, db: &Self::Database, key: &[u8]) -> Result<&[u8], Self::Error>; + + fn put(&mut self, db: &Self::Database, key: &[u8], value: &[u8], flags: Self::Flags) -> Result<(), Self::Error>; + + #[cfg(not(feature = "db-dup-sort"))] + fn del(&mut self, db: &Self::Database, key: &[u8]) -> Result<(), Self::Error>; + + #[cfg(feature = "db-dup-sort")] + fn del(&mut self, db: &Self::Database, key: &[u8], value: Option<&[u8]>) -> Result<(), Self::Error>; + + fn clear_db(&mut self, db: &Self::Database) -> Result<(), Self::Error>; + + fn 
commit(self) -> Result<(), Self::Error>; + + fn abort(self); +} + +pub trait BackendRoCursorTransaction<'t>: BackendRoTransaction { + type RoCursor: BackendRoCursor<'t>; + + fn open_ro_cursor(&'t self, db: &Self::Database) -> Result<Self::RoCursor, Self::Error>; +} + +pub trait BackendRwCursorTransaction<'t>: BackendRwTransaction { + type RoCursor: BackendRoCursor<'t>; + + fn open_ro_cursor(&'t self, db: &Self::Database) -> Result<Self::RoCursor, Self::Error>; +} + +pub trait BackendRoCursor<'c>: Debug { + type Iter: BackendIter<'c>; + + fn into_iter(self) -> Self::Iter; + + fn into_iter_from<K>(self, key: K) -> Self::Iter + where + K: AsRef<[u8]> + 'c; + + fn into_iter_dup_of<K>(self, key: K) -> Self::Iter + where + K: AsRef<[u8]> + 'c; +} + +pub trait BackendIter<'i> { + type Error: BackendError; + + #[allow(clippy::type_complexity)] + fn next(&mut self) -> Option<Result<(&'i [u8], &'i [u8]), Self::Error>>; +} diff --git a/third_party/rust/rkv/src/bin/dump.rs b/third_party/rust/rkv/src/bin/dump.rs new file mode 100644 index 0000000000..04ae824c5d --- /dev/null +++ b/third_party/rust/rkv/src/bin/dump.rs @@ -0,0 +1,54 @@ +// Copyright 2018-2019 Mozilla +// +// Licensed under the Apache License, Version 2.0 (the "License"); you may not use +// this file except in compliance with the License. You may obtain a copy of the +// License at http://www.apache.org/licenses/LICENSE-2.0 +// Unless required by applicable law or agreed to in writing, software distributed +// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR +// CONDITIONS OF ANY KIND, either express or implied. See the License for the +// specific language governing permissions and limitations under the License. 
+ +use std::{ + env::args, + io, + path::Path, +}; + +use rkv::migrator::{ + LmdbArchMigrateError, + LmdbArchMigrator, +}; + +fn main() -> Result<(), LmdbArchMigrateError> { + let mut cli_args = args(); + let mut db_name = None; + let mut env_path = None; + + // The first arg is the name of the program, which we can ignore. + cli_args.next(); + + while let Some(arg) = cli_args.next() { + if &arg[0..1] == "-" { + match &arg[1..] { + "s" => { + db_name = match cli_args.next() { + None => return Err("-s must be followed by database name".into()), + Some(str) => Some(str), + }; + }, + str => return Err(format!("arg -{} not recognized", str).into()), + } + } else { + if env_path.is_some() { + return Err("must provide only one path to the LMDB environment".into()); + } + env_path = Some(arg); + } + } + + let env_path = env_path.ok_or("must provide a path to the LMDB environment")?; + let mut migrator = LmdbArchMigrator::new(Path::new(&env_path))?; + migrator.dump(db_name.as_deref(), io::stdout()).unwrap(); + + Ok(()) +} diff --git a/third_party/rust/rkv/src/bin/rand.rs b/third_party/rust/rkv/src/bin/rand.rs new file mode 100644 index 0000000000..54492d8b92 --- /dev/null +++ b/third_party/rust/rkv/src/bin/rand.rs @@ -0,0 +1,113 @@ +// Copyright 2018-2019 Mozilla +// +// Licensed under the Apache License, Version 2.0 (the "License"); you may not use +// this file except in compliance with the License. You may obtain a copy of the +// License at http://www.apache.org/licenses/LICENSE-2.0 +// Unless required by applicable law or agreed to in writing, software distributed +// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR +// CONDITIONS OF ANY KIND, either express or implied. See the License for the +// specific language governing permissions and limitations under the License. + +//! A command-line utility to create an LMDB environment containing random data. +//! It requires one flag, `-s path/to/environment`, which specifies the location +//! 
where the tool should create the environment. Optionally, you may specify +//! the number of key/value pairs to create via the `-n <number>` flag +//! (for which the default value is 50). + +use std::{ + env::args, + fs, + fs::File, + io::Read, + path::Path, +}; + +use rkv::{ + backend::{ + BackendEnvironmentBuilder, + Lmdb, + }, + Rkv, + StoreOptions, + Value, +}; + +fn main() { + let mut args = args(); + let mut database = None; + let mut path = None; + let mut num_pairs = 50; + + // The first arg is the name of the program, which we can ignore. + args.next(); + + while let Some(arg) = args.next() { + if &arg[0..1] == "-" { + match &arg[1..] { + "s" => { + database = match args.next() { + None => panic!("-s must be followed by database arg"), + Some(str) => Some(str), + }; + }, + "n" => { + num_pairs = match args.next() { + None => panic!("-s must be followed by number of pairs"), + Some(str) => str.parse().expect("number"), + }; + }, + str => panic!("arg -{} not recognized", str), + } + } else { + if path.is_some() { + panic!("must provide only one path to the LMDB environment"); + } + path = Some(arg); + } + } + + if path.is_none() { + panic!("must provide a path to the LMDB environment"); + } + + let path = path.unwrap(); + fs::create_dir_all(&path).expect("dir created"); + + let mut builder = Rkv::environment_builder::<Lmdb>(); + builder.set_max_dbs(2); + // Allocate enough map to accommodate the largest random collection. + // We currently do this by allocating twice the maximum possible size + // of the pairs (assuming maximum key and value sizes). + builder.set_map_size((511 + 65535) * num_pairs * 2); + let rkv = Rkv::from_builder(Path::new(&path), builder).expect("Rkv"); + let store = rkv.open_single(database.as_deref(), StoreOptions::create()).expect("opened"); + let mut writer = rkv.write().expect("writer"); + + // Generate random values for the number of keys and key/value lengths. + // On Linux, "Just use /dev/urandom!" 
<https://www.2uo.de/myths-about-urandom/>. + // On macOS it doesn't matter (/dev/random and /dev/urandom are identical). + let mut random = File::open("/dev/urandom").unwrap(); + let mut nums = [0u8; 4]; + random.read_exact(&mut nums).unwrap(); + + // Generate 0–255 pairs. + for _ in 0..num_pairs { + // Generate key and value lengths. The key must be 1–511 bytes long. + // The value length can be 0 and is essentially unbounded; we generate + // value lengths of 0–0xffff (65535). + // NB: the modulus method for generating a random number within a range + // introduces distribution skew, but we don't need it to be perfect. + let key_len = ((u16::from(nums[0]) + (u16::from(nums[1]) << 8)) % 511 + 1) as usize; + let value_len = (u16::from(nums[2]) + (u16::from(nums[3]) << 8)) as usize; + + let mut key: Vec<u8> = vec![0; key_len]; + random.read_exact(&mut key[0..key_len]).unwrap(); + + let mut value: Vec<u8> = vec![0; value_len]; + random.read_exact(&mut value[0..value_len]).unwrap(); + + store.put(&mut writer, key, &Value::Blob(&value)).expect("wrote"); + } + + writer.commit().expect("committed"); +} diff --git a/third_party/rust/rkv/src/env.rs b/third_party/rust/rkv/src/env.rs new file mode 100644 index 0000000000..c8e669e493 --- /dev/null +++ b/third_party/rust/rkv/src/env.rs @@ -0,0 +1,331 @@ +// Copyright 2018-2019 Mozilla +// +// Licensed under the Apache License, Version 2.0 (the "License"); you may not use +// this file except in compliance with the License. You may obtain a copy of the +// License at http://www.apache.org/licenses/LICENSE-2.0 +// Unless required by applicable law or agreed to in writing, software distributed +// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR +// CONDITIONS OF ANY KIND, either express or implied. See the License for the +// specific language governing permissions and limitations under the License. 
+ +use std::{ + fs, + os::raw::c_uint, + path::{ + Path, + PathBuf, + }, +}; + +#[cfg(any(feature = "db-dup-sort", feature = "db-int-key"))] +use crate::backend::{ + BackendDatabaseFlags, + DatabaseFlags, +}; +use crate::{ + backend::{ + BackendEnvironment, + BackendEnvironmentBuilder, + BackendRoCursorTransaction, + BackendRwCursorTransaction, + SafeModeError, + }, + error::{ + CloseError, + StoreError, + }, + readwrite::{ + Reader, + Writer, + }, + store::{ + single::SingleStore, + CloseOptions, + Options as StoreOptions, + }, +}; + +#[cfg(feature = "db-dup-sort")] +use crate::store::multi::MultiStore; + +#[cfg(feature = "db-int-key")] +use crate::store::integer::IntegerStore; +#[cfg(feature = "db-int-key")] +use crate::store::keys::PrimitiveInt; + +#[cfg(all(feature = "db-dup-sort", feature = "db-int-key"))] +use crate::store::integermulti::MultiIntegerStore; + +pub static DEFAULT_MAX_DBS: c_uint = 5; + +/// Wrapper around an `Environment` (e.g. such as an `LMDB` or `SafeMode` environment). +#[derive(Debug)] +pub struct Rkv<E> { + path: PathBuf, + env: E, +} + +/// Static methods. +impl<'e, E> Rkv<E> +where + E: BackendEnvironment<'e>, +{ + pub fn environment_builder<B>() -> B + where + B: BackendEnvironmentBuilder<'e, Environment = E>, + { + B::new() + } + + /// Return a new Rkv environment that supports up to `DEFAULT_MAX_DBS` open databases. + #[allow(clippy::new_ret_no_self)] + pub fn new<B>(path: &Path) -> Result<Rkv<E>, StoreError> + where + B: BackendEnvironmentBuilder<'e, Environment = E>, + { + Rkv::with_capacity::<B>(path, DEFAULT_MAX_DBS) + } + + /// Return a new Rkv environment that supports the specified number of open databases. + pub fn with_capacity<B>(path: &Path, max_dbs: c_uint) -> Result<Rkv<E>, StoreError> + where + B: BackendEnvironmentBuilder<'e, Environment = E>, + { + let mut builder = B::new(); + builder.set_max_dbs(max_dbs); + + // Future: set flags, maximum size, etc. here if necessary. 
+ Rkv::from_builder(path, builder) + } + + /// Return a new Rkv environment from the provided builder. + pub fn from_builder<B>(path: &Path, builder: B) -> Result<Rkv<E>, StoreError> + where + B: BackendEnvironmentBuilder<'e, Environment = E>, + { + Ok(Rkv { + path: path.into(), + env: builder.open(path).map_err(|e| e.into())?, + }) + } +} + +/// Store creation methods. +impl<'e, E> Rkv<E> +where + E: BackendEnvironment<'e>, +{ + /// Return all created databases. + pub fn get_dbs(&self) -> Result<Vec<Option<String>>, StoreError> { + self.env.get_dbs().map_err(|e| e.into()) + } + + /// Create or Open an existing database in (&[u8] -> Single Value) mode. + /// Note: that create=true cannot be called concurrently with other operations so if + /// you are sure that the database exists, call this with create=false. + pub fn open_single<'s, T>( + &self, + name: T, + opts: StoreOptions<E::Flags>, + ) -> Result<SingleStore<E::Database>, StoreError> + where + T: Into<Option<&'s str>>, + { + self.open(name, opts).map(SingleStore::new) + } + + /// Create or Open an existing database in (Integer -> Single Value) mode. + /// Note: that create=true cannot be called concurrently with other operations so if + /// you are sure that the database exists, call this with create=false. + #[cfg(feature = "db-int-key")] + pub fn open_integer<'s, T, K>( + &self, + name: T, + mut opts: StoreOptions<E::Flags>, + ) -> Result<IntegerStore<E::Database, K>, StoreError> + where + K: PrimitiveInt, + T: Into<Option<&'s str>>, + { + opts.flags.set(DatabaseFlags::INTEGER_KEY, true); + self.open(name, opts).map(IntegerStore::new) + } + + /// Create or Open an existing database in (&[u8] -> Multiple Values) mode. + /// Note: that create=true cannot be called concurrently with other operations so if + /// you are sure that the database exists, call this with create=false. 
+ #[cfg(feature = "db-dup-sort")] + pub fn open_multi<'s, T>( + &self, + name: T, + mut opts: StoreOptions<E::Flags>, + ) -> Result<MultiStore<E::Database>, StoreError> + where + T: Into<Option<&'s str>>, + { + opts.flags.set(DatabaseFlags::DUP_SORT, true); + self.open(name, opts).map(MultiStore::new) + } + + /// Create or Open an existing database in (Integer -> Multiple Values) mode. + /// Note: that create=true cannot be called concurrently with other operations so if + /// you are sure that the database exists, call this with create=false. + #[cfg(all(feature = "db-dup-sort", feature = "db-int-key"))] + pub fn open_multi_integer<'s, T, K>( + &self, + name: T, + mut opts: StoreOptions<E::Flags>, + ) -> Result<MultiIntegerStore<E::Database, K>, StoreError> + where + K: PrimitiveInt, + T: Into<Option<&'s str>>, + { + opts.flags.set(DatabaseFlags::INTEGER_KEY, true); + opts.flags.set(DatabaseFlags::DUP_SORT, true); + self.open(name, opts).map(MultiIntegerStore::new) + } + + fn open<'s, T>(&self, name: T, opts: StoreOptions<E::Flags>) -> Result<E::Database, StoreError> + where + T: Into<Option<&'s str>>, + { + if opts.create { + self.env.create_db(name.into(), opts.flags).map_err(|e| { + match e.into() { + StoreError::LmdbError(lmdb::Error::BadRslot) => StoreError::open_during_transaction(), + StoreError::SafeModeError(SafeModeError::DbsIllegalOpen) => StoreError::open_during_transaction(), + e => e, + } + }) + } else { + self.env.open_db(name.into()).map_err(|e| { + match e.into() { + StoreError::LmdbError(lmdb::Error::BadRslot) => StoreError::open_during_transaction(), + StoreError::SafeModeError(SafeModeError::DbsIllegalOpen) => StoreError::open_during_transaction(), + e => e, + } + }) + } + } +} + +/// Read and write accessors. +impl<'e, E> Rkv<E> +where + E: BackendEnvironment<'e>, +{ + /// Create a read transaction. 
There can be multiple concurrent readers for an + /// environment, up to the maximum specified by LMDB (default 126), and you can open + /// readers while a write transaction is active. + pub fn read<T>(&'e self) -> Result<Reader<T>, StoreError> + where + E: BackendEnvironment<'e, RoTransaction = T>, + T: BackendRoCursorTransaction<'e, Database = E::Database>, + { + Ok(Reader::new(self.env.begin_ro_txn().map_err(|e| e.into())?)) + } + + /// Create a write transaction. There can be only one write transaction active at any + /// given time, so trying to create a second one will block until the first is + /// committed or aborted. + pub fn write<T>(&'e self) -> Result<Writer<T>, StoreError> + where + E: BackendEnvironment<'e, RwTransaction = T>, + T: BackendRwCursorTransaction<'e, Database = E::Database>, + { + Ok(Writer::new(self.env.begin_rw_txn().map_err(|e| e.into())?)) + } +} + +/// Other environment methods. +impl<'e, E> Rkv<E> +where + E: BackendEnvironment<'e>, +{ + /// Flush the data buffers to disk. This call is only useful, when the environment was + /// open with either `NO_SYNC`, `NO_META_SYNC` or `MAP_ASYNC` (see below). The call is + /// not valid if the environment was opened with `READ_ONLY`. + /// + /// Data is always written to disk when `transaction.commit()` is called, but the + /// operating system may keep it buffered. LMDB always flushes the OS buffers upon + /// commit as well, unless the environment was opened with `NO_SYNC` or in part + /// `NO_META_SYNC`. + /// + /// `force`: if true, force a synchronous flush. Otherwise if the environment has the + /// `NO_SYNC` flag set the flushes will be omitted, and with `MAP_ASYNC` they will + /// be asynchronous. + pub fn sync(&self, force: bool) -> Result<(), StoreError> { + self.env.sync(force).map_err(|e| e.into()) + } + + /// Retrieve statistics about this environment. 
+ /// + /// It includes: + /// * Page size in bytes + /// * B-tree depth + /// * Number of internal (non-leaf) pages + /// * Number of leaf pages + /// * Number of overflow pages + /// * Number of data entries + pub fn stat(&self) -> Result<E::Stat, StoreError> { + self.env.stat().map_err(|e| e.into()) + } + + /// Retrieve information about this environment. + /// + /// It includes: + /// * Map size in bytes + /// * The last used page number + /// * The last transaction ID + /// * Max number of readers allowed + /// * Number of readers in use + pub fn info(&self) -> Result<E::Info, StoreError> { + self.env.info().map_err(|e| e.into()) + } + + /// Retrieve the load ratio (# of used pages / total pages) about this environment. + /// + /// With the formula: (last_page_no - freelist_pages) / total_pages. + /// A value of `None` means that the backend doesn't ever need to be resized. + pub fn load_ratio(&self) -> Result<Option<f32>, StoreError> { + self.env.load_ratio().map_err(|e| e.into()) + } + + /// Sets the size of the memory map to use for the environment. + /// + /// This can be used to resize the map when the environment is already open. You can + /// also use `Rkv::environment_builder()` to set the map size during the `Rkv` + /// initialization. + /// + /// Note: + /// + /// * No active transactions allowed when performing resizing in this process. It's up + /// to the consumer to enforce that. + /// + /// * The size should be a multiple of the OS page size. Any attempt to set a size + /// smaller than the space already consumed by the environment will be silently + /// changed to the current size of the used space. + /// + /// * In the multi-process case, once a process resizes the map, other processes need + /// to either re-open the environment, or call set_map_size with size 0 to update + /// the environment. Otherwise, new transaction creation will fail with + /// `LmdbError::MapResized`. 
+ pub fn set_map_size(&self, size: usize) -> Result<(), StoreError> { + self.env.set_map_size(size).map_err(Into::into) + } + + /// Closes this environment and optionally deletes all its files from disk. Doesn't + /// delete the folder used when opening the environment. + pub fn close(self, options: CloseOptions) -> Result<(), CloseError> { + let files = self.env.get_files_on_disk(); + drop(self); + + if options.delete { + for file in files { + fs::remove_file(file)?; + } + } + + Ok(()) + } +} diff --git a/third_party/rust/rkv/src/error.rs b/third_party/rust/rkv/src/error.rs new file mode 100644 index 0000000000..9b8a4c02c4 --- /dev/null +++ b/third_party/rust/rkv/src/error.rs @@ -0,0 +1,195 @@ +// Copyright 2018-2019 Mozilla +// +// Licensed under the Apache License, Version 2.0 (the "License"); you may not use +// this file except in compliance with the License. You may obtain a copy of the +// License at http://www.apache.org/licenses/LICENSE-2.0 +// Unless required by applicable law or agreed to in writing, software distributed +// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR +// CONDITIONS OF ANY KIND, either express or implied. See the License for the +// specific language governing permissions and limitations under the License. 
+ +use std::{ + io, + path::PathBuf, + str, + sync, + thread, + thread::ThreadId, +}; + +use failure::Fail; + +pub use crate::backend::SafeModeError; +use crate::value::Type; + +#[derive(Debug, Fail)] +pub enum DataError { + #[fail(display = "unknown type tag: {}", _0)] + UnknownType(u8), + + #[fail(display = "unexpected type tag: expected {}, got {}", expected, actual)] + UnexpectedType { + expected: Type, + actual: Type, + }, + + #[fail(display = "empty data; expected tag")] + Empty, + + #[fail(display = "invalid value for type {}: {}", value_type, err)] + DecodingError { + value_type: Type, + err: Box<bincode::ErrorKind>, + }, + + #[fail(display = "couldn't encode value: {}", _0)] + EncodingError(Box<bincode::ErrorKind>), + + #[fail(display = "invalid uuid bytes")] + InvalidUuid, +} + +impl From<Box<bincode::ErrorKind>> for DataError { + fn from(e: Box<bincode::ErrorKind>) -> DataError { + DataError::EncodingError(e) + } +} + +#[derive(Debug, Fail)] +pub enum StoreError { + #[fail(display = "manager poisoned")] + ManagerPoisonError, + + #[fail(display = "database corrupted")] + DatabaseCorrupted, + + #[fail(display = "key/value pair not found")] + KeyValuePairNotFound, + + #[fail(display = "unsupported size of key/DB name/data")] + KeyValuePairBadSize, + + #[fail(display = "file is not a valid database")] + FileInvalid, + + #[fail(display = "environment mapsize reached")] + MapFull, + + #[fail(display = "environment maxdbs reached")] + DbsFull, + + #[fail(display = "environment maxreaders reached")] + ReadersFull, + + #[fail(display = "I/O error: {:?}", _0)] + IoError(io::Error), + + #[fail(display = "environment path does not exist or not the right type: {:?}", _0)] + UnsuitableEnvironmentPath(PathBuf), + + #[fail(display = "data error: {:?}", _0)] + DataError(DataError), + + #[fail(display = "lmdb backend error: {}", _0)] + LmdbError(lmdb::Error), + + #[fail(display = "safe mode backend error: {}", _0)] + SafeModeError(SafeModeError), + + #[fail(display = 
"read transaction already exists in thread {:?}", _0)] + ReadTransactionAlreadyExists(ThreadId), + + #[fail(display = "attempted to open DB during transaction in thread {:?}", _0)] + OpenAttemptedDuringTransaction(ThreadId), +} + +impl StoreError { + pub fn open_during_transaction() -> StoreError { + StoreError::OpenAttemptedDuringTransaction(thread::current().id()) + } + + pub fn read_transaction_already_exists() -> StoreError { + StoreError::ReadTransactionAlreadyExists(thread::current().id()) + } +} + +impl From<DataError> for StoreError { + fn from(e: DataError) -> StoreError { + StoreError::DataError(e) + } +} + +impl From<io::Error> for StoreError { + fn from(e: io::Error) -> StoreError { + StoreError::IoError(e) + } +} + +impl<T> From<sync::PoisonError<T>> for StoreError { + fn from(_: sync::PoisonError<T>) -> StoreError { + StoreError::ManagerPoisonError + } +} + +#[derive(Debug, Fail)] +pub enum CloseError { + #[fail(display = "manager poisoned")] + ManagerPoisonError, + + #[fail(display = "close attempted while manager has an environment still open")] + EnvironmentStillOpen, + + #[fail(display = "close attempted while an environment not known to the manager is still open")] + UnknownEnvironmentStillOpen, + + #[fail(display = "I/O error: {:?}", _0)] + IoError(io::Error), +} + +impl<T> From<sync::PoisonError<T>> for CloseError { + fn from(_: sync::PoisonError<T>) -> CloseError { + CloseError::ManagerPoisonError + } +} + +impl From<io::Error> for CloseError { + fn from(e: io::Error) -> CloseError { + CloseError::IoError(e) + } +} + +#[derive(Debug, Fail)] +pub enum MigrateError { + #[fail(display = "store error: {}", _0)] + StoreError(StoreError), + + #[fail(display = "close error: {}", _0)] + CloseError(CloseError), + + #[fail(display = "manager poisoned")] + ManagerPoisonError, + + #[fail(display = "source is empty")] + SourceEmpty, + + #[fail(display = "destination is not empty")] + DestinationNotEmpty, +} + +impl From<StoreError> for MigrateError { + fn 
from(e: StoreError) -> MigrateError { + MigrateError::StoreError(e) + } +} + +impl From<CloseError> for MigrateError { + fn from(e: CloseError) -> MigrateError { + MigrateError::CloseError(e) + } +} + +impl<T> From<sync::PoisonError<T>> for MigrateError { + fn from(_: sync::PoisonError<T>) -> MigrateError { + MigrateError::ManagerPoisonError + } +} diff --git a/third_party/rust/rkv/src/helpers.rs b/third_party/rust/rkv/src/helpers.rs new file mode 100644 index 0000000000..6f6cd9c774 --- /dev/null +++ b/third_party/rust/rkv/src/helpers.rs @@ -0,0 +1,47 @@ +// Copyright 2018-2019 Mozilla +// +// Licensed under the Apache License, Version 2.0 (the "License"); you may not use +// this file except in compliance with the License. You may obtain a copy of the +// License at http://www.apache.org/licenses/LICENSE-2.0 +// Unless required by applicable law or agreed to in writing, software distributed +// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR +// CONDITIONS OF ANY KIND, either express or implied. See the License for the +// specific language governing permissions and limitations under the License. + +use std::{ + io, + path::{ + Path, + PathBuf, + }, +}; + +use url::Url; + +use crate::{ + error::StoreError, + value::Value, +}; + +pub(crate) fn read_transform(value: Result<&[u8], StoreError>) -> Result<Value, StoreError> { + match value { + Ok(bytes) => Value::from_tagged_slice(bytes).map_err(StoreError::DataError), + Err(e) => Err(e), + } +} + +// Workaround the UNC path on Windows, see https://github.com/rust-lang/rust/issues/42869. +// Otherwise, `Env::from_builder()` will panic with error_no(123). 
+pub(crate) fn canonicalize_path<'p, P>(path: P) -> io::Result<PathBuf> +where + P: Into<&'p Path>, +{ + let canonical = path.into().canonicalize()?; + + Ok(if cfg!(target_os = "windows") { + let map_err = |_| io::Error::new(io::ErrorKind::Other, "path canonicalization error"); + Url::from_file_path(&canonical).and_then(|url| url.to_file_path()).map_err(map_err)? + } else { + canonical + }) +} diff --git a/third_party/rust/rkv/src/lib.rs b/third_party/rust/rkv/src/lib.rs new file mode 100644 index 0000000000..20ec666a56 --- /dev/null +++ b/third_party/rust/rkv/src/lib.rs @@ -0,0 +1,254 @@ +// Copyright 2018-2019 Mozilla +// +// Licensed under the Apache License, Version 2.0 (the "License"); you may not use +// this file except in compliance with the License. You may obtain a copy of the +// License at http://www.apache.org/licenses/LICENSE-2.0 +// Unless required by applicable law or agreed to in writing, software distributed +// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR +// CONDITIONS OF ANY KIND, either express or implied. See the License for the +// specific language governing permissions and limitations under the License. + +//! A simple, humane, typed key-value storage solution. It supports multiple backend +//! engines with varying guarantees, such as [LMDB](http://www.lmdb.tech/doc/) for +//! performance, or "SafeMode" for reliability. +//! +//! It aims to achieve the following: +//! +//! - Avoid sharp edges (e.g., obscure error codes for common situations). +//! - Report errors via [failure](https://docs.rs/failure/). +//! - Correctly restrict access to one handle per process via a +//! [Manager](struct.Manager.html). +//! - Use Rust's type system to make single-typed key stores safe and ergonomic. +//! - Encode and decode values via [bincode](https://docs.rs/bincode/)/[serde](https://docs.rs/serde/) +//! and type tags, achieving platform-independent storage and input/output flexibility. +//! +//! 
It exposes these primary abstractions: +//! +//! - [Manager](struct.Manager.html): a singleton that controls access to environments +//! - [Rkv](struct.Rkv.html): an environment contains a set of key/value databases +//! - [SingleStore](store/single/struct.SingleStore.html): a database contains a set of +//! key/value pairs +//! +//! Keys can be anything that implements `AsRef<[u8]>` or integers +//! (when accessing an [IntegerStore](store/integer/struct.IntegerStore.html)). +//! +//! Values can be any of the types defined by the [Value](value/enum.Value.html) enum, +//! including: +//! +//! - booleans (`Value::Bool`) +//! - integers (`Value::I64`, `Value::U64`) +//! - floats (`Value::F64`) +//! - strings (`Value::Str`) +//! - blobs (`Value::Blob`) +//! +//! See [Value](value/enum.Value.html) for the complete list of supported types. +//! +//! ## Basic Usage +//! ``` +//! use rkv::{Manager, Rkv, SingleStore, Value, StoreOptions}; +//! use rkv::backend::{Lmdb, LmdbEnvironment}; +//! use std::fs; +//! use tempfile::Builder; +//! +//! // First determine the path to the environment, which is represented on disk as a +//! // directory containing two files: +//! // +//! // * a data file containing the key/value stores +//! // * a lock file containing metadata about current transactions +//! // +//! // In this example, we use the `tempfile` crate to create the directory. +//! // +//! let root = Builder::new().prefix("simple-db").tempdir().unwrap(); +//! fs::create_dir_all(root.path()).unwrap(); +//! let path = root.path(); +//! +//! // The `Manager` enforces that each process opens the same environment at most once by +//! // caching a handle to each environment that it opens. Use it to retrieve the handle +//! // to an opened environment—or create one if it hasn't already been opened: +//! let mut manager = Manager::<LmdbEnvironment>::singleton().write().unwrap(); +//! let created_arc = manager.get_or_create(path, Rkv::new::<Lmdb>).unwrap(); +//! 
let env = created_arc.read().unwrap(); +//! +//! // Then you can use the environment handle to get a handle to a datastore: +//! let store = env.open_single("mydb", StoreOptions::create()).unwrap(); +//! +//! { +//! // Use a write transaction to mutate the store via a `Writer`. There can be only +//! // one writer for a given environment, so opening a second one will block until +//! // the first completes. +//! let mut writer = env.write().unwrap(); +//! +//! // Keys are `AsRef<[u8]>`, while values are `Value` enum instances. Use the `Blob` +//! // variant to store arbitrary collections of bytes. Putting data returns a +//! // `Result<(), StoreError>`, where StoreError is an enum identifying the reason +//! // for a failure. +//! store.put(&mut writer, "int", &Value::I64(1234)).unwrap(); +//! store.put(&mut writer, "uint", &Value::U64(1234_u64)).unwrap(); +//! store.put(&mut writer, "float", &Value::F64(1234.0.into())).unwrap(); +//! store.put(&mut writer, "instant", &Value::Instant(1528318073700)).unwrap(); +//! store.put(&mut writer, "boolean", &Value::Bool(true)).unwrap(); +//! store.put(&mut writer, "string", &Value::Str("Héllo, wörld!")).unwrap(); +//! store.put(&mut writer, "json", &Value::Json(r#"{"foo":"bar", "number": 1}"#)).unwrap(); +//! store.put(&mut writer, "blob", &Value::Blob(b"blob")).unwrap(); +//! +//! // You must commit a write transaction before the writer goes out of scope, or the +//! // transaction will abort and the data won't persist. +//! writer.commit().unwrap(); +//! } +//! +//! { +//! // Use a read transaction to query the store via a `Reader`. There can be multiple +//! // concurrent readers for a store, and readers never block on a writer nor other +//! // readers. +//! let reader = env.read().expect("reader"); +//! +//! // Keys are `AsRef<u8>`, and the return value is `Result<Option<Value>, StoreError>`. +//! println!("Get int {:?}", store.get(&reader, "int").unwrap()); +//! 
println!("Get uint {:?}", store.get(&reader, "uint").unwrap()); +//! println!("Get float {:?}", store.get(&reader, "float").unwrap()); +//! println!("Get instant {:?}", store.get(&reader, "instant").unwrap()); +//! println!("Get boolean {:?}", store.get(&reader, "boolean").unwrap()); +//! println!("Get string {:?}", store.get(&reader, "string").unwrap()); +//! println!("Get json {:?}", store.get(&reader, "json").unwrap()); +//! println!("Get blob {:?}", store.get(&reader, "blob").unwrap()); +//! +//! // Retrieving a non-existent value returns `Ok(None)`. +//! println!("Get non-existent value {:?}", store.get(&reader, "non-existent").unwrap()); +//! +//! // A read transaction will automatically close once the reader goes out of scope, +//! // so isn't necessary to close it explicitly, although you can do so by calling +//! // `Reader.abort()`. +//! } +//! +//! { +//! // Aborting a write transaction rolls back the change(s). +//! let mut writer = env.write().unwrap(); +//! store.put(&mut writer, "foo", &Value::Str("bar")).unwrap(); +//! writer.abort(); +//! let reader = env.read().expect("reader"); +//! println!("It should be None! ({:?})", store.get(&reader, "foo").unwrap()); +//! } +//! +//! { +//! // Explicitly aborting a transaction is not required unless an early abort is +//! // desired, since both read and write transactions will implicitly be aborted once +//! // they go out of scope. +//! { +//! let mut writer = env.write().unwrap(); +//! store.put(&mut writer, "foo", &Value::Str("bar")).unwrap(); +//! } +//! let reader = env.read().expect("reader"); +//! println!("It should be None! ({:?})", store.get(&reader, "foo").unwrap()); +//! } +//! +//! { +//! // Deleting a key/value pair also requires a write transaction. +//! let mut writer = env.write().unwrap(); +//! store.put(&mut writer, "foo", &Value::Str("bar")).unwrap(); +//! store.put(&mut writer, "bar", &Value::Str("baz")).unwrap(); +//! store.delete(&mut writer, "foo").unwrap(); +//! +//! 
// A write transaction also supports reading, and the version of the store that it +//! // reads includes the changes it has made regardless of the commit state of that +//! // transaction. + +//! // In the code above, "foo" and "bar" were put into the store, then "foo" was +//! // deleted so only "bar" will return a result when the database is queried via the +//! // writer. +//! println!("It should be None! ({:?})", store.get(&writer, "foo").unwrap()); +//! println!("Get bar ({:?})", store.get(&writer, "bar").unwrap()); +//! +//! // But a reader won't see that change until the write transaction is committed. +//! { +//! let reader = env.read().expect("reader"); +//! println!("Get foo {:?}", store.get(&reader, "foo").unwrap()); +//! println!("Get bar {:?}", store.get(&reader, "bar").unwrap()); +//! } +//! writer.commit().unwrap(); +//! { +//! let reader = env.read().expect("reader"); +//! println!("It should be None! ({:?})", store.get(&reader, "foo").unwrap()); +//! println!("Get bar {:?}", store.get(&reader, "bar").unwrap()); +//! } +//! +//! // Committing a transaction consumes the writer, preventing you from reusing it by +//! // failing at compile time with an error. This line would report "error[E0382]: +//! // borrow of moved value: `writer`". +//! // store.put(&mut writer, "baz", &Value::Str("buz")).unwrap(); +//! } +//! +//! { +//! // Clearing all the entries in the store with a write transaction. +//! { +//! let mut writer = env.write().unwrap(); +//! store.put(&mut writer, "foo", &Value::Str("bar")).unwrap(); +//! store.put(&mut writer, "bar", &Value::Str("baz")).unwrap(); +//! writer.commit().unwrap(); +//! } +//! +//! { +//! let mut writer = env.write().unwrap(); +//! store.clear(&mut writer).unwrap(); +//! writer.commit().unwrap(); +//! } +//! +//! { +//! let reader = env.read().expect("reader"); +//! println!("It should be None! ({:?})", store.get(&reader, "foo").unwrap()); +//! println!("It should be None! 
({:?})", store.get(&reader, "bar").unwrap()); +//! } +//! +//! } +//! +//! ``` + +mod env; +mod error; +mod helpers; +mod manager; +mod readwrite; + +pub mod backend; +pub mod migrator; +pub mod store; +pub mod value; + +pub use backend::{ + DatabaseFlags, + EnvironmentFlags, + WriteFlags, +}; +pub use env::Rkv; +pub use error::{ + DataError, + MigrateError, + StoreError, +}; +pub use manager::Manager; +pub use migrator::Migrator; +pub use readwrite::{ + Readable, + Reader, + Writer, +}; +pub use store::{ + keys::EncodableKey, + single::SingleStore, + CloseOptions, + Options as StoreOptions, +}; +pub use value::{ + OwnedValue, + Value, +}; + +#[cfg(feature = "db-dup-sort")] +pub use store::multi::MultiStore; + +#[cfg(feature = "db-int-key")] +pub use store::integer::IntegerStore; +#[cfg(feature = "db-int-key")] +pub use store::keys::PrimitiveInt; + +#[cfg(all(feature = "db-dup-sort", feature = "db-int-key"))] +pub use store::integermulti::MultiIntegerStore; diff --git a/third_party/rust/rkv/src/manager.rs b/third_party/rust/rkv/src/manager.rs new file mode 100644 index 0000000000..14e517c64c --- /dev/null +++ b/third_party/rust/rkv/src/manager.rs @@ -0,0 +1,231 @@ +// Copyright 2018-2019 Mozilla +// +// Licensed under the Apache License, Version 2.0 (the "License"); you may not use +// this file except in compliance with the License. You may obtain a copy of the +// License at http://www.apache.org/licenses/LICENSE-2.0 +// Unless required by applicable law or agreed to in writing, software distributed +// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR +// CONDITIONS OF ANY KIND, either express or implied. See the License for the +// specific language governing permissions and limitations under the License. 
+ +use std::{ + collections::{ + btree_map::Entry, + BTreeMap, + }, + os::raw::c_uint, + path::{ + Path, + PathBuf, + }, + result, + sync::{ + Arc, + RwLock, + }, +}; + +use lazy_static::lazy_static; + +use crate::{ + backend::{ + BackendEnvironment, + BackendEnvironmentBuilder, + LmdbEnvironment, + SafeModeEnvironment, + }, + error::{ + CloseError, + StoreError, + }, + helpers::canonicalize_path, + store::CloseOptions, + Rkv, +}; + +type Result<T> = result::Result<T, StoreError>; +type CloseResult<T> = result::Result<T, CloseError>; +type SharedRkv<E> = Arc<RwLock<Rkv<E>>>; + +lazy_static! { + static ref MANAGER_LMDB: RwLock<Manager<LmdbEnvironment>> = RwLock::new(Manager::new()); + static ref MANAGER_SAFE_MODE: RwLock<Manager<SafeModeEnvironment>> = RwLock::new(Manager::new()); +} + +/// A process is only permitted to have one open handle to each Rkv environment. This +/// manager exists to enforce that constraint: don't open environments directly. +/// +/// By default, path canonicalization is enabled for identifying RKV instances. This +/// is true by default, because it helps enforce the constraints guaranteed by +/// this manager. However, path canonicalization might crash in some fringe +/// circumstances, so the `no-canonicalize-path` feature offers the possibility of +/// disabling it. See: https://bugzilla.mozilla.org/show_bug.cgi?id=1531887 +/// +/// When path canonicalization is disabled, you *must* ensure an RKV environment is +/// always created or retrieved with the same path. +pub struct Manager<E> { + environments: BTreeMap<PathBuf, SharedRkv<E>>, +} + +impl<'e, E> Manager<E> +where + E: BackendEnvironment<'e>, +{ + fn new() -> Manager<E> { + Manager { + environments: Default::default(), + } + } + + /// Return the open env at `path`, returning `None` if it has not already been opened. 
+ pub fn get<'p, P>(&self, path: P) -> Result<Option<SharedRkv<E>>> + where + P: Into<&'p Path>, + { + let canonical = if cfg!(feature = "no-canonicalize-path") { + path.into().to_path_buf() + } else { + canonicalize_path(path)? + }; + Ok(self.environments.get(&canonical).cloned()) + } + + /// Return the open env at `path`, or create it by calling `f`. + pub fn get_or_create<'p, F, P>(&mut self, path: P, f: F) -> Result<SharedRkv<E>> + where + F: FnOnce(&Path) -> Result<Rkv<E>>, + P: Into<&'p Path>, + { + let canonical = if cfg!(feature = "no-canonicalize-path") { + path.into().to_path_buf() + } else { + canonicalize_path(path)? + }; + Ok(match self.environments.entry(canonical) { + Entry::Occupied(e) => e.get().clone(), + Entry::Vacant(e) => { + let k = Arc::new(RwLock::new(f(e.key().as_path())?)); + e.insert(k).clone() + }, + }) + } + + /// Return the open env at `path` with `capacity`, or create it by calling `f`. + pub fn get_or_create_with_capacity<'p, F, P>(&mut self, path: P, capacity: c_uint, f: F) -> Result<SharedRkv<E>> + where + F: FnOnce(&Path, c_uint) -> Result<Rkv<E>>, + P: Into<&'p Path>, + { + let canonical = if cfg!(feature = "no-canonicalize-path") { + path.into().to_path_buf() + } else { + canonicalize_path(path)? + }; + Ok(match self.environments.entry(canonical) { + Entry::Occupied(e) => e.get().clone(), + Entry::Vacant(e) => { + let k = Arc::new(RwLock::new(f(e.key().as_path(), capacity)?)); + e.insert(k).clone() + }, + }) + } + + /// Return a new Rkv environment from the builder, or create it by calling `f`. + pub fn get_or_create_from_builder<'p, F, P, B>(&mut self, path: P, builder: B, f: F) -> Result<SharedRkv<E>> + where + F: FnOnce(&Path, B) -> Result<Rkv<E>>, + P: Into<&'p Path>, + B: BackendEnvironmentBuilder<'e, Environment = E>, + { + let canonical = if cfg!(feature = "no-canonicalize-path") { + path.into().to_path_buf() + } else { + canonicalize_path(path)? 
+ }; + Ok(match self.environments.entry(canonical) { + Entry::Occupied(e) => e.get().clone(), + Entry::Vacant(e) => { + let k = Arc::new(RwLock::new(f(e.key().as_path(), builder)?)); + e.insert(k).clone() + }, + }) + } + + /// Tries to close the specified environment. + /// Returns an error when other users of this environment still exist. + pub fn try_close<'p, P>(&mut self, path: P, options: CloseOptions) -> CloseResult<()> + where + P: Into<&'p Path>, + { + let canonical = if cfg!(feature = "no-canonicalize-path") { + path.into().to_path_buf() + } else { + canonicalize_path(path)? + }; + match self.environments.entry(canonical) { + Entry::Vacant(_) => Ok(()), + Entry::Occupied(e) if Arc::strong_count(e.get()) > 1 => Err(CloseError::EnvironmentStillOpen), + Entry::Occupied(e) => { + let env = Arc::try_unwrap(e.remove()).map_err(|_| CloseError::UnknownEnvironmentStillOpen)?; + env.into_inner()?.close(options)?; + Ok(()) + }, + } + } +} + +impl Manager<LmdbEnvironment> { + pub fn singleton() -> &'static RwLock<Manager<LmdbEnvironment>> { + &*MANAGER_LMDB + } +} + +impl Manager<SafeModeEnvironment> { + pub fn singleton() -> &'static RwLock<Manager<SafeModeEnvironment>> { + &*MANAGER_SAFE_MODE + } +} + +#[cfg(test)] +mod tests { + use super::*; + use crate::*; + + use std::fs; + + use tempfile::Builder; + + use backend::Lmdb; + + /// Test that one can mutate managed Rkv instances in surprising ways. + #[test] + fn test_mutate_managed_rkv() { + let mut manager = Manager::<LmdbEnvironment>::new(); + + let root1 = Builder::new().prefix("test_mutate_managed_rkv_1").tempdir().expect("tempdir"); + fs::create_dir_all(root1.path()).expect("dir created"); + let path1 = root1.path(); + let arc = manager.get_or_create(path1, Rkv::new::<Lmdb>).expect("created"); + + // Arc<RwLock<>> has interior mutability, so we can replace arc's Rkv instance with a new + // instance that has a different path. 
+ let root2 = Builder::new().prefix("test_mutate_managed_rkv_2").tempdir().expect("tempdir"); + fs::create_dir_all(root2.path()).expect("dir created"); + let path2 = root2.path(); + { + let mut rkv = arc.write().expect("guard"); + let rkv2 = Rkv::new::<Lmdb>(path2).expect("Rkv"); + *rkv = rkv2; + } + + // Arc now has a different internal Rkv with path2, but it's still mapped to path1 in + // manager, so its pointer is equal to a new Arc for path1. + let path1_arc = manager.get(path1).expect("success").expect("existed"); + assert!(Arc::ptr_eq(&path1_arc, &arc)); + + // Meanwhile, a new Arc for path2 has a different pointer, even though its Rkv's path is + // the same as arc's current path. + let path2_arc = manager.get_or_create(path2, Rkv::new::<Lmdb>).expect("success"); + assert!(!Arc::ptr_eq(&path2_arc, &arc)); + } +} diff --git a/third_party/rust/rkv/src/migrator.rs b/third_party/rust/rkv/src/migrator.rs new file mode 100644 index 0000000000..405d722b1c --- /dev/null +++ b/third_party/rust/rkv/src/migrator.rs @@ -0,0 +1,185 @@ +// Copyright 2018-2019 Mozilla +// +// Licensed under the Apache License, Version 2.0 (the "License"); you may not use +// this file except in compliance with the License. You may obtain a copy of the +// License at http://www.apache.org/licenses/LICENSE-2.0 +// Unless required by applicable law or agreed to in writing, software distributed +// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR +// CONDITIONS OF ANY KIND, either express or implied. See the License for the +// specific language governing permissions and limitations under the License. + +//! A simple utility for migrating data from one RKV environment to another. Notably, this +//! tool can migrate data from an environment created with a different backend than the +//! current RKV consumer (e.g. from Lmdb to SafeMode). +//! +//! The utility doesn't support migrating between 32-bit and 64-bit LMDB environments yet, +//! 
see `arch_migrator` if this is needed. However, this utility is ultimately intended to +//! handle all possible migrations. +//! +//! The destination environment should be empty of data, otherwise an error is returned. +//! +//! There are 3 versions of the migration methods: +//! * `migrate_<src>_to_<dst>`, where `<src>` and `<dst>` are the source and destination +//! environment types. You're responsive with opening both these environments, handling +//! all errors, and performing any cleanup if necessary. +//! * `open_and_migrate_<src>_to_<dst>`, which is similar the the above, but automatically +//! attempts to open the source environment and delete all of its supporting files if +//! there's no other environment open at that path. You're still responsible with +//! handling all errors. +//! * `easy_migrate_<src>_to_<dst>` which is similar to the above, but ignores the +//! migration and doesn't delete any files if the source environment is invalid +//! (corrupted), unavailable (path not accessible or incompatible with configuration), +//! or empty (database has no records). +//! +//! The tool currently has these limitations: +//! +//! 1. It doesn't support migration from environments created with +//! `EnvironmentFlags::NO_SUB_DIR`. To migrate such an environment, create a temporary +//! directory, copy the environment's data files in the temporary directory, then +//! migrate the temporary directory as the source environment. +//! 2. It doesn't support migration from databases created with DatabaseFlags::DUP_SORT` +//! (with or without `DatabaseFlags::DUP_FIXED`) nor with `DatabaseFlags::INTEGER_KEY`. +//! This effectively means that migration is limited to `SingleStore`s. +//! 3. It doesn't allow for existing data in the destination environment, which means that +//! it cannot overwrite nor append data. 
+ +use crate::{ + backend::{ + LmdbEnvironment, + SafeModeEnvironment, + }, + error::MigrateError, + Rkv, + StoreOptions, +}; + +pub use crate::backend::{ + LmdbArchMigrateError, + LmdbArchMigrateResult, + LmdbArchMigrator, +}; + +// FIXME: should parametrize this instead. + +macro_rules! fn_migrator { + ($name:tt, $src_env:ty, $dst_env:ty) => { + /// Migrate all data in all of databases from the source environment to the destination + /// environment. This includes all key/value pairs in the main database that aren't + /// metadata about subdatabases and all key/value pairs in all subdatabases. + /// + /// Other backend-specific metadata such as map size or maximum databases left intact on + /// the given environments. + /// + /// The destination environment should be empty of data, otherwise an error is returned. + pub fn $name<S, D>(src_env: S, dst_env: D) -> Result<(), MigrateError> + where + S: std::ops::Deref<Target = Rkv<$src_env>>, + D: std::ops::Deref<Target = Rkv<$dst_env>>, + { + let src_dbs = src_env.get_dbs().unwrap(); + if src_dbs.is_empty() { + return Err(MigrateError::SourceEmpty); + } + let dst_dbs = dst_env.get_dbs().unwrap(); + if !dst_dbs.is_empty() { + return Err(MigrateError::DestinationNotEmpty); + } + for name in src_dbs { + let src_store = src_env.open_single(name.as_deref(), StoreOptions::default())?; + let dst_store = dst_env.open_single(name.as_deref(), StoreOptions::create())?; + let reader = src_env.read()?; + let mut writer = dst_env.write()?; + let mut iter = src_store.iter_start(&reader)?; + while let Some(Ok((key, value))) = iter.next() { + dst_store.put(&mut writer, key, &value).expect("wrote"); + } + writer.commit()?; + } + Ok(()) + } + }; + + (open $migrate:tt, $name:tt, $builder:tt, $src_env:ty, $dst_env:ty) => { + /// Same as the the `migrate_x_to_y` migration method above, but automatically attempts + /// to open the source environment. 
Finally, deletes all of its supporting files if + /// there's no other environment open at that path and the migration succeeded. + pub fn $name<F, D>(path: &std::path::Path, build: F, dst_env: D) -> Result<(), MigrateError> + where + F: FnOnce(crate::backend::$builder) -> crate::backend::$builder, + D: std::ops::Deref<Target = Rkv<$dst_env>>, + { + use crate::{ + backend::*, + CloseOptions, + }; + + let mut manager = crate::Manager::<$src_env>::singleton().write()?; + let mut builder = Rkv::<$src_env>::environment_builder::<$builder>(); + builder.set_max_dbs(crate::env::DEFAULT_MAX_DBS); + builder = build(builder); + + let src_env = manager.get_or_create_from_builder(path, builder, Rkv::from_builder::<$builder>)?; + Migrator::$migrate(src_env.read()?, dst_env)?; + + drop(src_env); + manager.try_close(path, CloseOptions::delete_files_on_disk())?; + + Ok(()) + } + }; + + (easy $migrate:tt, $name:tt, $src_env:ty, $dst_env:ty) => { + /// Same as the `open_and_migrate_x_to_y` migration method above, but ignores the + /// migration and doesn't delete any files if the following conditions apply: + /// - Source environment is invalid/corrupted, unavailable, or empty. + /// - Destination environment is not empty. + /// Use this instead of the other migration methods if: + /// - You're not concerned by throwing away old data and starting fresh with a new store. + /// - You'll never want to overwrite data in the new store from the old store. + pub fn $name<D>(path: &std::path::Path, dst_env: D) -> Result<(), MigrateError> + where + D: std::ops::Deref<Target = Rkv<$dst_env>>, + { + match Migrator::$migrate(path, |builder| builder, dst_env) { + // Source environment is an invalid file or corrupted database. + Err(crate::MigrateError::StoreError(crate::StoreError::FileInvalid)) => Ok(()), + Err(crate::MigrateError::StoreError(crate::StoreError::DatabaseCorrupted)) => Ok(()), + // Path not accessible. 
+ Err(crate::MigrateError::StoreError(crate::StoreError::IoError(_))) => Ok(()), + // Path accessible but incompatible for configuration. + Err(crate::MigrateError::StoreError(crate::StoreError::UnsuitableEnvironmentPath(_))) => Ok(()), + // Couldn't close source environment and delete files on disk (e.g. other stores still open). + Err(crate::MigrateError::CloseError(_)) => Ok(()), + // Nothing to migrate. + Err(crate::MigrateError::SourceEmpty) => Ok(()), + // Migrating would overwrite. + Err(crate::MigrateError::DestinationNotEmpty) => Ok(()), + result => result, + }?; + + Ok(()) + } + }; +} + +macro_rules! fns_migrator { + ($src:tt, $dst:tt) => { + paste::item! { + fns_migrator!([<migrate_ $src _to_ $dst>], $src, $dst); + fns_migrator!([<migrate_ $dst _to_ $src>], $dst, $src); + } + }; + ($name:tt, $src:tt, $dst:tt) => { + paste::item! { + fn_migrator!($name, [<$src:camel Environment>], [<$dst:camel Environment>]); + fn_migrator!(open $name, [<open_and_ $name>], [<$src:camel>], [<$src:camel Environment>], [<$dst:camel Environment>]); + fn_migrator!(easy [<open_and_ $name>], [<easy_ $name>], [<$src:camel Environment>], [<$dst:camel Environment>]); + } + }; +} + +pub struct Migrator; + +impl Migrator { + fns_migrator!(lmdb, safe_mode); +} diff --git a/third_party/rust/rkv/src/readwrite.rs b/third_party/rust/rkv/src/readwrite.rs new file mode 100644 index 0000000000..50ed2a1d88 --- /dev/null +++ b/third_party/rust/rkv/src/readwrite.rs @@ -0,0 +1,145 @@ +// Copyright 2018-2019 Mozilla +// +// Licensed under the Apache License, Version 2.0 (the "License"); you may not use +// this file except in compliance with the License. You may obtain a copy of the +// License at http://www.apache.org/licenses/LICENSE-2.0 +// Unless required by applicable law or agreed to in writing, software distributed +// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR +// CONDITIONS OF ANY KIND, either express or implied. 
See the License for the +// specific language governing permissions and limitations under the License. + +use crate::{ + backend::{ + BackendDatabase, + BackendRoCursor, + BackendRoCursorTransaction, + BackendRoTransaction, + BackendRwCursorTransaction, + BackendRwTransaction, + }, + error::StoreError, + helpers::read_transform, + value::Value, +}; + +pub struct Reader<T>(T); +pub struct Writer<T>(T); + +pub trait Readable<'r> { + type Database: BackendDatabase; + type RoCursor: BackendRoCursor<'r>; + + fn get<K>(&'r self, db: &Self::Database, k: &K) -> Result<Option<Value<'r>>, StoreError> + where + K: AsRef<[u8]>; + + fn open_ro_cursor(&'r self, db: &Self::Database) -> Result<Self::RoCursor, StoreError>; +} + +impl<'r, T> Readable<'r> for Reader<T> +where + T: BackendRoCursorTransaction<'r>, +{ + type Database = T::Database; + type RoCursor = T::RoCursor; + + fn get<K>(&'r self, db: &T::Database, k: &K) -> Result<Option<Value<'r>>, StoreError> + where + K: AsRef<[u8]>, + { + let bytes = self.0.get(db, k.as_ref()).map_err(|e| e.into()); + match read_transform(bytes).map(Some) { + Err(StoreError::KeyValuePairNotFound) => Ok(None), + result => result, + } + } + + fn open_ro_cursor(&'r self, db: &T::Database) -> Result<T::RoCursor, StoreError> { + self.0.open_ro_cursor(db).map_err(|e| e.into()) + } +} + +impl<T> Reader<T> { + pub(crate) fn new(txn: T) -> Reader<T> { + Reader(txn) + } +} + +impl<T> Reader<T> +where + T: BackendRoTransaction, +{ + pub fn abort(self) { + self.0.abort(); + } +} + +impl<'r, T> Readable<'r> for Writer<T> +where + T: BackendRwCursorTransaction<'r>, +{ + type Database = T::Database; + type RoCursor = T::RoCursor; + + fn get<K>(&'r self, db: &T::Database, k: &K) -> Result<Option<Value<'r>>, StoreError> + where + K: AsRef<[u8]>, + { + let bytes = self.0.get(db, k.as_ref()).map_err(|e| e.into()); + match read_transform(bytes).map(Some) { + Err(StoreError::KeyValuePairNotFound) => Ok(None), + result => result, + } + } + + fn open_ro_cursor(&'r 
self, db: &T::Database) -> Result<T::RoCursor, StoreError> { + self.0.open_ro_cursor(db).map_err(|e| e.into()) + } +} + +impl<T> Writer<T> { + pub(crate) fn new(txn: T) -> Writer<T> { + Writer(txn) + } +} + +impl<T> Writer<T> +where + T: BackendRwTransaction, +{ + pub fn commit(self) -> Result<(), StoreError> { + self.0.commit().map_err(|e| e.into()) + } + + pub fn abort(self) { + self.0.abort(); + } + + pub(crate) fn put<K>(&mut self, db: &T::Database, k: &K, v: &Value, flags: T::Flags) -> Result<(), StoreError> + where + K: AsRef<[u8]>, + { + // TODO: don't allocate twice. + self.0.put(db, k.as_ref(), &v.to_bytes()?, flags).map_err(|e| e.into()) + } + + #[cfg(not(feature = "db-dup-sort"))] + pub(crate) fn delete<K>(&mut self, db: &T::Database, k: &K) -> Result<(), StoreError> + where + K: AsRef<[u8]>, + { + self.0.del(db, k.as_ref()).map_err(|e| e.into()) + } + + #[cfg(feature = "db-dup-sort")] + pub(crate) fn delete<K>(&mut self, db: &T::Database, k: &K, v: Option<&[u8]>) -> Result<(), StoreError> + where + K: AsRef<[u8]>, + { + self.0.del(db, k.as_ref(), v).map_err(|e| e.into()) + } + + pub(crate) fn clear(&mut self, db: &T::Database) -> Result<(), StoreError> { + self.0.clear_db(db).map_err(|e| e.into()) + } +} diff --git a/third_party/rust/rkv/src/store.rs b/third_party/rust/rkv/src/store.rs new file mode 100644 index 0000000000..85b905e489 --- /dev/null +++ b/third_party/rust/rkv/src/store.rs @@ -0,0 +1,54 @@ +// Copyright 2018-2019 Mozilla +// +// Licensed under the Apache License, Version 2.0 (the "License"); you may not use +// this file except in compliance with the License. You may obtain a copy of the +// License at http://www.apache.org/licenses/LICENSE-2.0 +// Unless required by applicable law or agreed to in writing, software distributed +// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR +// CONDITIONS OF ANY KIND, either express or implied. 
See the License for the +// specific language governing permissions and limitations under the License. + +pub mod keys; +pub mod single; + +#[cfg(feature = "db-dup-sort")] +pub mod multi; + +#[cfg(feature = "db-int-key")] +pub mod integer; + +#[cfg(all(feature = "db-dup-sort", feature = "db-int-key"))] +pub mod integermulti; + +use crate::backend::BackendDatabaseFlags; + +#[derive(Default, Debug, Copy, Clone)] +pub struct Options<F> { + pub create: bool, + pub flags: F, +} + +impl<F> Options<F> +where + F: BackendDatabaseFlags, +{ + pub fn create() -> Options<F> { + Options { + create: true, + flags: F::empty(), + } + } +} + +#[derive(Default, Debug, Copy, Clone)] +pub struct CloseOptions { + pub delete: bool, +} + +impl CloseOptions { + pub fn delete_files_on_disk() -> CloseOptions { + CloseOptions { + delete: true, + } + } +} diff --git a/third_party/rust/rkv/src/store/integer.rs b/third_party/rust/rkv/src/store/integer.rs new file mode 100644 index 0000000000..6a64afb807 --- /dev/null +++ b/third_party/rust/rkv/src/store/integer.rs @@ -0,0 +1,548 @@ +// Copyright 2018-2019 Mozilla +// +// Licensed under the Apache License, Version 2.0 (the "License"); you may not use +// this file except in compliance with the License. You may obtain a copy of the +// License at http://www.apache.org/licenses/LICENSE-2.0 +// Unless required by applicable law or agreed to in writing, software distributed +// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR +// CONDITIONS OF ANY KIND, either express or implied. See the License for the +// specific language governing permissions and limitations under the License. 
+ +use std::marker::PhantomData; + +use crate::{ + backend::{ + BackendDatabase, + BackendRwTransaction, + }, + error::StoreError, + readwrite::{ + Readable, + Writer, + }, + store::{ + keys::{ + Key, + PrimitiveInt, + }, + single::SingleStore, + }, + value::Value, +}; + +type EmptyResult = Result<(), StoreError>; + +#[derive(Debug, Eq, PartialEq, Copy, Clone)] +pub struct IntegerStore<D, K> { + inner: SingleStore<D>, + phantom: PhantomData<K>, +} + +impl<D, K> IntegerStore<D, K> +where + D: BackendDatabase, + K: PrimitiveInt, +{ + pub(crate) fn new(db: D) -> IntegerStore<D, K> { + IntegerStore { + inner: SingleStore::new(db), + phantom: PhantomData, + } + } + + pub fn get<'r, R>(&self, reader: &'r R, k: K) -> Result<Option<Value<'r>>, StoreError> + where + R: Readable<'r, Database = D>, + { + self.inner.get(reader, Key::new(&k)?) + } + + pub fn put<T>(&self, writer: &mut Writer<T>, k: K, v: &Value) -> EmptyResult + where + T: BackendRwTransaction<Database = D>, + { + self.inner.put(writer, Key::new(&k)?, v) + } + + pub fn delete<T>(&self, writer: &mut Writer<T>, k: K) -> EmptyResult + where + T: BackendRwTransaction<Database = D>, + { + self.inner.delete(writer, Key::new(&k)?) + } + + pub fn clear<T>(&self, writer: &mut Writer<T>) -> EmptyResult + where + T: BackendRwTransaction<Database = D>, + { + self.inner.clear(writer) + } +} + +#[cfg(test)] +mod tests { + use super::*; + use crate::*; + + use std::fs; + + use tempfile::Builder; + + #[test] + fn test_integer_keys() { + let root = Builder::new().prefix("test_integer_keys").tempdir().expect("tempdir"); + fs::create_dir_all(root.path()).expect("dir created"); + + let k = Rkv::new::<backend::Lmdb>(root.path()).expect("new succeeded"); + let s = k.open_integer("s", StoreOptions::create()).expect("open"); + + macro_rules! 
test_integer_keys { + ($type:ty, $key:expr) => {{ + let mut writer = k.write().expect("writer"); + + s.put(&mut writer, $key, &Value::Str("hello!")).expect("write"); + assert_eq!(s.get(&writer, $key).expect("read"), Some(Value::Str("hello!"))); + writer.commit().expect("committed"); + + let reader = k.read().expect("reader"); + assert_eq!(s.get(&reader, $key).expect("read"), Some(Value::Str("hello!"))); + }}; + } + + test_integer_keys!(u32, std::u32::MIN); + test_integer_keys!(u32, std::u32::MAX); + } + + #[test] + fn test_clear() { + let root = Builder::new().prefix("test_integer_clear").tempdir().expect("tempdir"); + fs::create_dir_all(root.path()).expect("dir created"); + + let k = Rkv::new::<backend::Lmdb>(root.path()).expect("new succeeded"); + let s = k.open_integer("s", StoreOptions::create()).expect("open"); + + { + let mut writer = k.write().expect("writer"); + s.put(&mut writer, 1, &Value::Str("hello!")).expect("write"); + s.put(&mut writer, 2, &Value::Str("hello!")).expect("write"); + s.put(&mut writer, 3, &Value::Str("hello!")).expect("write"); + assert_eq!(s.get(&writer, 1).expect("read"), Some(Value::Str("hello!"))); + assert_eq!(s.get(&writer, 2).expect("read"), Some(Value::Str("hello!"))); + assert_eq!(s.get(&writer, 3).expect("read"), Some(Value::Str("hello!"))); + writer.commit().expect("committed"); + } + + { + let mut writer = k.write().expect("writer"); + s.clear(&mut writer).expect("cleared"); + writer.commit().expect("committed"); + + let reader = k.read().expect("reader"); + assert_eq!(s.get(&reader, 1).expect("read"), None); + assert_eq!(s.get(&reader, 2).expect("read"), None); + assert_eq!(s.get(&reader, 3).expect("read"), None); + } + } + + #[test] + fn test_dup() { + let root = Builder::new().prefix("test_integer_dup").tempdir().expect("tempdir"); + fs::create_dir_all(root.path()).expect("dir created"); + + let k = Rkv::new::<backend::Lmdb>(root.path()).expect("new succeeded"); + let s = k.open_integer("s", 
StoreOptions::create()).expect("open"); + + { + let mut writer = k.write().expect("writer"); + s.put(&mut writer, 1, &Value::Str("hello!")).expect("write"); + s.put(&mut writer, 1, &Value::Str("foo!")).expect("write"); + s.put(&mut writer, 1, &Value::Str("bar!")).expect("write"); + assert_eq!(s.get(&writer, 1).expect("read"), Some(Value::Str("bar!"))); + assert_eq!(s.get(&writer, 2).expect("read"), None); + assert_eq!(s.get(&writer, 3).expect("read"), None); + writer.commit().expect("committed"); + } + + { + let mut writer = k.write().expect("writer"); + s.clear(&mut writer).expect("cleared"); + writer.commit().expect("committed"); + + let reader = k.read().expect("reader"); + assert_eq!(s.get(&reader, 1).expect("read"), None); + assert_eq!(s.get(&reader, 2).expect("read"), None); + assert_eq!(s.get(&reader, 3).expect("read"), None); + } + } + + #[test] + fn test_del() { + let root = Builder::new().prefix("test_integer_del").tempdir().expect("tempdir"); + fs::create_dir_all(root.path()).expect("dir created"); + + let k = Rkv::new::<backend::Lmdb>(root.path()).expect("new succeeded"); + let s = k.open_integer("s", StoreOptions::create()).expect("open"); + + { + let mut writer = k.write().expect("writer"); + s.put(&mut writer, 1, &Value::Str("hello!")).expect("write"); + s.put(&mut writer, 1, &Value::Str("foo!")).expect("write"); + s.put(&mut writer, 1, &Value::Str("bar!")).expect("write"); + assert_eq!(s.get(&writer, 1).expect("read"), Some(Value::Str("bar!"))); + assert_eq!(s.get(&writer, 2).expect("read"), None); + assert_eq!(s.get(&writer, 3).expect("read"), None); + writer.commit().expect("committed"); + } + + { + let mut writer = k.write().expect("writer"); + s.delete(&mut writer, 1).expect("deleted"); + writer.commit().expect("committed"); + + let reader = k.read().expect("reader"); + assert_eq!(s.get(&reader, 1).expect("read"), None); + } + + { + let mut writer = k.write().expect("writer"); + s.delete(&mut writer, 2).expect_err("not deleted"); + 
writer.commit().expect("committed"); + + let reader = k.read().expect("reader"); + assert_eq!(s.get(&reader, 2).expect("read"), None); + } + + { + let mut writer = k.write().expect("writer"); + s.delete(&mut writer, 3).expect_err("not deleted"); + writer.commit().expect("committed"); + + let reader = k.read().expect("reader"); + assert_eq!(s.get(&reader, 3).expect("read"), None); + } + } + + #[test] + fn test_persist() { + let root = Builder::new().prefix("test_integer_persist").tempdir().expect("tempdir"); + fs::create_dir_all(root.path()).expect("dir created"); + + { + let k = Rkv::new::<backend::Lmdb>(root.path()).expect("new succeeded"); + let s = k.open_integer("s", StoreOptions::create()).expect("open"); + + let mut writer = k.write().expect("writer"); + s.put(&mut writer, 1, &Value::Str("hello!")).expect("write"); + s.put(&mut writer, 2, &Value::Str("hello!")).expect("write"); + s.put(&mut writer, 3, &Value::Str("hello!")).expect("write"); + assert_eq!(s.get(&writer, 1).expect("read"), Some(Value::Str("hello!"))); + assert_eq!(s.get(&writer, 2).expect("read"), Some(Value::Str("hello!"))); + assert_eq!(s.get(&writer, 3).expect("read"), Some(Value::Str("hello!"))); + writer.commit().expect("committed"); + } + + { + let k = Rkv::new::<backend::Lmdb>(root.path()).expect("new succeeded"); + let s = k.open_integer("s", StoreOptions::create()).expect("open"); + + let reader = k.read().expect("reader"); + assert_eq!(s.get(&reader, 1).expect("read"), Some(Value::Str("hello!"))); + assert_eq!(s.get(&reader, 2).expect("read"), Some(Value::Str("hello!"))); + assert_eq!(s.get(&reader, 3).expect("read"), Some(Value::Str("hello!"))); + } + } + + #[test] + fn test_intertwine_read_write() { + let root = Builder::new().prefix("test_integer_intertwine_read_write").tempdir().expect("tempdir"); + fs::create_dir_all(root.path()).expect("dir created"); + + let k = Rkv::new::<backend::Lmdb>(root.path()).expect("new succeeded"); + let s = k.open_integer("s", 
StoreOptions::create()).expect("open"); + + { + let mut writer = k.write().expect("writer"); + s.put(&mut writer, 1, &Value::Str("hello!")).expect("write"); + assert_eq!(s.get(&writer, 1).expect("read"), Some(Value::Str("hello!"))); + assert_eq!(s.get(&writer, 2).expect("read"), None); + assert_eq!(s.get(&writer, 3).expect("read"), None); + writer.commit().expect("committed"); + } + + let reader = k.read().expect("reader"); + let mut writer = k.write().expect("writer"); + + { + assert_eq!(s.get(&reader, 1).expect("read"), Some(Value::Str("hello!"))); + assert_eq!(s.get(&reader, 2).expect("read"), None); + assert_eq!(s.get(&reader, 3).expect("read"), None); + } + + { + s.put(&mut writer, 1, &Value::Str("goodbye!")).expect("write"); + s.put(&mut writer, 2, &Value::Str("goodbye!")).expect("write"); + s.put(&mut writer, 3, &Value::Str("goodbye!")).expect("write"); + assert_eq!(s.get(&writer, 1).expect("read"), Some(Value::Str("goodbye!"))); + assert_eq!(s.get(&writer, 2).expect("read"), Some(Value::Str("goodbye!"))); + assert_eq!(s.get(&writer, 3).expect("read"), Some(Value::Str("goodbye!"))); + writer.commit().expect("committed"); + } + + { + assert_eq!(s.get(&reader, 1).expect("read"), Some(Value::Str("hello!"))); + assert_eq!(s.get(&reader, 2).expect("read"), None); + assert_eq!(s.get(&reader, 3).expect("read"), None); + } + + { + let mut writer = k.write().expect("writer"); + s.put(&mut writer, 1, &Value::Str("hello!")).expect("write"); + assert_eq!(s.get(&writer, 1).expect("read"), Some(Value::Str("hello!"))); + assert_eq!(s.get(&writer, 2).expect("read"), Some(Value::Str("goodbye!"))); + assert_eq!(s.get(&writer, 3).expect("read"), Some(Value::Str("goodbye!"))); + writer.commit().expect("committed"); + } + + { + let reader = k.write().expect("reader"); + assert_eq!(s.get(&reader, 1).expect("read"), Some(Value::Str("hello!"))); + assert_eq!(s.get(&reader, 2).expect("read"), Some(Value::Str("goodbye!"))); + assert_eq!(s.get(&reader, 3).expect("read"), 
Some(Value::Str("goodbye!"))); + reader.commit().expect("committed"); + } + } +} + +#[cfg(test)] +mod tests_safe { + use super::*; + use crate::*; + + use std::fs; + + use tempfile::Builder; + + #[test] + fn test_integer_keys() { + let root = Builder::new().prefix("test_integer_keys").tempdir().expect("tempdir"); + fs::create_dir_all(root.path()).expect("dir created"); + + let k = Rkv::new::<backend::SafeMode>(root.path()).expect("new succeeded"); + let s = k.open_integer("s", StoreOptions::create()).expect("open"); + + macro_rules! test_integer_keys { + ($type:ty, $key:expr) => {{ + let mut writer = k.write().expect("writer"); + + s.put(&mut writer, $key, &Value::Str("hello!")).expect("write"); + assert_eq!(s.get(&writer, $key).expect("read"), Some(Value::Str("hello!"))); + writer.commit().expect("committed"); + + let reader = k.read().expect("reader"); + assert_eq!(s.get(&reader, $key).expect("read"), Some(Value::Str("hello!"))); + }}; + } + + test_integer_keys!(u32, std::u32::MIN); + test_integer_keys!(u32, std::u32::MAX); + } + + #[test] + fn test_clear() { + let root = Builder::new().prefix("test_integer_clear").tempdir().expect("tempdir"); + fs::create_dir_all(root.path()).expect("dir created"); + + let k = Rkv::new::<backend::SafeMode>(root.path()).expect("new succeeded"); + let s = k.open_integer("s", StoreOptions::create()).expect("open"); + + { + let mut writer = k.write().expect("writer"); + s.put(&mut writer, 1, &Value::Str("hello!")).expect("write"); + s.put(&mut writer, 2, &Value::Str("hello!")).expect("write"); + s.put(&mut writer, 3, &Value::Str("hello!")).expect("write"); + assert_eq!(s.get(&writer, 1).expect("read"), Some(Value::Str("hello!"))); + assert_eq!(s.get(&writer, 2).expect("read"), Some(Value::Str("hello!"))); + assert_eq!(s.get(&writer, 3).expect("read"), Some(Value::Str("hello!"))); + writer.commit().expect("committed"); + } + + { + let mut writer = k.write().expect("writer"); + s.clear(&mut writer).expect("cleared"); + 
writer.commit().expect("committed"); + + let reader = k.read().expect("reader"); + assert_eq!(s.get(&reader, 1).expect("read"), None); + assert_eq!(s.get(&reader, 2).expect("read"), None); + assert_eq!(s.get(&reader, 3).expect("read"), None); + } + } + + #[test] + fn test_dup() { + let root = Builder::new().prefix("test_integer_dup").tempdir().expect("tempdir"); + fs::create_dir_all(root.path()).expect("dir created"); + + let k = Rkv::new::<backend::SafeMode>(root.path()).expect("new succeeded"); + let s = k.open_integer("s", StoreOptions::create()).expect("open"); + + { + let mut writer = k.write().expect("writer"); + s.put(&mut writer, 1, &Value::Str("hello!")).expect("write"); + s.put(&mut writer, 1, &Value::Str("foo!")).expect("write"); + s.put(&mut writer, 1, &Value::Str("bar!")).expect("write"); + assert_eq!(s.get(&writer, 1).expect("read"), Some(Value::Str("bar!"))); + assert_eq!(s.get(&writer, 2).expect("read"), None); + assert_eq!(s.get(&writer, 3).expect("read"), None); + writer.commit().expect("committed"); + } + + { + let mut writer = k.write().expect("writer"); + s.clear(&mut writer).expect("cleared"); + writer.commit().expect("committed"); + + let reader = k.read().expect("reader"); + assert_eq!(s.get(&reader, 1).expect("read"), None); + assert_eq!(s.get(&reader, 2).expect("read"), None); + assert_eq!(s.get(&reader, 3).expect("read"), None); + } + } + + #[test] + fn test_del() { + let root = Builder::new().prefix("test_integer_del").tempdir().expect("tempdir"); + fs::create_dir_all(root.path()).expect("dir created"); + + let k = Rkv::new::<backend::SafeMode>(root.path()).expect("new succeeded"); + let s = k.open_integer("s", StoreOptions::create()).expect("open"); + + { + let mut writer = k.write().expect("writer"); + s.put(&mut writer, 1, &Value::Str("hello!")).expect("write"); + s.put(&mut writer, 1, &Value::Str("foo!")).expect("write"); + s.put(&mut writer, 1, &Value::Str("bar!")).expect("write"); + assert_eq!(s.get(&writer, 1).expect("read"), 
Some(Value::Str("bar!"))); + assert_eq!(s.get(&writer, 2).expect("read"), None); + assert_eq!(s.get(&writer, 3).expect("read"), None); + writer.commit().expect("committed"); + } + + { + let mut writer = k.write().expect("writer"); + s.delete(&mut writer, 1).expect("deleted"); + writer.commit().expect("committed"); + + let reader = k.read().expect("reader"); + assert_eq!(s.get(&reader, 1).expect("read"), None); + } + + { + let mut writer = k.write().expect("writer"); + s.delete(&mut writer, 2).expect_err("not deleted"); + writer.commit().expect("committed"); + + let reader = k.read().expect("reader"); + assert_eq!(s.get(&reader, 2).expect("read"), None); + } + + { + let mut writer = k.write().expect("writer"); + s.delete(&mut writer, 3).expect_err("not deleted"); + writer.commit().expect("committed"); + + let reader = k.read().expect("reader"); + assert_eq!(s.get(&reader, 3).expect("read"), None); + } + } + + #[test] + fn test_persist() { + let root = Builder::new().prefix("test_integer_persist").tempdir().expect("tempdir"); + fs::create_dir_all(root.path()).expect("dir created"); + + { + let k = Rkv::new::<backend::SafeMode>(root.path()).expect("new succeeded"); + let s = k.open_integer("s", StoreOptions::create()).expect("open"); + + let mut writer = k.write().expect("writer"); + s.put(&mut writer, 1, &Value::Str("hello!")).expect("write"); + s.put(&mut writer, 2, &Value::Str("hello!")).expect("write"); + s.put(&mut writer, 3, &Value::Str("hello!")).expect("write"); + assert_eq!(s.get(&writer, 1).expect("read"), Some(Value::Str("hello!"))); + assert_eq!(s.get(&writer, 2).expect("read"), Some(Value::Str("hello!"))); + assert_eq!(s.get(&writer, 3).expect("read"), Some(Value::Str("hello!"))); + writer.commit().expect("committed"); + } + + { + let k = Rkv::new::<backend::SafeMode>(root.path()).expect("new succeeded"); + let s = k.open_integer("s", StoreOptions::create()).expect("open"); + + let reader = k.read().expect("reader"); + assert_eq!(s.get(&reader, 
1).expect("read"), Some(Value::Str("hello!"))); + assert_eq!(s.get(&reader, 2).expect("read"), Some(Value::Str("hello!"))); + assert_eq!(s.get(&reader, 3).expect("read"), Some(Value::Str("hello!"))); + } + } + + #[test] + fn test_intertwine_read_write() { + let root = Builder::new().prefix("test_integer_intertwine_read_write").tempdir().expect("tempdir"); + fs::create_dir_all(root.path()).expect("dir created"); + + let k = Rkv::new::<backend::SafeMode>(root.path()).expect("new succeeded"); + let s = k.open_integer("s", StoreOptions::create()).expect("open"); + + { + let mut writer = k.write().expect("writer"); + s.put(&mut writer, 1, &Value::Str("hello!")).expect("write"); + assert_eq!(s.get(&writer, 1).expect("read"), Some(Value::Str("hello!"))); + assert_eq!(s.get(&writer, 2).expect("read"), None); + assert_eq!(s.get(&writer, 3).expect("read"), None); + writer.commit().expect("committed"); + } + + let reader = k.read().expect("reader"); + let mut writer = k.write().expect("writer"); + + { + assert_eq!(s.get(&reader, 1).expect("read"), Some(Value::Str("hello!"))); + assert_eq!(s.get(&reader, 2).expect("read"), None); + assert_eq!(s.get(&reader, 3).expect("read"), None); + } + + { + s.put(&mut writer, 1, &Value::Str("goodbye!")).expect("write"); + s.put(&mut writer, 2, &Value::Str("goodbye!")).expect("write"); + s.put(&mut writer, 3, &Value::Str("goodbye!")).expect("write"); + assert_eq!(s.get(&writer, 1).expect("read"), Some(Value::Str("goodbye!"))); + assert_eq!(s.get(&writer, 2).expect("read"), Some(Value::Str("goodbye!"))); + assert_eq!(s.get(&writer, 3).expect("read"), Some(Value::Str("goodbye!"))); + writer.commit().expect("committed"); + } + + { + assert_eq!(s.get(&reader, 1).expect("read"), Some(Value::Str("hello!"))); + assert_eq!(s.get(&reader, 2).expect("read"), None); + assert_eq!(s.get(&reader, 3).expect("read"), None); + } + + { + let mut writer = k.write().expect("writer"); + s.put(&mut writer, 1, &Value::Str("hello!")).expect("write"); + 
assert_eq!(s.get(&writer, 1).expect("read"), Some(Value::Str("hello!"))); + assert_eq!(s.get(&writer, 2).expect("read"), Some(Value::Str("goodbye!"))); + assert_eq!(s.get(&writer, 3).expect("read"), Some(Value::Str("goodbye!"))); + writer.commit().expect("committed"); + } + + { + let reader = k.write().expect("reader"); + assert_eq!(s.get(&reader, 1).expect("read"), Some(Value::Str("hello!"))); + assert_eq!(s.get(&reader, 2).expect("read"), Some(Value::Str("goodbye!"))); + assert_eq!(s.get(&reader, 3).expect("read"), Some(Value::Str("goodbye!"))); + reader.commit().expect("committed"); + } + } +} diff --git a/third_party/rust/rkv/src/store/integermulti.rs b/third_party/rust/rkv/src/store/integermulti.rs new file mode 100644 index 0000000000..f157c62d99 --- /dev/null +++ b/third_party/rust/rkv/src/store/integermulti.rs @@ -0,0 +1,543 @@ +// Copyright 2018 Mozilla +// +// Licensed under the Apache License, Version 2.0 (the "License"); you may not use +// this file except in compliance with the License. You may obtain a copy of the +// License at http://www.apache.org/licenses/LICENSE-2.0 +// Unless required by applicable law or agreed to in writing, software distributed +// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR +// CONDITIONS OF ANY KIND, either express or implied. See the License for the +// specific language governing permissions and limitations under the License. 
+ +use std::marker::PhantomData; + +use crate::{ + backend::{ + BackendDatabase, + BackendIter, + BackendRoCursor, + BackendRwTransaction, + }, + error::StoreError, + readwrite::{ + Readable, + Writer, + }, + store::{ + keys::{ + Key, + PrimitiveInt, + }, + multi::{ + Iter, + MultiStore, + }, + }, + value::Value, +}; + +type EmptyResult = Result<(), StoreError>; + +#[derive(Debug, Eq, PartialEq, Copy, Clone)] +pub struct MultiIntegerStore<D, K> { + inner: MultiStore<D>, + phantom: PhantomData<K>, +} + +impl<D, K> MultiIntegerStore<D, K> +where + D: BackendDatabase, + K: PrimitiveInt, +{ + pub(crate) fn new(db: D) -> MultiIntegerStore<D, K> { + MultiIntegerStore { + inner: MultiStore::new(db), + phantom: PhantomData, + } + } + + pub fn get<'r, R, I, C>(&self, reader: &'r R, k: K) -> Result<Iter<'r, I>, StoreError> + where + R: Readable<'r, Database = D, RoCursor = C>, + I: BackendIter<'r>, + C: BackendRoCursor<'r, Iter = I>, + K: 'r, + { + self.inner.get(reader, Key::new(&k)?) + } + + pub fn get_first<'r, R>(&self, reader: &'r R, k: K) -> Result<Option<Value<'r>>, StoreError> + where + R: Readable<'r, Database = D>, + { + self.inner.get_first(reader, Key::new(&k)?) + } + + pub fn put<T>(&self, writer: &mut Writer<T>, k: K, v: &Value) -> EmptyResult + where + T: BackendRwTransaction<Database = D>, + { + self.inner.put(writer, Key::new(&k)?, v) + } + + pub fn put_with_flags<T>(&self, writer: &mut Writer<T>, k: K, v: &Value, flags: T::Flags) -> EmptyResult + where + T: BackendRwTransaction<Database = D>, + { + self.inner.put_with_flags(writer, Key::new(&k)?, v, flags) + } + + pub fn delete_all<T>(&self, writer: &mut Writer<T>, k: K) -> EmptyResult + where + T: BackendRwTransaction<Database = D>, + { + self.inner.delete_all(writer, Key::new(&k)?) 
+ } + + pub fn delete<T>(&self, writer: &mut Writer<T>, k: K, v: &Value) -> EmptyResult + where + T: BackendRwTransaction<Database = D>, + { + self.inner.delete(writer, Key::new(&k)?, v) + } + + pub fn clear<T>(&self, writer: &mut Writer<T>) -> EmptyResult + where + T: BackendRwTransaction<Database = D>, + { + self.inner.clear(writer) + } +} + +#[cfg(test)] +mod tests { + use super::*; + use crate::*; + + use std::fs; + + use tempfile::Builder; + + #[test] + fn test_integer_keys() { + let root = Builder::new().prefix("test_integer_keys").tempdir().expect("tempdir"); + fs::create_dir_all(root.path()).expect("dir created"); + + let k = Rkv::new::<backend::Lmdb>(root.path()).expect("new succeeded"); + let s = k.open_multi_integer("s", StoreOptions::create()).expect("open"); + + macro_rules! test_integer_keys { + ($type:ty, $key:expr) => {{ + let mut writer = k.write().expect("writer"); + + s.put(&mut writer, $key, &Value::Str("hello!")).expect("write"); + assert_eq!(s.get_first(&writer, $key).expect("read"), Some(Value::Str("hello!"))); + writer.commit().expect("committed"); + + let reader = k.read().expect("reader"); + assert_eq!(s.get_first(&reader, $key).expect("read"), Some(Value::Str("hello!"))); + }}; + } + + test_integer_keys!(u32, std::u32::MIN); + test_integer_keys!(u32, std::u32::MAX); + } + + #[test] + fn test_clear() { + let root = Builder::new().prefix("test_multi_integer_clear").tempdir().expect("tempdir"); + fs::create_dir_all(root.path()).expect("dir created"); + + let k = Rkv::new::<backend::Lmdb>(root.path()).expect("new succeeded"); + let s = k.open_multi_integer("s", StoreOptions::create()).expect("open"); + + { + let mut writer = k.write().expect("writer"); + s.put(&mut writer, 1, &Value::Str("hello!")).expect("write"); + s.put(&mut writer, 1, &Value::Str("hello1!")).expect("write"); + s.put(&mut writer, 2, &Value::Str("hello!")).expect("write"); + assert_eq!(s.get_first(&writer, 1).expect("read"), Some(Value::Str("hello!"))); + 
assert_eq!(s.get_first(&writer, 2).expect("read"), Some(Value::Str("hello!"))); + assert_eq!(s.get_first(&writer, 3).expect("read"), None); + writer.commit().expect("committed"); + } + + { + let mut writer = k.write().expect("writer"); + s.clear(&mut writer).expect("cleared"); + writer.commit().expect("committed"); + + let reader = k.read().expect("reader"); + assert_eq!(s.get_first(&reader, 1).expect("read"), None); + assert_eq!(s.get_first(&reader, 2).expect("read"), None); + assert_eq!(s.get_first(&reader, 3).expect("read"), None); + } + } + + #[test] + fn test_dup() { + let root = Builder::new().prefix("test_multi_integer_dup").tempdir().expect("tempdir"); + fs::create_dir_all(root.path()).expect("dir created"); + + let k = Rkv::new::<backend::Lmdb>(root.path()).expect("new succeeded"); + let s = k.open_multi_integer("s", StoreOptions::create()).expect("open"); + + { + let mut writer = k.write().expect("writer"); + s.put(&mut writer, 1, &Value::Str("hello!")).expect("write"); + s.put(&mut writer, 1, &Value::Str("hello!")).expect("write"); + s.put(&mut writer, 1, &Value::Str("hello1!")).expect("write"); + assert_eq!(s.get_first(&writer, 1).expect("read"), Some(Value::Str("hello!"))); + assert_eq!(s.get_first(&writer, 2).expect("read"), None); + assert_eq!(s.get_first(&writer, 3).expect("read"), None); + writer.commit().expect("committed"); + } + + { + let mut writer = k.write().expect("writer"); + s.clear(&mut writer).expect("cleared"); + writer.commit().expect("committed"); + + let reader = k.read().expect("reader"); + assert_eq!(s.get_first(&reader, 1).expect("read"), None); + assert_eq!(s.get_first(&reader, 2).expect("read"), None); + assert_eq!(s.get_first(&reader, 3).expect("read"), None); + } + } + + #[test] + fn test_dup_2() { + let root = Builder::new().prefix("test_multi_integer_dup").tempdir().expect("tempdir"); + fs::create_dir_all(root.path()).expect("dir created"); + + let k = Rkv::new::<backend::Lmdb>(root.path()).expect("new succeeded"); + let s = 
k.open_multi_integer("s", StoreOptions::create()).expect("open"); + + { + let mut writer = k.write().expect("writer"); + s.put(&mut writer, 1, &Value::Str("hello!")).expect("write"); + s.put(&mut writer, 1, &Value::Str("hello!")).expect("write"); + s.put(&mut writer, 1, &Value::Str("hello1!")).expect("write"); + + let mut iter = s.get(&writer, 1).expect("read"); + assert_eq!(iter.next().expect("first").expect("ok").1, Value::Str("hello!")); + assert_eq!(iter.next().expect("second").expect("ok").1, Value::Str("hello1!")); + assert!(iter.next().is_none()); + } + } + + #[test] + fn test_del() { + let root = Builder::new().prefix("test_multi_integer_dup").tempdir().expect("tempdir"); + fs::create_dir_all(root.path()).expect("dir created"); + + let k = Rkv::new::<backend::Lmdb>(root.path()).expect("new succeeded"); + let s = k.open_multi_integer("s", StoreOptions::create()).expect("open"); + + { + let mut writer = k.write().expect("writer"); + s.put(&mut writer, 1, &Value::Str("hello!")).expect("write"); + s.put(&mut writer, 1, &Value::Str("hello!")).expect("write"); + s.put(&mut writer, 1, &Value::Str("hello1!")).expect("write"); + { + let mut iter = s.get(&writer, 1).expect("read"); + assert_eq!(iter.next().expect("first").expect("ok").1, Value::Str("hello!")); + assert_eq!(iter.next().expect("second").expect("ok").1, Value::Str("hello1!")); + assert!(iter.next().is_none()); + } + writer.commit().expect("committed"); + } + + { + let mut writer = k.write().expect("writer"); + s.delete(&mut writer, 1, &Value::Str("hello!")).expect("deleted"); + writer.commit().expect("committed"); + + let reader = k.read().expect("reader"); + let mut iter = s.get(&reader, 1).expect("read"); + assert_eq!(iter.next().expect("first").expect("ok").1, Value::Str("hello1!")); + assert!(iter.next().is_none()); + } + + { + let mut writer = k.write().expect("writer"); + s.delete(&mut writer, 1, &Value::Str("hello!")).expect_err("deleted"); + writer.commit().expect("committed"); + + let reader = 
k.read().expect("reader"); + let mut iter = s.get(&reader, 1).expect("read"); + assert_eq!(iter.next().expect("first").expect("ok").1, Value::Str("hello1!")); + assert!(iter.next().is_none()); + } + + { + let mut writer = k.write().expect("writer"); + s.delete(&mut writer, 1, &Value::Str("hello1!")).expect("deleted"); + writer.commit().expect("committed"); + + let reader = k.read().expect("reader"); + let mut iter = s.get(&reader, 1).expect("read"); + assert!(iter.next().is_none()); + } + + { + let mut writer = k.write().expect("writer"); + s.delete(&mut writer, 1, &Value::Str("hello1!")).expect_err("deleted"); + writer.commit().expect("committed"); + + let reader = k.read().expect("reader"); + let mut iter = s.get(&reader, 1).expect("read"); + assert!(iter.next().is_none()); + } + } + + #[test] + fn test_persist() { + let root = Builder::new().prefix("test_multi_integer_persist").tempdir().expect("tempdir"); + fs::create_dir_all(root.path()).expect("dir created"); + + { + let k = Rkv::new::<backend::Lmdb>(root.path()).expect("new succeeded"); + let s = k.open_multi_integer("s", StoreOptions::create()).expect("open"); + + let mut writer = k.write().expect("writer"); + s.put(&mut writer, 1, &Value::Str("hello!")).expect("write"); + s.put(&mut writer, 1, &Value::Str("hello1!")).expect("write"); + s.put(&mut writer, 2, &Value::Str("hello!")).expect("write"); + { + let mut iter = s.get(&writer, 1).expect("read"); + assert_eq!(iter.next().expect("first").expect("ok").1, Value::Str("hello!")); + assert_eq!(iter.next().expect("second").expect("ok").1, Value::Str("hello1!")); + assert!(iter.next().is_none()); + } + writer.commit().expect("committed"); + } + + { + let k = Rkv::new::<backend::Lmdb>(root.path()).expect("new succeeded"); + let s = k.open_multi_integer("s", StoreOptions::create()).expect("open"); + + let reader = k.read().expect("reader"); + let mut iter = s.get(&reader, 1).expect("read"); + assert_eq!(iter.next().expect("first").expect("ok").1, 
Value::Str("hello!")); + assert_eq!(iter.next().expect("second").expect("ok").1, Value::Str("hello1!")); + assert!(iter.next().is_none()); + } + } +} + +#[cfg(test)] +mod tests_safe { + use super::*; + use crate::*; + + use std::fs; + + use tempfile::Builder; + + #[test] + fn test_integer_keys() { + let root = Builder::new().prefix("test_integer_keys").tempdir().expect("tempdir"); + fs::create_dir_all(root.path()).expect("dir created"); + + let k = Rkv::new::<backend::SafeMode>(root.path()).expect("new succeeded"); + let s = k.open_multi_integer("s", StoreOptions::create()).expect("open"); + + macro_rules! test_integer_keys { + ($type:ty, $key:expr) => {{ + let mut writer = k.write().expect("writer"); + + s.put(&mut writer, $key, &Value::Str("hello!")).expect("write"); + assert_eq!(s.get_first(&writer, $key).expect("read"), Some(Value::Str("hello!"))); + writer.commit().expect("committed"); + + let reader = k.read().expect("reader"); + assert_eq!(s.get_first(&reader, $key).expect("read"), Some(Value::Str("hello!"))); + }}; + } + + test_integer_keys!(u32, std::u32::MIN); + test_integer_keys!(u32, std::u32::MAX); + } + + #[test] + fn test_clear() { + let root = Builder::new().prefix("test_multi_integer_clear").tempdir().expect("tempdir"); + fs::create_dir_all(root.path()).expect("dir created"); + + let k = Rkv::new::<backend::SafeMode>(root.path()).expect("new succeeded"); + let s = k.open_multi_integer("s", StoreOptions::create()).expect("open"); + + { + let mut writer = k.write().expect("writer"); + s.put(&mut writer, 1, &Value::Str("hello!")).expect("write"); + s.put(&mut writer, 1, &Value::Str("hello1!")).expect("write"); + s.put(&mut writer, 2, &Value::Str("hello!")).expect("write"); + assert_eq!(s.get_first(&writer, 1).expect("read"), Some(Value::Str("hello!"))); + assert_eq!(s.get_first(&writer, 2).expect("read"), Some(Value::Str("hello!"))); + assert_eq!(s.get_first(&writer, 3).expect("read"), None); + writer.commit().expect("committed"); + } + + { + let mut 
writer = k.write().expect("writer"); + s.clear(&mut writer).expect("cleared"); + writer.commit().expect("committed"); + + let reader = k.read().expect("reader"); + assert_eq!(s.get_first(&reader, 1).expect("read"), None); + assert_eq!(s.get_first(&reader, 2).expect("read"), None); + assert_eq!(s.get_first(&reader, 3).expect("read"), None); + } + } + + #[test] + fn test_dup() { + let root = Builder::new().prefix("test_multi_integer_dup").tempdir().expect("tempdir"); + fs::create_dir_all(root.path()).expect("dir created"); + + let k = Rkv::new::<backend::SafeMode>(root.path()).expect("new succeeded"); + let s = k.open_multi_integer("s", StoreOptions::create()).expect("open"); + + { + let mut writer = k.write().expect("writer"); + s.put(&mut writer, 1, &Value::Str("hello!")).expect("write"); + s.put(&mut writer, 1, &Value::Str("hello!")).expect("write"); + s.put(&mut writer, 1, &Value::Str("hello1!")).expect("write"); + assert_eq!(s.get_first(&writer, 1).expect("read"), Some(Value::Str("hello!"))); + assert_eq!(s.get_first(&writer, 2).expect("read"), None); + assert_eq!(s.get_first(&writer, 3).expect("read"), None); + writer.commit().expect("committed"); + } + + { + let mut writer = k.write().expect("writer"); + s.clear(&mut writer).expect("cleared"); + writer.commit().expect("committed"); + + let reader = k.read().expect("reader"); + assert_eq!(s.get_first(&reader, 1).expect("read"), None); + assert_eq!(s.get_first(&reader, 2).expect("read"), None); + assert_eq!(s.get_first(&reader, 3).expect("read"), None); + } + } + + #[test] + fn test_dup_2() { + let root = Builder::new().prefix("test_multi_integer_dup").tempdir().expect("tempdir"); + fs::create_dir_all(root.path()).expect("dir created"); + + let k = Rkv::new::<backend::SafeMode>(root.path()).expect("new succeeded"); + let s = k.open_multi_integer("s", StoreOptions::create()).expect("open"); + + { + let mut writer = k.write().expect("writer"); + s.put(&mut writer, 1, &Value::Str("hello!")).expect("write"); + 
s.put(&mut writer, 1, &Value::Str("hello!")).expect("write"); + s.put(&mut writer, 1, &Value::Str("hello1!")).expect("write"); + + let mut iter = s.get(&writer, 1).expect("read"); + assert_eq!(iter.next().expect("first").expect("ok").1, Value::Str("hello!")); + assert_eq!(iter.next().expect("second").expect("ok").1, Value::Str("hello1!")); + assert!(iter.next().is_none()); + } + } + + #[test] + fn test_del() { + let root = Builder::new().prefix("test_multi_integer_dup").tempdir().expect("tempdir"); + fs::create_dir_all(root.path()).expect("dir created"); + + let k = Rkv::new::<backend::SafeMode>(root.path()).expect("new succeeded"); + let s = k.open_multi_integer("s", StoreOptions::create()).expect("open"); + + { + let mut writer = k.write().expect("writer"); + s.put(&mut writer, 1, &Value::Str("hello!")).expect("write"); + s.put(&mut writer, 1, &Value::Str("hello!")).expect("write"); + s.put(&mut writer, 1, &Value::Str("hello1!")).expect("write"); + { + let mut iter = s.get(&writer, 1).expect("read"); + assert_eq!(iter.next().expect("first").expect("ok").1, Value::Str("hello!")); + assert_eq!(iter.next().expect("second").expect("ok").1, Value::Str("hello1!")); + assert!(iter.next().is_none()); + } + writer.commit().expect("committed"); + } + + { + let mut writer = k.write().expect("writer"); + s.delete(&mut writer, 1, &Value::Str("hello!")).expect("deleted"); + writer.commit().expect("committed"); + + let reader = k.read().expect("reader"); + let mut iter = s.get(&reader, 1).expect("read"); + assert_eq!(iter.next().expect("first").expect("ok").1, Value::Str("hello1!")); + assert!(iter.next().is_none()); + } + + { + let mut writer = k.write().expect("writer"); + s.delete(&mut writer, 1, &Value::Str("hello!")).expect_err("deleted"); + writer.commit().expect("committed"); + + let reader = k.read().expect("reader"); + let mut iter = s.get(&reader, 1).expect("read"); + assert_eq!(iter.next().expect("first").expect("ok").1, Value::Str("hello1!")); + 
assert!(iter.next().is_none()); + } + + { + let mut writer = k.write().expect("writer"); + s.delete(&mut writer, 1, &Value::Str("hello1!")).expect("deleted"); + writer.commit().expect("committed"); + + let reader = k.read().expect("reader"); + let mut iter = s.get(&reader, 1).expect("read"); + assert!(iter.next().is_none()); + } + + { + let mut writer = k.write().expect("writer"); + s.delete(&mut writer, 1, &Value::Str("hello1!")).expect_err("deleted"); + writer.commit().expect("committed"); + + let reader = k.read().expect("reader"); + let mut iter = s.get(&reader, 1).expect("read"); + assert!(iter.next().is_none()); + } + } + + #[test] + fn test_persist() { + let root = Builder::new().prefix("test_multi_integer_persist").tempdir().expect("tempdir"); + fs::create_dir_all(root.path()).expect("dir created"); + + { + let k = Rkv::new::<backend::SafeMode>(root.path()).expect("new succeeded"); + let s = k.open_multi_integer("s", StoreOptions::create()).expect("open"); + + let mut writer = k.write().expect("writer"); + s.put(&mut writer, 1, &Value::Str("hello!")).expect("write"); + s.put(&mut writer, 1, &Value::Str("hello1!")).expect("write"); + s.put(&mut writer, 2, &Value::Str("hello!")).expect("write"); + { + let mut iter = s.get(&writer, 1).expect("read"); + assert_eq!(iter.next().expect("first").expect("ok").1, Value::Str("hello!")); + assert_eq!(iter.next().expect("second").expect("ok").1, Value::Str("hello1!")); + assert!(iter.next().is_none()); + } + writer.commit().expect("committed"); + } + + { + let k = Rkv::new::<backend::SafeMode>(root.path()).expect("new succeeded"); + let s = k.open_multi_integer("s", StoreOptions::create()).expect("open"); + + let reader = k.read().expect("reader"); + let mut iter = s.get(&reader, 1).expect("read"); + assert_eq!(iter.next().expect("first").expect("ok").1, Value::Str("hello!")); + assert_eq!(iter.next().expect("second").expect("ok").1, Value::Str("hello1!")); + assert!(iter.next().is_none()); + } + } +} diff --git 
a/third_party/rust/rkv/src/store/keys.rs b/third_party/rust/rkv/src/store/keys.rs new file mode 100644 index 0000000000..26a13db47a --- /dev/null +++ b/third_party/rust/rkv/src/store/keys.rs @@ -0,0 +1,46 @@ +// Copyright 2018-2019 Mozilla +// +// Licensed under the Apache License, Version 2.0 (the "License"); you may not use +// this file except in compliance with the License. You may obtain a copy of the +// License at http://www.apache.org/licenses/LICENSE-2.0 +// Unless required by applicable law or agreed to in writing, software distributed +// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR +// CONDITIONS OF ANY KIND, either express or implied. See the License for the +// specific language governing permissions and limitations under the License. + +mod encodables; +mod primitives; + +use std::marker::PhantomData; + +use crate::error::DataError; + +pub use encodables::*; +pub use primitives::*; + +pub(crate) struct Key<K> { + bytes: Vec<u8>, + phantom: PhantomData<K>, +} + +impl<K> AsRef<[u8]> for Key<K> +where + K: EncodableKey, +{ + fn as_ref(&self) -> &[u8] { + self.bytes.as_ref() + } +} + +impl<K> Key<K> +where + K: EncodableKey, +{ + #[allow(clippy::new_ret_no_self)] + pub fn new(k: &K) -> Result<Key<K>, DataError> { + Ok(Key { + bytes: k.to_bytes()?, + phantom: PhantomData, + }) + } +} diff --git a/third_party/rust/rkv/src/store/keys/encodables.rs b/third_party/rust/rkv/src/store/keys/encodables.rs new file mode 100644 index 0000000000..85e3eacdf5 --- /dev/null +++ b/third_party/rust/rkv/src/store/keys/encodables.rs @@ -0,0 +1,27 @@ +// Copyright 2018-2019 Mozilla +// +// Licensed under the Apache License, Version 2.0 (the "License"); you may not use +// this file except in compliance with the License. 
You may obtain a copy of the +// License at http://www.apache.org/licenses/LICENSE-2.0 +// Unless required by applicable law or agreed to in writing, software distributed +// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR +// CONDITIONS OF ANY KIND, either express or implied. See the License for the +// specific language governing permissions and limitations under the License. + +use bincode::serialize; +use serde::Serialize; + +use crate::error::DataError; + +pub trait EncodableKey { + fn to_bytes(&self) -> Result<Vec<u8>, DataError>; +} + +impl<T> EncodableKey for T +where + T: Serialize, +{ + fn to_bytes(&self) -> Result<Vec<u8>, DataError> { + serialize(self).map_err(|e| e.into()) + } +} diff --git a/third_party/rust/rkv/src/store/keys/primitives.rs b/third_party/rust/rkv/src/store/keys/primitives.rs new file mode 100644 index 0000000000..26282b7cbb --- /dev/null +++ b/third_party/rust/rkv/src/store/keys/primitives.rs @@ -0,0 +1,15 @@ +// Copyright 2018-2019 Mozilla +// +// Licensed under the Apache License, Version 2.0 (the "License"); you may not use +// this file except in compliance with the License. You may obtain a copy of the +// License at http://www.apache.org/licenses/LICENSE-2.0 +// Unless required by applicable law or agreed to in writing, software distributed +// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR +// CONDITIONS OF ANY KIND, either express or implied. See the License for the +// specific language governing permissions and limitations under the License. 
+ +use crate::store::keys::EncodableKey; + +pub trait PrimitiveInt: EncodableKey {} + +impl PrimitiveInt for u32 {} diff --git a/third_party/rust/rkv/src/store/multi.rs b/third_party/rust/rkv/src/store/multi.rs new file mode 100644 index 0000000000..04714badcb --- /dev/null +++ b/third_party/rust/rkv/src/store/multi.rs @@ -0,0 +1,140 @@ +// Copyright 2018 Mozilla +// +// Licensed under the Apache License, Version 2.0 (the "License"); you may not use +// this file except in compliance with the License. You may obtain a copy of the +// License at http://www.apache.org/licenses/LICENSE-2.0 +// Unless required by applicable law or agreed to in writing, software distributed +// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR +// CONDITIONS OF ANY KIND, either express or implied. See the License for the +// specific language governing permissions and limitations under the License. + +use std::marker::PhantomData; + +use crate::{ + backend::{ + BackendDatabase, + BackendFlags, + BackendIter, + BackendRoCursor, + BackendRwTransaction, + }, + error::StoreError, + helpers::read_transform, + readwrite::{ + Readable, + Writer, + }, + value::Value, +}; + +type EmptyResult = Result<(), StoreError>; + +#[derive(Debug, Eq, PartialEq, Copy, Clone)] +pub struct MultiStore<D> { + db: D, +} + +pub struct Iter<'i, I> { + iter: I, + phantom: PhantomData<&'i ()>, +} + +impl<D> MultiStore<D> +where + D: BackendDatabase, +{ + pub(crate) fn new(db: D) -> MultiStore<D> { + MultiStore { + db, + } + } + + /// Provides a cursor to all of the values for the duplicate entries that match this + /// key + pub fn get<'r, R, I, C, K>(&self, reader: &'r R, k: K) -> Result<Iter<'r, I>, StoreError> + where + R: Readable<'r, Database = D, RoCursor = C>, + I: BackendIter<'r>, + C: BackendRoCursor<'r, Iter = I>, + K: AsRef<[u8]> + 'r, + { + let cursor = reader.open_ro_cursor(&self.db)?; + let iter = cursor.into_iter_dup_of(k); + + Ok(Iter { + iter, + phantom: PhantomData, + }) + 
} + + /// Provides the first value that matches this key + pub fn get_first<'r, R, K>(&self, reader: &'r R, k: K) -> Result<Option<Value<'r>>, StoreError> + where + R: Readable<'r, Database = D>, + K: AsRef<[u8]>, + { + reader.get(&self.db, &k) + } + + /// Insert a value at the specified key. + /// This put will allow duplicate entries. If you wish to have duplicate entries + /// rejected, use the `put_with_flags` function and specify NO_DUP_DATA + pub fn put<T, K>(&self, writer: &mut Writer<T>, k: K, v: &Value) -> EmptyResult + where + T: BackendRwTransaction<Database = D>, + K: AsRef<[u8]>, + { + writer.put(&self.db, &k, v, T::Flags::empty()) + } + + pub fn put_with_flags<T, K>(&self, writer: &mut Writer<T>, k: K, v: &Value, flags: T::Flags) -> EmptyResult + where + T: BackendRwTransaction<Database = D>, + K: AsRef<[u8]>, + { + writer.put(&self.db, &k, v, flags) + } + + pub fn delete_all<T, K>(&self, writer: &mut Writer<T>, k: K) -> EmptyResult + where + T: BackendRwTransaction<Database = D>, + K: AsRef<[u8]>, + { + writer.delete(&self.db, &k, None) + } + + pub fn delete<T, K>(&self, writer: &mut Writer<T>, k: K, v: &Value) -> EmptyResult + where + T: BackendRwTransaction<Database = D>, + K: AsRef<[u8]>, + { + writer.delete(&self.db, &k, Some(&v.to_bytes()?)) + } + + pub fn clear<T>(&self, writer: &mut Writer<T>) -> EmptyResult + where + T: BackendRwTransaction<Database = D>, + { + writer.clear(&self.db) + } +} + +impl<'i, I> Iterator for Iter<'i, I> +where + I: BackendIter<'i>, +{ + type Item = Result<(&'i [u8], Value<'i>), StoreError>; + + fn next(&mut self) -> Option<Self::Item> { + match self.iter.next() { + None => None, + Some(Ok((key, bytes))) => { + match read_transform(Ok(bytes)) { + Ok(val) => Some(Ok((key, val))), + Err(err) => Some(Err(err)), + } + }, + Some(Err(err)) => Some(Err(err.into())), + } + } +} diff --git a/third_party/rust/rkv/src/store/single.rs b/third_party/rust/rkv/src/store/single.rs new file mode 100644 index 0000000000..bb7a5ab755 
--- /dev/null +++ b/third_party/rust/rkv/src/store/single.rs @@ -0,0 +1,145 @@ +// Copyright 2018 Mozilla +// +// Licensed under the Apache License, Version 2.0 (the "License"); you may not use +// this file except in compliance with the License. You may obtain a copy of the +// License at http://www.apache.org/licenses/LICENSE-2.0 +// Unless required by applicable law or agreed to in writing, software distributed +// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR +// CONDITIONS OF ANY KIND, either express or implied. See the License for the +// specific language governing permissions and limitations under the License. + +use std::marker::PhantomData; + +use crate::{ + backend::{ + BackendDatabase, + BackendFlags, + BackendIter, + BackendRoCursor, + BackendRwTransaction, + }, + error::StoreError, + helpers::read_transform, + readwrite::{ + Readable, + Writer, + }, + value::Value, +}; + +type EmptyResult = Result<(), StoreError>; + +#[derive(Debug, Eq, PartialEq, Copy, Clone)] +pub struct SingleStore<D> { + db: D, +} + +pub struct Iter<'i, I> { + iter: I, + phantom: PhantomData<&'i ()>, +} + +impl<D> SingleStore<D> +where + D: BackendDatabase, +{ + pub(crate) fn new(db: D) -> SingleStore<D> { + SingleStore { + db, + } + } + + pub fn get<'r, R, K>(&self, reader: &'r R, k: K) -> Result<Option<Value<'r>>, StoreError> + where + R: Readable<'r, Database = D>, + K: AsRef<[u8]>, + { + reader.get(&self.db, &k) + } + + // TODO: flags + pub fn put<T, K>(&self, writer: &mut Writer<T>, k: K, v: &Value) -> EmptyResult + where + T: BackendRwTransaction<Database = D>, + K: AsRef<[u8]>, + { + writer.put(&self.db, &k, v, T::Flags::empty()) + } + + #[cfg(not(feature = "db-dup-sort"))] + pub fn delete<T, K>(&self, writer: &mut Writer<T>, k: K) -> EmptyResult + where + T: BackendRwTransaction<Database = D>, + K: AsRef<[u8]>, + { + writer.delete(&self.db, &k) + } + + #[cfg(feature = "db-dup-sort")] + pub fn delete<T, K>(&self, writer: &mut Writer<T>, k: K) 
-> EmptyResult + where + T: BackendRwTransaction<Database = D>, + K: AsRef<[u8]>, + { + writer.delete(&self.db, &k, None) + } + + pub fn iter_start<'r, R, I, C>(&self, reader: &'r R) -> Result<Iter<'r, I>, StoreError> + where + R: Readable<'r, Database = D, RoCursor = C>, + I: BackendIter<'r>, + C: BackendRoCursor<'r, Iter = I>, + { + let cursor = reader.open_ro_cursor(&self.db)?; + let iter = cursor.into_iter(); + + Ok(Iter { + iter, + phantom: PhantomData, + }) + } + + pub fn iter_from<'r, R, I, C, K>(&self, reader: &'r R, k: K) -> Result<Iter<'r, I>, StoreError> + where + R: Readable<'r, Database = D, RoCursor = C>, + I: BackendIter<'r>, + C: BackendRoCursor<'r, Iter = I>, + K: AsRef<[u8]> + 'r, + { + let cursor = reader.open_ro_cursor(&self.db)?; + let iter = cursor.into_iter_from(k); + + Ok(Iter { + iter, + phantom: PhantomData, + }) + } + + pub fn clear<T>(&self, writer: &mut Writer<T>) -> EmptyResult + where + D: BackendDatabase, + T: BackendRwTransaction<Database = D>, + { + writer.clear(&self.db) + } +} + +impl<'i, I> Iterator for Iter<'i, I> +where + I: BackendIter<'i>, +{ + type Item = Result<(&'i [u8], Value<'i>), StoreError>; + + fn next(&mut self) -> Option<Self::Item> { + match self.iter.next() { + None => None, + Some(Ok((key, bytes))) => { + match read_transform(Ok(bytes)) { + Ok(val) => Some(Ok((key, val))), + Err(err) => Some(Err(err)), + } + }, + Some(Err(err)) => Some(Err(err.into())), + } + } +} diff --git a/third_party/rust/rkv/src/value.rs b/third_party/rust/rkv/src/value.rs new file mode 100644 index 0000000000..8d60ea21f7 --- /dev/null +++ b/third_party/rust/rkv/src/value.rs @@ -0,0 +1,256 @@ +// Copyright 2018-2019 Mozilla +// +// Licensed under the Apache License, Version 2.0 (the "License"); you may not use +// this file except in compliance with the License. 
You may obtain a copy of the +// License at http://www.apache.org/licenses/LICENSE-2.0 +// Unless required by applicable law or agreed to in writing, software distributed +// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR +// CONDITIONS OF ANY KIND, either express or implied. See the License for the +// specific language governing permissions and limitations under the License. + +use std::fmt; + +use arrayref::array_ref; +use bincode::{ + deserialize, + serialize, + serialized_size, +}; +use ordered_float::OrderedFloat; +use uuid::{ + Bytes, + Uuid, +}; + +use crate::error::DataError; + +/// We define a set of types, associated with simple integers, to annotate values stored +/// in LMDB. This is to avoid an accidental 'cast' from a value of one type to another. +/// For this reason we don't simply use `deserialize` from the `bincode` crate. +#[repr(u8)] +#[derive(Debug, PartialEq, Eq)] +pub enum Type { + Bool = 1, + U64 = 2, + I64 = 3, + F64 = 4, + Instant = 5, // Millisecond-precision timestamp. + Uuid = 6, + Str = 7, + Json = 8, + Blob = 9, +} + +/// We use manual tagging, because <https://github.com/serde-rs/serde/issues/610>. 
+impl Type { + pub fn from_tag(tag: u8) -> Result<Type, DataError> { + Type::from_primitive(tag).ok_or_else(|| DataError::UnknownType(tag)) + } + + #[allow(clippy::wrong_self_convention)] + pub fn to_tag(self) -> u8 { + self as u8 + } + + fn from_primitive(p: u8) -> Option<Type> { + match p { + 1 => Some(Type::Bool), + 2 => Some(Type::U64), + 3 => Some(Type::I64), + 4 => Some(Type::F64), + 5 => Some(Type::Instant), + 6 => Some(Type::Uuid), + 7 => Some(Type::Str), + 8 => Some(Type::Json), + 9 => Some(Type::Blob), + _ => None, + } + } +} + +impl fmt::Display for Type { + fn fmt(&self, f: &mut fmt::Formatter) -> Result<(), fmt::Error> { + f.write_str(match *self { + Type::Bool => "bool", + Type::U64 => "u64", + Type::I64 => "i64", + Type::F64 => "f64", + Type::Instant => "instant", + Type::Uuid => "uuid", + Type::Str => "str", + Type::Json => "json", + Type::Blob => "blob", + }) + } +} + +#[derive(Debug, Eq, PartialEq)] +pub enum Value<'v> { + Bool(bool), + U64(u64), + I64(i64), + F64(OrderedFloat<f64>), + Instant(i64), // Millisecond-precision timestamp. + Uuid(&'v Bytes), + Str(&'v str), + Json(&'v str), + Blob(&'v [u8]), +} + +#[derive(Clone, Debug, PartialEq)] +pub enum OwnedValue { + Bool(bool), + U64(u64), + I64(i64), + F64(f64), + Instant(i64), // Millisecond-precision timestamp. 
+ Uuid(Uuid), + Str(String), + Json(String), // TODO + Blob(Vec<u8>), +} + +fn uuid(bytes: &[u8]) -> Result<Value, DataError> { + if bytes.len() == 16 { + Ok(Value::Uuid(array_ref![bytes, 0, 16])) + } else { + Err(DataError::InvalidUuid) + } +} + +impl<'v> Value<'v> { + pub fn from_tagged_slice(slice: &'v [u8]) -> Result<Value<'v>, DataError> { + let (tag, data) = slice.split_first().ok_or(DataError::Empty)?; + let t = Type::from_tag(*tag)?; + Value::from_type_and_data(t, data) + } + + fn from_type_and_data(t: Type, data: &'v [u8]) -> Result<Value<'v>, DataError> { + if t == Type::Uuid { + return deserialize(data) + .map_err(|e| { + DataError::DecodingError { + value_type: t, + err: e, + } + }) + .map(uuid)?; + } + + match t { + Type::Bool => deserialize(data).map(Value::Bool), + Type::U64 => deserialize(data).map(Value::U64), + Type::I64 => deserialize(data).map(Value::I64), + Type::F64 => deserialize(data).map(OrderedFloat).map(Value::F64), + Type::Instant => deserialize(data).map(Value::Instant), + Type::Str => deserialize(data).map(Value::Str), + Type::Json => deserialize(data).map(Value::Json), + Type::Blob => deserialize(data).map(Value::Blob), + Type::Uuid => { + // Processed above to avoid verbose duplication of error transforms. 
+ unreachable!() + }, + } + .map_err(|e| { + DataError::DecodingError { + value_type: t, + err: e, + } + }) + } + + pub fn to_bytes(&self) -> Result<Vec<u8>, DataError> { + match self { + Value::Bool(v) => serialize(&(Type::Bool.to_tag(), *v)), + Value::U64(v) => serialize(&(Type::U64.to_tag(), *v)), + Value::I64(v) => serialize(&(Type::I64.to_tag(), *v)), + Value::F64(v) => serialize(&(Type::F64.to_tag(), v.0)), + Value::Instant(v) => serialize(&(Type::Instant.to_tag(), *v)), + Value::Str(v) => serialize(&(Type::Str.to_tag(), v)), + Value::Json(v) => serialize(&(Type::Json.to_tag(), v)), + Value::Blob(v) => serialize(&(Type::Blob.to_tag(), v)), + Value::Uuid(v) => serialize(&(Type::Uuid.to_tag(), v)), + } + .map_err(DataError::EncodingError) + } + + pub fn serialized_size(&self) -> Result<u64, DataError> { + match self { + Value::Bool(v) => serialized_size(&(Type::Bool.to_tag(), *v)), + Value::U64(v) => serialized_size(&(Type::U64.to_tag(), *v)), + Value::I64(v) => serialized_size(&(Type::I64.to_tag(), *v)), + Value::F64(v) => serialized_size(&(Type::F64.to_tag(), v.0)), + Value::Instant(v) => serialized_size(&(Type::Instant.to_tag(), *v)), + Value::Str(v) => serialized_size(&(Type::Str.to_tag(), v)), + Value::Json(v) => serialized_size(&(Type::Json.to_tag(), v)), + Value::Blob(v) => serialized_size(&(Type::Blob.to_tag(), v)), + Value::Uuid(v) => serialized_size(&(Type::Uuid.to_tag(), v)), + } + .map_err(DataError::EncodingError) + } +} + +impl<'v> From<&'v Value<'v>> for OwnedValue { + fn from(value: &Value) -> OwnedValue { + match value { + Value::Bool(v) => OwnedValue::Bool(*v), + Value::U64(v) => OwnedValue::U64(*v), + Value::I64(v) => OwnedValue::I64(*v), + Value::F64(v) => OwnedValue::F64(**v), + Value::Instant(v) => OwnedValue::Instant(*v), + Value::Uuid(v) => OwnedValue::Uuid(Uuid::from_bytes(**v)), + Value::Str(v) => OwnedValue::Str((*v).to_string()), + Value::Json(v) => OwnedValue::Json((*v).to_string()), + Value::Blob(v) => OwnedValue::Blob(v.to_vec()), 
+ } + } +} + +impl<'v> From<&'v OwnedValue> for Value<'v> { + fn from(value: &OwnedValue) -> Value { + match value { + OwnedValue::Bool(v) => Value::Bool(*v), + OwnedValue::U64(v) => Value::U64(*v), + OwnedValue::I64(v) => Value::I64(*v), + OwnedValue::F64(v) => Value::F64(OrderedFloat::from(*v)), + OwnedValue::Instant(v) => Value::Instant(*v), + OwnedValue::Uuid(v) => Value::Uuid(v.as_bytes()), + OwnedValue::Str(v) => Value::Str(v), + OwnedValue::Json(v) => Value::Json(v), + OwnedValue::Blob(v) => Value::Blob(v), + } + } +} + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn test_value_serialized_size() { + // | Value enum | tag: 1 byte | value_payload | + // |----------------------------------------------------------| + // | I64 | 1 | 8 | + // | U64 | 1 | 8 | + // | Bool | 1 | 1 | + // | Instant | 1 | 8 | + // | F64 | 1 | 8 | + // | Uuid | 1 | 16 | + // | Str/Blob/Json | 1 |(8: len + sizeof(payload))| + assert_eq!(Value::I64(-1000).serialized_size().unwrap(), 9); + assert_eq!(Value::U64(1000u64).serialized_size().unwrap(), 9); + assert_eq!(Value::Bool(true).serialized_size().unwrap(), 2); + assert_eq!(Value::Instant(1_558_020_865_224).serialized_size().unwrap(), 9); + assert_eq!(Value::F64(OrderedFloat(10000.1)).serialized_size().unwrap(), 9); + assert_eq!(Value::Str("hello!").serialized_size().unwrap(), 15); + assert_eq!(Value::Str("¡Hola").serialized_size().unwrap(), 15); + assert_eq!(Value::Blob(b"hello!").serialized_size().unwrap(), 15); + assert_eq!( + uuid(b"\x9f\xe2\xc4\xe9\x3f\x65\x4f\xdb\xb2\x4c\x02\xb1\x52\x59\x71\x6c") + .unwrap() + .serialized_size() + .unwrap(), + 17 + ); + } +} diff --git a/third_party/rust/rkv/tests/env-all.rs b/third_party/rust/rkv/tests/env-all.rs new file mode 100644 index 0000000000..325e20481e --- /dev/null +++ b/third_party/rust/rkv/tests/env-all.rs @@ -0,0 +1,187 @@ +// Copyright 2018-2019 Mozilla +// +// Licensed under the Apache License, Version 2.0 (the "License"); you may not use +// this file except in 
compliance with the License. You may obtain a copy of the
// License at http://www.apache.org/licenses/LICENSE-2.0
// Unless required by applicable law or agreed to in writing, software distributed
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.

use std::fs;

use tempfile::Builder;

use rkv::{
    backend::{
        Lmdb,
        SafeMode,
    },
    Rkv,
    StoreOptions,
    Value,
};

/// Opening a SafeMode environment in a directory that already holds LMDB data
/// must not see, alter, or corrupt the LMDB database: the two backends keep
/// their files side by side in the same directory.
#[test]
fn test_open_safe_same_dir_as_lmdb() {
    let root = Builder::new().prefix("test_open_safe_same_dir_as_lmdb").tempdir().expect("tempdir");
    fs::create_dir_all(root.path()).expect("dir created");

    // Create database of type A and save to disk.
    {
        let k = Rkv::new::<Lmdb>(root.path()).expect("new succeeded");
        let sk = k.open_single("sk", StoreOptions::create()).expect("opened");

        let mut writer = k.write().expect("writer");
        sk.put(&mut writer, "foo", &Value::I64(1234)).expect("wrote");
        sk.put(&mut writer, "bar", &Value::Bool(true)).expect("wrote");
        sk.put(&mut writer, "baz", &Value::Str("héllo, yöu")).expect("wrote");
        assert_eq!(sk.get(&writer, "foo").expect("read"), Some(Value::I64(1234)));
        assert_eq!(sk.get(&writer, "bar").expect("read"), Some(Value::Bool(true)));
        assert_eq!(sk.get(&writer, "baz").expect("read"), Some(Value::Str("héllo, yöu")));
        writer.commit().expect("committed");
        k.sync(true).expect("synced");
    }
    // Verify that database of type A was written to disk.
    {
        let k = Rkv::new::<Lmdb>(root.path()).expect("new succeeded");
        let sk = k.open_single("sk", StoreOptions::default()).expect("opened");

        let reader = k.read().expect("reader");
        assert_eq!(sk.get(&reader, "foo").expect("read"), Some(Value::I64(1234)));
        assert_eq!(sk.get(&reader, "bar").expect("read"), Some(Value::Bool(true)));
        assert_eq!(sk.get(&reader, "baz").expect("read"), Some(Value::Str("héllo, yöu")));
    }
    // Create database of type B and verify that it is empty.
    {
        let k = Rkv::new::<SafeMode>(root.path()).expect("new succeeded");
        let _ = k.open_single("sk", StoreOptions::default()).expect_err("not opened");
    }
    // Verify that database of type A wasn't changed.
    {
        let k = Rkv::new::<Lmdb>(root.path()).expect("new succeeded");
        let sk = k.open_single("sk", StoreOptions::default()).expect("opened");

        let reader = k.read().expect("reader");
        assert_eq!(sk.get(&reader, "foo").expect("read"), Some(Value::I64(1234)));
        assert_eq!(sk.get(&reader, "bar").expect("read"), Some(Value::Bool(true)));
        assert_eq!(sk.get(&reader, "baz").expect("read"), Some(Value::Str("héllo, yöu")));
    }
    // Create database of type B and save to disk (type A exists at the same path).
    {
        let k = Rkv::new::<SafeMode>(root.path()).expect("new succeeded");
        let sk = k.open_single("sk", StoreOptions::create()).expect("opened");

        let mut writer = k.write().expect("writer");
        sk.put(&mut writer, "foo1", &Value::I64(5678)).expect("wrote");
        sk.put(&mut writer, "bar1", &Value::Bool(false)).expect("wrote");
        sk.put(&mut writer, "baz1", &Value::Str("héllo~ yöu")).expect("wrote");
        assert_eq!(sk.get(&writer, "foo1").expect("read"), Some(Value::I64(5678)));
        assert_eq!(sk.get(&writer, "bar1").expect("read"), Some(Value::Bool(false)));
        assert_eq!(sk.get(&writer, "baz1").expect("read"), Some(Value::Str("héllo~ yöu")));
        writer.commit().expect("committed");
        k.sync(true).expect("synced");
    }
    // Verify that database of type B was written to disk.
    {
        let k = Rkv::new::<SafeMode>(root.path()).expect("new succeeded");
        let sk = k.open_single("sk", StoreOptions::default()).expect("opened");

        let reader = k.read().expect("reader");
        assert_eq!(sk.get(&reader, "foo1").expect("read"), Some(Value::I64(5678)));
        assert_eq!(sk.get(&reader, "bar1").expect("read"), Some(Value::Bool(false)));
        assert_eq!(sk.get(&reader, "baz1").expect("read"), Some(Value::Str("héllo~ yöu")));
    }
    // Verify that database of type A still wasn't changed.
    {
        let k = Rkv::new::<Lmdb>(root.path()).expect("new succeeded");
        let sk = k.open_single("sk", StoreOptions::default()).expect("opened");

        let reader = k.read().expect("reader");
        assert_eq!(sk.get(&reader, "foo").expect("read"), Some(Value::I64(1234)));
        assert_eq!(sk.get(&reader, "bar").expect("read"), Some(Value::Bool(true)));
        assert_eq!(sk.get(&reader, "baz").expect("read"), Some(Value::Str("héllo, yöu")));
    }
}

/// Mirror image of the test above: SafeMode data first, then LMDB alongside.
#[test]
fn test_open_lmdb_same_dir_as_safe() {
    let root = Builder::new().prefix("test_open_lmdb_same_dir_as_safe").tempdir().expect("tempdir");
    fs::create_dir_all(root.path()).expect("dir created");

    // Create database of type A and save to disk.
    {
        let k = Rkv::new::<SafeMode>(root.path()).expect("new succeeded");
        let sk = k.open_single("sk", StoreOptions::create()).expect("opened");

        let mut writer = k.write().expect("writer");
        sk.put(&mut writer, "foo", &Value::I64(1234)).expect("wrote");
        sk.put(&mut writer, "bar", &Value::Bool(true)).expect("wrote");
        sk.put(&mut writer, "baz", &Value::Str("héllo, yöu")).expect("wrote");
        assert_eq!(sk.get(&writer, "foo").expect("read"), Some(Value::I64(1234)));
        assert_eq!(sk.get(&writer, "bar").expect("read"), Some(Value::Bool(true)));
        assert_eq!(sk.get(&writer, "baz").expect("read"), Some(Value::Str("héllo, yöu")));
        writer.commit().expect("committed");
        k.sync(true).expect("synced");
    }
    // Verify that database of type A was written to disk.
    {
        let k = Rkv::new::<SafeMode>(root.path()).expect("new succeeded");
        let sk = k.open_single("sk", StoreOptions::default()).expect("opened");

        let reader = k.read().expect("reader");
        assert_eq!(sk.get(&reader, "foo").expect("read"), Some(Value::I64(1234)));
        assert_eq!(sk.get(&reader, "bar").expect("read"), Some(Value::Bool(true)));
        assert_eq!(sk.get(&reader, "baz").expect("read"), Some(Value::Str("héllo, yöu")));
    }
    // Create database of type B and verify that it is empty.
    {
        let k = Rkv::new::<Lmdb>(root.path()).expect("new succeeded");
        let _ = k.open_single("sk", StoreOptions::default()).expect_err("not opened");
    }
    // Verify that database of type A wasn't changed.
    {
        let k = Rkv::new::<SafeMode>(root.path()).expect("new succeeded");
        let sk = k.open_single("sk", StoreOptions::default()).expect("opened");

        let reader = k.read().expect("reader");
        assert_eq!(sk.get(&reader, "foo").expect("read"), Some(Value::I64(1234)));
        assert_eq!(sk.get(&reader, "bar").expect("read"), Some(Value::Bool(true)));
        assert_eq!(sk.get(&reader, "baz").expect("read"), Some(Value::Str("héllo, yöu")));
    }
    // Create database of type B and save to disk (type A exists at the same path).
    {
        let k = Rkv::new::<Lmdb>(root.path()).expect("new succeeded");
        let sk = k.open_single("sk", StoreOptions::create()).expect("opened");

        let mut writer = k.write().expect("writer");
        sk.put(&mut writer, "foo1", &Value::I64(5678)).expect("wrote");
        sk.put(&mut writer, "bar1", &Value::Bool(false)).expect("wrote");
        sk.put(&mut writer, "baz1", &Value::Str("héllo~ yöu")).expect("wrote");
        assert_eq!(sk.get(&writer, "foo1").expect("read"), Some(Value::I64(5678)));
        assert_eq!(sk.get(&writer, "bar1").expect("read"), Some(Value::Bool(false)));
        assert_eq!(sk.get(&writer, "baz1").expect("read"), Some(Value::Str("héllo~ yöu")));
        writer.commit().expect("committed");
        k.sync(true).expect("synced");
    }
    // Verify that database of type B was written to disk.
    {
        let k = Rkv::new::<Lmdb>(root.path()).expect("new succeeded");
        let sk = k.open_single("sk", StoreOptions::default()).expect("opened");

        let reader = k.read().expect("reader");
        assert_eq!(sk.get(&reader, "foo1").expect("read"), Some(Value::I64(5678)));
        assert_eq!(sk.get(&reader, "bar1").expect("read"), Some(Value::Bool(false)));
        assert_eq!(sk.get(&reader, "baz1").expect("read"), Some(Value::Str("héllo~ yöu")));
    }
    // Verify that database of type A still wasn't changed.
    {
        let k = Rkv::new::<SafeMode>(root.path()).expect("new succeeded");
        let sk = k.open_single("sk", StoreOptions::default()).expect("opened");

        let reader = k.read().expect("reader");
        assert_eq!(sk.get(&reader, "foo").expect("read"), Some(Value::I64(1234)));
        assert_eq!(sk.get(&reader, "bar").expect("read"), Some(Value::Bool(true)));
        assert_eq!(sk.get(&reader, "baz").expect("read"), Some(Value::Str("héllo, yöu")));
    }
}
diff --git a/third_party/rust/rkv/tests/env-lmdb.rs b/third_party/rust/rkv/tests/env-lmdb.rs
new file mode 100644
index 0000000000..f9376d2ccd
--- /dev/null
+++ b/third_party/rust/rkv/tests/env-lmdb.rs
@@ -0,0 +1,1319 @@
// Copyright 2018-2019 Mozilla
//
// Licensed under the Apache License, Version 2.0 (the "License"); you may not use
// this file except in compliance with the License. You may obtain a copy of the
// License at http://www.apache.org/licenses/LICENSE-2.0
// Unless required by applicable law or agreed to in writing, software distributed
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.

// TODO: change this back to `clippy::cognitive_complexity` when Clippy stable
// deprecates `clippy::cyclomatic_complexity`.
+#![allow(clippy::complexity)] + +use std::{ + fs, + path::Path, + str, + sync::{ + Arc, + RwLock, + }, + thread, +}; + +use byteorder::{ + ByteOrder, + LittleEndian, +}; +use tempfile::Builder; + +use rkv::{ + backend::{ + BackendEnvironmentBuilder, + BackendInfo, + BackendStat, + Lmdb, + LmdbDatabase, + LmdbEnvironment, + LmdbRwTransaction, + }, + EnvironmentFlags, + Rkv, + SingleStore, + StoreError, + StoreOptions, + Value, + Writer, +}; + +fn check_rkv(k: &Rkv<LmdbEnvironment>) { + let _ = k.open_single(None, StoreOptions::create()).expect("created default"); + + let s = k.open_single("s", StoreOptions::create()).expect("opened"); + let reader = k.read().expect("reader"); + + let result = s.get(&reader, "foo"); + assert_eq!(None, result.expect("success but no value")); +} + +// The default size is 1MB. +const DEFAULT_SIZE: usize = 1024 * 1024; + +/// We can't open a directory that doesn't exist. +#[test] +fn test_open_fails() { + let root = Builder::new().prefix("test_open_fails").tempdir().expect("tempdir"); + assert!(root.path().exists()); + + let nope = root.path().join("nope/"); + assert!(!nope.exists()); + + let pb = nope.to_path_buf(); + match Rkv::new::<Lmdb>(nope.as_path()).err() { + Some(StoreError::UnsuitableEnvironmentPath(p)) => { + assert_eq!(pb, p); + }, + _ => panic!("expected error"), + }; +} + +#[test] +fn test_open() { + let root = Builder::new().prefix("test_open").tempdir().expect("tempdir"); + println!("Root path: {:?}", root.path()); + fs::create_dir_all(root.path()).expect("dir created"); + assert!(root.path().is_dir()); + + let k = Rkv::new::<Lmdb>(root.path()).expect("new succeeded"); + check_rkv(&k); +} + +#[test] +fn test_open_from_builder() { + let root = Builder::new().prefix("test_open_from_builder").tempdir().expect("tempdir"); + println!("Root path: {:?}", root.path()); + fs::create_dir_all(root.path()).expect("dir created"); + assert!(root.path().is_dir()); + + let mut builder = Rkv::environment_builder::<Lmdb>(); + 
builder.set_max_dbs(2); + + let k = Rkv::from_builder(root.path(), builder).expect("rkv"); + check_rkv(&k); +} + +#[test] +fn test_open_from_builder_with_no_subdir_1() { + let root = Builder::new().prefix("test_open_from_builder").tempdir().expect("tempdir"); + println!("Root path: {:?}", root.path()); + fs::create_dir_all(root.path()).expect("dir created"); + assert!(root.path().is_dir()); + + { + let mut builder = Rkv::environment_builder::<Lmdb>(); + builder.set_max_dbs(2); + + let k = Rkv::from_builder(root.path(), builder).expect("rkv"); + check_rkv(&k); + } + { + let mut builder = Rkv::environment_builder::<Lmdb>(); + builder.set_flags(EnvironmentFlags::NO_SUB_DIR); + builder.set_max_dbs(2); + + let mut datamdb = root.path().to_path_buf(); + datamdb.push("data.mdb"); + + let k = Rkv::from_builder(&datamdb, builder).expect("rkv"); + check_rkv(&k); + } +} + +#[test] +#[should_panic(expected = "rkv: UnsuitableEnvironmentPath")] +fn test_open_from_builder_with_no_subdir_2() { + let root = Builder::new().prefix("test_open_from_builder").tempdir().expect("tempdir"); + println!("Root path: {:?}", root.path()); + fs::create_dir_all(root.path()).expect("dir created"); + assert!(root.path().is_dir()); + + { + let mut builder = Rkv::environment_builder::<Lmdb>(); + builder.set_max_dbs(2); + + let k = Rkv::from_builder(root.path(), builder).expect("rkv"); + check_rkv(&k); + } + { + let mut builder = Rkv::environment_builder::<Lmdb>(); + builder.set_flags(EnvironmentFlags::NO_SUB_DIR); + builder.set_max_dbs(2); + + let mut datamdb = root.path().to_path_buf(); + datamdb.push("bogus.mdb"); + + let k = Rkv::from_builder(&datamdb, builder).expect("rkv"); + check_rkv(&k); + } +} + +#[test] +fn test_open_from_builder_with_dir_1() { + let root = Builder::new().prefix("test_open_from_builder").tempdir().expect("tempdir"); + println!("Root path: {:?}", root.path()); + + let mut builder = Rkv::environment_builder::<Lmdb>(); + builder.set_max_dbs(2); + 
builder.set_make_dir_if_needed(true); + + let k = Rkv::from_builder(root.path(), builder).expect("rkv"); + check_rkv(&k); +} + +#[test] +#[should_panic(expected = "rkv: UnsuitableEnvironmentPath(\"bogus\")")] +fn test_open_from_builder_with_dir_2() { + let root = Path::new("bogus"); + println!("Root path: {:?}", root); + assert!(!root.is_dir()); + + let mut builder = Rkv::environment_builder::<Lmdb>(); + builder.set_max_dbs(2); + + let k = Rkv::from_builder(root, builder).expect("rkv"); + check_rkv(&k); +} + +#[test] +#[should_panic(expected = "opened: DbsFull")] +fn test_create_with_capacity_1() { + let root = Builder::new().prefix("test_create_with_capacity").tempdir().expect("tempdir"); + println!("Root path: {:?}", root.path()); + fs::create_dir_all(root.path()).expect("dir created"); + assert!(root.path().is_dir()); + + let k = Rkv::with_capacity::<Lmdb>(root.path(), 1).expect("rkv"); + check_rkv(&k); + + // This errors with "opened: DbsFull" because we specified a capacity of one (database), + // and check_rkv already opened one (plus the default database, which doesn't count + // against the limit). + let _zzz = k.open_single("zzz", StoreOptions::create()).expect("opened"); +} + +#[test] +fn test_create_with_capacity_2() { + let root = Builder::new().prefix("test_create_with_capacity").tempdir().expect("tempdir"); + println!("Root path: {:?}", root.path()); + fs::create_dir_all(root.path()).expect("dir created"); + assert!(root.path().is_dir()); + + let k = Rkv::with_capacity::<Lmdb>(root.path(), 1).expect("rkv"); + check_rkv(&k); + + // This doesn't error with "opened: DbsFull" with because even though we specified a + // capacity of one (database), and check_rkv already opened one, the default database + // doesn't count against the limit. 
+ let _zzz = k.open_single(None, StoreOptions::create()).expect("opened"); +} + +#[test] +#[should_panic(expected = "opened: DbsFull")] +fn test_open_with_capacity_1() { + let root = Builder::new().prefix("test_open_with_capacity").tempdir().expect("tempdir"); + println!("Root path: {:?}", root.path()); + fs::create_dir_all(root.path()).expect("dir created"); + assert!(root.path().is_dir()); + + let k = Rkv::with_capacity::<Lmdb>(root.path(), 1).expect("rkv"); + check_rkv(&k); + + let _zzz = k.open_single("zzz", StoreOptions::default()).expect("opened"); +} + +#[test] +fn test_open_with_capacity_2() { + let root = Builder::new().prefix("test_open_with_capacity").tempdir().expect("tempdir"); + println!("Root path: {:?}", root.path()); + fs::create_dir_all(root.path()).expect("dir created"); + assert!(root.path().is_dir()); + + let k = Rkv::with_capacity::<Lmdb>(root.path(), 1).expect("rkv"); + check_rkv(&k); + + let _zzz = k.open_single(None, StoreOptions::default()).expect("opened"); +} + +#[test] +fn test_list_dbs_1() { + let root = Builder::new().prefix("test_list_dbs").tempdir().expect("tempdir"); + println!("Root path: {:?}", root.path()); + fs::create_dir_all(root.path()).expect("dir created"); + assert!(root.path().is_dir()); + + let k = Rkv::with_capacity::<Lmdb>(root.path(), 1).expect("rkv"); + check_rkv(&k); + + let dbs = k.get_dbs().unwrap(); + assert_eq!(dbs, vec![Some("s".to_owned())]); +} + +#[test] +fn test_list_dbs_2() { + let root = Builder::new().prefix("test_list_dbs").tempdir().expect("tempdir"); + println!("Root path: {:?}", root.path()); + fs::create_dir_all(root.path()).expect("dir created"); + assert!(root.path().is_dir()); + + let k = Rkv::with_capacity::<Lmdb>(root.path(), 2).expect("rkv"); + check_rkv(&k); + + let _ = k.open_single("zzz", StoreOptions::create()).expect("opened"); + + let dbs = k.get_dbs().unwrap(); + assert_eq!(dbs, vec![Some("s".to_owned()), Some("zzz".to_owned())]); +} + +#[test] +fn test_list_dbs_3() { + let root = 
Builder::new().prefix("test_list_dbs").tempdir().expect("tempdir"); + println!("Root path: {:?}", root.path()); + fs::create_dir_all(root.path()).expect("dir created"); + assert!(root.path().is_dir()); + + let k = Rkv::with_capacity::<Lmdb>(root.path(), 0).expect("rkv"); + + let _ = k.open_single(None, StoreOptions::create()).expect("opened"); + + let dbs = k.get_dbs().unwrap(); + assert_eq!(dbs, vec![None]); +} + +fn get_larger_than_default_map_size_value() -> usize { + // The LMDB C library and lmdb Rust crate docs for setting the map size + // <http://www.lmdb.tech/doc/group__mdb.html#gaa2506ec8dab3d969b0e609cd82e619e5> + // <https://docs.rs/lmdb/0.8.0/lmdb/struct.EnvironmentBuilder.html#method.set_map_size> + // both say that the default map size is 10,485,760 bytes, i.e. 10MiB. + // + // But the DEFAULT_MAPSIZE define in the LMDB code + // https://github.com/LMDB/lmdb/blob/26c7df88e44e31623d0802a564f24781acdefde3/libraries/liblmdb/mdb.c#L729 + // sets the default map size to 1,048,576 bytes, i.e. 1MiB. + // + DEFAULT_SIZE + 1 /* 1,048,576 + 1 bytes, i.e. 1MiB + 1 byte */ +} + +#[test] +#[should_panic(expected = "wrote: MapFull")] +fn test_exceed_map_size() { + let root = Builder::new().prefix("test_exceed_map_size").tempdir().expect("tempdir"); + println!("Root path: {:?}", root.path()); + fs::create_dir_all(root.path()).expect("dir created"); + assert!(root.path().is_dir()); + + let k = Rkv::new::<Lmdb>(root.path()).expect("new succeeded"); + let sk = k.open_single("test", StoreOptions::create()).expect("opened"); + + // Writing a large enough value should cause LMDB to fail on MapFull. + // We write a string that is larger than the default map size. 
+ let val = "x".repeat(get_larger_than_default_map_size_value()); + let mut writer = k.write().expect("writer"); + sk.put(&mut writer, "foo", &Value::Str(&val)).expect("wrote"); +} + +#[test] +#[should_panic(expected = "wrote: KeyValuePairBadSize")] +fn test_exceed_key_size_limit() { + let root = Builder::new().prefix("test_exceed_key_size_limit").tempdir().expect("tempdir"); + println!("Root path: {:?}", root.path()); + fs::create_dir_all(root.path()).expect("dir created"); + assert!(root.path().is_dir()); + + let k = Rkv::new::<Lmdb>(root.path()).expect("new succeeded"); + let sk = k.open_single("test", StoreOptions::create()).expect("opened"); + + let key = "k".repeat(512); + let mut writer = k.write().expect("writer"); + sk.put(&mut writer, key, &Value::Str("val")).expect("wrote"); +} + +#[test] +fn test_increase_map_size() { + let root = Builder::new().prefix("test_open_with_map_size").tempdir().expect("tempdir"); + println!("Root path: {:?}", root.path()); + fs::create_dir_all(root.path()).expect("dir created"); + assert!(root.path().is_dir()); + + let mut builder = Rkv::environment_builder::<Lmdb>(); + // Set the map size to the size of the value we'll store in it + 100KiB, + // which ensures that there's enough space for the value and metadata. 
+ builder.set_map_size(get_larger_than_default_map_size_value() + 100 * 1024 /* 100KiB */); + builder.set_max_dbs(2); + let k = Rkv::from_builder(root.path(), builder).unwrap(); + let sk = k.open_single("test", StoreOptions::create()).expect("opened"); + let val = "x".repeat(get_larger_than_default_map_size_value()); + + let mut writer = k.write().expect("writer"); + sk.put(&mut writer, "foo", &Value::Str(&val)).expect("wrote"); + writer.commit().expect("committed"); + + let reader = k.read().unwrap(); + assert_eq!(sk.get(&reader, "foo").expect("read"), Some(Value::Str(&val))); +} + +#[test] +fn test_round_trip_and_transactions() { + let root = Builder::new().prefix("test_round_trip_and_transactions").tempdir().expect("tempdir"); + fs::create_dir_all(root.path()).expect("dir created"); + + let k = Rkv::new::<Lmdb>(root.path()).expect("new succeeded"); + let sk = k.open_single("sk", StoreOptions::create()).expect("opened"); + + { + let mut writer = k.write().expect("writer"); + sk.put(&mut writer, "foo", &Value::I64(1234)).expect("wrote"); + sk.put(&mut writer, "noo", &Value::F64(1234.0.into())).expect("wrote"); + sk.put(&mut writer, "bar", &Value::Bool(true)).expect("wrote"); + sk.put(&mut writer, "baz", &Value::Str("héllo, yöu")).expect("wrote"); + assert_eq!(sk.get(&writer, "foo").expect("read"), Some(Value::I64(1234))); + assert_eq!(sk.get(&writer, "noo").expect("read"), Some(Value::F64(1234.0.into()))); + assert_eq!(sk.get(&writer, "bar").expect("read"), Some(Value::Bool(true))); + assert_eq!(sk.get(&writer, "baz").expect("read"), Some(Value::Str("héllo, yöu"))); + + // Isolation. Reads won't return values. + let r = &k.read().unwrap(); + assert_eq!(sk.get(r, "foo").expect("read"), None); + assert_eq!(sk.get(r, "bar").expect("read"), None); + assert_eq!(sk.get(r, "baz").expect("read"), None); + } + + // Dropped: tx rollback. Reads will still return nothing. 
+ + { + let r = &k.read().unwrap(); + assert_eq!(sk.get(r, "foo").expect("read"), None); + assert_eq!(sk.get(r, "bar").expect("read"), None); + assert_eq!(sk.get(r, "baz").expect("read"), None); + } + + { + let mut writer = k.write().expect("writer"); + sk.put(&mut writer, "foo", &Value::I64(1234)).expect("wrote"); + sk.put(&mut writer, "bar", &Value::Bool(true)).expect("wrote"); + sk.put(&mut writer, "baz", &Value::Str("héllo, yöu")).expect("wrote"); + assert_eq!(sk.get(&writer, "foo").expect("read"), Some(Value::I64(1234))); + assert_eq!(sk.get(&writer, "bar").expect("read"), Some(Value::Bool(true))); + assert_eq!(sk.get(&writer, "baz").expect("read"), Some(Value::Str("héllo, yöu"))); + + writer.commit().expect("committed"); + } + + // Committed. Reads will succeed. + + { + let r = k.read().unwrap(); + assert_eq!(sk.get(&r, "foo").expect("read"), Some(Value::I64(1234))); + assert_eq!(sk.get(&r, "bar").expect("read"), Some(Value::Bool(true))); + assert_eq!(sk.get(&r, "baz").expect("read"), Some(Value::Str("héllo, yöu"))); + } + + { + let mut writer = k.write().expect("writer"); + sk.delete(&mut writer, "foo").expect("deleted"); + sk.delete(&mut writer, "bar").expect("deleted"); + sk.delete(&mut writer, "baz").expect("deleted"); + assert_eq!(sk.get(&writer, "foo").expect("read"), None); + assert_eq!(sk.get(&writer, "bar").expect("read"), None); + assert_eq!(sk.get(&writer, "baz").expect("read"), None); + + // Isolation. Reads still return values. + let r = k.read().unwrap(); + assert_eq!(sk.get(&r, "foo").expect("read"), Some(Value::I64(1234))); + assert_eq!(sk.get(&r, "bar").expect("read"), Some(Value::Bool(true))); + assert_eq!(sk.get(&r, "baz").expect("read"), Some(Value::Str("héllo, yöu"))); + } + + // Dropped: tx rollback. Reads will still return values. 
+ + { + let r = k.read().unwrap(); + assert_eq!(sk.get(&r, "foo").expect("read"), Some(Value::I64(1234))); + assert_eq!(sk.get(&r, "bar").expect("read"), Some(Value::Bool(true))); + assert_eq!(sk.get(&r, "baz").expect("read"), Some(Value::Str("héllo, yöu"))); + } + + { + let mut writer = k.write().expect("writer"); + sk.delete(&mut writer, "foo").expect("deleted"); + sk.delete(&mut writer, "bar").expect("deleted"); + sk.delete(&mut writer, "baz").expect("deleted"); + assert_eq!(sk.get(&writer, "foo").expect("read"), None); + assert_eq!(sk.get(&writer, "bar").expect("read"), None); + assert_eq!(sk.get(&writer, "baz").expect("read"), None); + + writer.commit().expect("committed"); + } + + // Committed. Reads will succeed but return None to indicate a missing value. + + { + let r = k.read().unwrap(); + assert_eq!(sk.get(&r, "foo").expect("read"), None); + assert_eq!(sk.get(&r, "bar").expect("read"), None); + assert_eq!(sk.get(&r, "baz").expect("read"), None); + } +} + +#[test] +fn test_single_store_clear() { + let root = Builder::new().prefix("test_single_store_clear").tempdir().expect("tempdir"); + fs::create_dir_all(root.path()).expect("dir created"); + + let k = Rkv::new::<Lmdb>(root.path()).expect("new succeeded"); + let sk = k.open_single("sk", StoreOptions::create()).expect("opened"); + + { + let mut writer = k.write().expect("writer"); + sk.put(&mut writer, "foo", &Value::I64(1234)).expect("wrote"); + sk.put(&mut writer, "bar", &Value::Bool(true)).expect("wrote"); + sk.put(&mut writer, "baz", &Value::Str("héllo, yöu")).expect("wrote"); + writer.commit().expect("committed"); + } + + { + let mut writer = k.write().expect("writer"); + sk.clear(&mut writer).expect("cleared"); + writer.commit().expect("committed"); + } + + { + let r = k.read().unwrap(); + let iter = sk.iter_start(&r).expect("iter"); + assert_eq!(iter.count(), 0); + } +} + +#[test] +#[should_panic(expected = "KeyValuePairNotFound")] +fn test_single_store_delete_nonexistent() { + let root = 
Builder::new().prefix("test_single_store_delete_nonexistent").tempdir().expect("tempdir"); + fs::create_dir_all(root.path()).expect("dir created"); + + let k = Rkv::new::<Lmdb>(root.path()).expect("new succeeded"); + let sk = k.open_single("sk", StoreOptions::create()).expect("opened"); + + let mut writer = k.write().expect("writer"); + sk.delete(&mut writer, "bogus").unwrap(); +} + +#[test] +#[cfg(feature = "db-dup-sort")] +fn test_multi_put_get_del() { + let root = Builder::new().prefix("test_multi_put_get_del").tempdir().expect("tempdir"); + fs::create_dir_all(root.path()).expect("dir created"); + + let k = Rkv::new::<Lmdb>(root.path()).expect("new succeeded"); + let multistore = k.open_multi("multistore", StoreOptions::create()).unwrap(); + + let mut writer = k.write().unwrap(); + multistore.put(&mut writer, "str1", &Value::Str("str1 foo")).unwrap(); + multistore.put(&mut writer, "str1", &Value::Str("str1 bar")).unwrap(); + multistore.put(&mut writer, "str2", &Value::Str("str2 foo")).unwrap(); + multistore.put(&mut writer, "str2", &Value::Str("str2 bar")).unwrap(); + multistore.put(&mut writer, "str3", &Value::Str("str3 foo")).unwrap(); + multistore.put(&mut writer, "str3", &Value::Str("str3 bar")).unwrap(); + writer.commit().unwrap(); + + let writer = k.write().unwrap(); + { + let mut iter = multistore.get(&writer, "str1").unwrap(); + let (id, val) = iter.next().unwrap().unwrap(); + assert_eq!((id, val), (&b"str1"[..], Value::Str("str1 bar"))); + let (id, val) = iter.next().unwrap().unwrap(); + assert_eq!((id, val), (&b"str1"[..], Value::Str("str1 foo"))); + } + writer.commit().unwrap(); + + let mut writer = k.write().unwrap(); + multistore.delete(&mut writer, "str1", &Value::Str("str1 foo")).unwrap(); + assert_eq!(multistore.get_first(&writer, "str1").unwrap(), Some(Value::Str("str1 bar"))); + multistore.delete(&mut writer, "str2", &Value::Str("str2 bar")).unwrap(); + assert_eq!(multistore.get_first(&writer, "str2").unwrap(), Some(Value::Str("str2 foo"))); + 
multistore.delete_all(&mut writer, "str3").unwrap(); + assert_eq!(multistore.get_first(&writer, "str3").unwrap(), None); + writer.commit().unwrap(); +} + +#[test] +#[cfg(feature = "db-dup-sort")] +fn test_multiple_store_clear() { + let root = Builder::new().prefix("test_multiple_store_clear").tempdir().expect("tempdir"); + fs::create_dir_all(root.path()).expect("dir created"); + + let k = Rkv::new::<Lmdb>(root.path()).expect("new succeeded"); + let multistore = k.open_multi("multistore", StoreOptions::create()).expect("opened"); + + { + let mut writer = k.write().expect("writer"); + multistore.put(&mut writer, "str1", &Value::Str("str1 foo")).unwrap(); + multistore.put(&mut writer, "str1", &Value::Str("str1 bar")).unwrap(); + multistore.put(&mut writer, "str2", &Value::Str("str2 foo")).unwrap(); + multistore.put(&mut writer, "str2", &Value::Str("str2 bar")).unwrap(); + multistore.put(&mut writer, "str3", &Value::Str("str3 foo")).unwrap(); + multistore.put(&mut writer, "str3", &Value::Str("str3 bar")).unwrap(); + writer.commit().expect("committed"); + } + + { + let mut writer = k.write().expect("writer"); + multistore.clear(&mut writer).expect("cleared"); + writer.commit().expect("committed"); + } + + { + let r = k.read().unwrap(); + assert_eq!(multistore.get_first(&r, "str1").expect("read"), None); + assert_eq!(multistore.get_first(&r, "str2").expect("read"), None); + assert_eq!(multistore.get_first(&r, "str3").expect("read"), None); + } +} + +#[test] +fn test_open_store_for_read() { + let root = Builder::new().prefix("test_open_store_for_read").tempdir().expect("tempdir"); + fs::create_dir_all(root.path()).expect("dir created"); + + let k = Rkv::new::<Lmdb>(root.path()).expect("new succeeded"); + + // First create the store, and start a write transaction on it. 
+ let sk = k.open_single("sk", StoreOptions::create()).expect("opened"); + let mut writer = k.write().expect("writer"); + sk.put(&mut writer, "foo", &Value::Str("bar")).expect("write"); + + // Open the same store for read, note that the write transaction is still in progress, + // it should not block the reader though. + let sk_readonly = k.open_single("sk", StoreOptions::default()).expect("opened"); + writer.commit().expect("commit"); + + // Now the write transaction is committed, any followed reads should see its change. + let reader = k.read().expect("reader"); + assert_eq!(sk_readonly.get(&reader, "foo").expect("read"), Some(Value::Str("bar"))); +} + +#[test] +#[should_panic(expected = "open a missing store")] +fn test_open_a_missing_store() { + let root = Builder::new().prefix("test_open_a_missing_store").tempdir().expect("tempdir"); + fs::create_dir_all(root.path()).expect("dir created"); + + let k = Rkv::new::<Lmdb>(root.path()).expect("new succeeded"); + let _sk = k.open_single("sk", StoreOptions::default()).expect("open a missing store"); +} + +#[test] +#[should_panic(expected = "new failed: FileInvalid")] +fn test_open_a_broken_store() { + let root = Builder::new().prefix("test_open_a_missing_store").tempdir().expect("tempdir"); + fs::create_dir_all(root.path()).expect("dir created"); + + let dbfile = root.path().join("data.mdb"); + fs::write(dbfile, "bogus").expect("dbfile created"); + + let _ = Rkv::new::<Lmdb>(root.path()).expect("new failed"); +} + +#[test] +fn test_open_fail_with_badrslot() { + let root = Builder::new().prefix("test_open_fail_with_badrslot").tempdir().expect("tempdir"); + fs::create_dir_all(root.path()).expect("dir created"); + + let k = Rkv::new::<Lmdb>(root.path()).expect("new succeeded"); + + // First create the store + let _sk = k.open_single("sk", StoreOptions::create()).expect("opened"); + + // Open a reader on this store + let _reader = k.read().expect("reader"); + + // Open the same store for read while the reader is in 
progress will panic + let store = k.open_single("sk", StoreOptions::default()); + match store { + Err(StoreError::OpenAttemptedDuringTransaction(_thread_id)) => (), + _ => panic!("should panic"), + } +} + +#[test] +fn test_read_before_write_num() { + let root = Builder::new().prefix("test_read_before_write_num").tempdir().expect("tempdir"); + fs::create_dir_all(root.path()).expect("dir created"); + + let k = Rkv::new::<Lmdb>(root.path()).expect("new succeeded"); + let sk = k.open_single("sk", StoreOptions::create()).expect("opened"); + + // Test reading a number, modifying it, and then writing it back. + // We have to be done with the Value::I64 before calling Writer::put, + // as the Value::I64 borrows an immutable reference to the Writer. + // So we extract and copy its primitive value. + + fn get_existing_foo(store: SingleStore<LmdbDatabase>, writer: &Writer<LmdbRwTransaction>) -> Option<i64> { + match store.get(writer, "foo").expect("read") { + Some(Value::I64(val)) => Some(val), + _ => None, + } + } + + let mut writer = k.write().expect("writer"); + let mut existing = get_existing_foo(sk, &writer).unwrap_or(99); + existing += 1; + sk.put(&mut writer, "foo", &Value::I64(existing)).expect("success"); + + let updated = get_existing_foo(sk, &writer).unwrap_or(99); + assert_eq!(updated, 100); + writer.commit().expect("commit"); +} + +#[test] +fn test_read_before_write_str() { + let root = Builder::new().prefix("test_read_before_write_str").tempdir().expect("tempdir"); + fs::create_dir_all(root.path()).expect("dir created"); + + let k = Rkv::new::<Lmdb>(root.path()).expect("new succeeded"); + let sk = k.open_single("sk", StoreOptions::create()).expect("opened"); + + // Test reading a string, modifying it, and then writing it back. + // We have to be done with the Value::Str before calling Writer::put, + // as the Value::Str (and its underlying &str) borrows an immutable + // reference to the Writer. So we copy it to a String. 
+ + fn get_existing_foo(store: SingleStore<LmdbDatabase>, writer: &Writer<LmdbRwTransaction>) -> Option<String> { + match store.get(writer, "foo").expect("read") { + Some(Value::Str(val)) => Some(val.to_string()), + _ => None, + } + } + + let mut writer = k.write().expect("writer"); + let mut existing = get_existing_foo(sk, &writer).unwrap_or_default(); + existing.push('…'); + sk.put(&mut writer, "foo", &Value::Str(&existing)).expect("write"); + + let updated = get_existing_foo(sk, &writer).unwrap_or_default(); + assert_eq!(updated, "…"); + writer.commit().expect("commit"); +} + +#[test] +fn test_concurrent_read_transactions_prohibited() { + let root = Builder::new().prefix("test_concurrent_reads_prohibited").tempdir().expect("tempdir"); + fs::create_dir_all(root.path()).expect("dir created"); + + let k = Rkv::new::<Lmdb>(root.path()).expect("new succeeded"); + let _first = k.read().expect("reader"); + let second = k.read(); + + match second { + Err(StoreError::ReadTransactionAlreadyExists(t)) => { + println!("Thread was {:?}", t); + }, + Err(e) => { + println!("Got error {:?}", e); + }, + _ => { + panic!("Expected error."); + }, + } +} + +#[test] +fn test_isolation() { + let root = Builder::new().prefix("test_isolation").tempdir().expect("tempdir"); + fs::create_dir_all(root.path()).expect("dir created"); + + let k = Rkv::new::<Lmdb>(root.path()).expect("new succeeded"); + let s = k.open_single("s", StoreOptions::create()).expect("opened"); + + // Add one field. + { + let mut writer = k.write().expect("writer"); + s.put(&mut writer, "foo", &Value::I64(1234)).expect("wrote"); + writer.commit().expect("committed"); + } + + { + let reader = k.read().unwrap(); + assert_eq!(s.get(&reader, "foo").expect("read"), Some(Value::I64(1234))); + } + + // Establish a long-lived reader that outlasts a writer. + let reader = k.read().expect("reader"); + assert_eq!(s.get(&reader, "foo").expect("read"), Some(Value::I64(1234))); + + // Start a write transaction. 
+ let mut writer = k.write().expect("writer"); + s.put(&mut writer, "foo", &Value::I64(999)).expect("wrote"); + + // The reader and writer are isolated. + assert_eq!(s.get(&reader, "foo").expect("read"), Some(Value::I64(1234))); + assert_eq!(s.get(&writer, "foo").expect("read"), Some(Value::I64(999))); + + // If we commit the writer, we still have isolation. + writer.commit().expect("committed"); + assert_eq!(s.get(&reader, "foo").expect("read"), Some(Value::I64(1234))); + + // A new reader sees the committed value. Note that LMDB doesn't allow two + // read transactions to exist in the same thread, so we abort the previous one. + reader.abort(); + let reader = k.read().expect("reader"); + assert_eq!(s.get(&reader, "foo").expect("read"), Some(Value::I64(999))); +} + +#[test] +fn test_blob() { + let root = Builder::new().prefix("test_round_trip_blob").tempdir().expect("tempdir"); + fs::create_dir_all(root.path()).expect("dir created"); + + let k = Rkv::new::<Lmdb>(root.path()).expect("new succeeded"); + let sk = k.open_single("sk", StoreOptions::create()).expect("opened"); + + let mut writer = k.write().expect("writer"); + assert_eq!(sk.get(&writer, "foo").expect("read"), None); + sk.put(&mut writer, "foo", &Value::Blob(&[1, 2, 3, 4])).expect("wrote"); + assert_eq!(sk.get(&writer, "foo").expect("read"), Some(Value::Blob(&[1, 2, 3, 4]))); + + fn u16_to_u8(src: &[u16]) -> Vec<u8> { + let mut dst = vec![0; 2 * src.len()]; + LittleEndian::write_u16_into(src, &mut dst); + dst + } + + fn u8_to_u16(src: &[u8]) -> Vec<u16> { + let mut dst = vec![0; src.len() / 2]; + LittleEndian::read_u16_into(src, &mut dst); + dst + } + + // When storing UTF-16 strings as blobs, we'll need to convert + // their [u16] backing storage to [u8]. Test that converting, writing, + // reading, and converting back works as expected. 
+ let u16_array = [1000, 10000, 54321, 65535]; + assert_eq!(sk.get(&writer, "bar").expect("read"), None); + sk.put(&mut writer, "bar", &Value::Blob(&u16_to_u8(&u16_array))).expect("wrote"); + let u8_array = match sk.get(&writer, "bar").expect("read") { + Some(Value::Blob(val)) => val, + _ => &[], + }; + assert_eq!(u8_to_u16(u8_array), u16_array); +} + +#[test] +fn test_sync() { + let root = Builder::new().prefix("test_sync").tempdir().expect("tempdir"); + fs::create_dir_all(root.path()).expect("dir created"); + + let mut builder = Rkv::environment_builder::<Lmdb>(); + builder.set_max_dbs(1); + builder.set_flags(EnvironmentFlags::NO_SYNC); + { + let k = Rkv::from_builder(root.path(), builder).expect("new succeeded"); + let sk = k.open_single("sk", StoreOptions::create()).expect("opened"); + { + let mut writer = k.write().expect("writer"); + sk.put(&mut writer, "foo", &Value::I64(1234)).expect("wrote"); + writer.commit().expect("committed"); + k.sync(true).expect("synced"); + } + } + let k = Rkv::from_builder(root.path(), builder).expect("new succeeded"); + let sk = k.open_single("sk", StoreOptions::default()).expect("opened"); + let reader = k.read().expect("reader"); + assert_eq!(sk.get(&reader, "foo").expect("read"), Some(Value::I64(1234))); +} + +#[test] +#[cfg(feature = "db-int-key")] +fn test_stat() { + let root = Builder::new().prefix("test_stat").tempdir().expect("tempdir"); + fs::create_dir_all(root.path()).expect("dir created"); + + let k = Rkv::new::<Lmdb>(root.path()).expect("new succeeded"); + for i in 0..5 { + let sk = k.open_integer(&format!("sk{}", i)[..], StoreOptions::create()).expect("opened"); + { + let mut writer = k.write().expect("writer"); + sk.put(&mut writer, i, &Value::I64(i64::from(i))).expect("wrote"); + writer.commit().expect("committed"); + } + } + assert_eq!(k.stat().expect("stat").depth(), 1); + assert_eq!(k.stat().expect("stat").entries(), 5); + assert_eq!(k.stat().expect("stat").branch_pages(), 0); + 
assert_eq!(k.stat().expect("stat").leaf_pages(), 1); +} + +#[test] +fn test_info() { + let root = Builder::new().prefix("test_info").tempdir().expect("tempdir"); + fs::create_dir_all(root.path()).expect("dir created"); + + let k = Rkv::new::<Lmdb>(root.path()).expect("new succeeded"); + let sk = k.open_single("sk", StoreOptions::create()).expect("opened"); + + let mut writer = k.write().expect("writer"); + sk.put(&mut writer, "foo", &Value::Str("bar")).expect("wrote"); + writer.commit().expect("commited"); + + let info = k.info().expect("info"); + + // The default size is 1MB. + assert_eq!(info.map_size(), DEFAULT_SIZE); + // Should greater than 0 after the write txn. + assert!(info.last_pgno() > 0); + // A txn to open_single + a txn to write. + assert_eq!(info.last_txnid(), 2); + // The default max readers is 126. + assert_eq!(info.max_readers(), 126); + assert_eq!(info.num_readers(), 0); + + // A new reader should increment the reader counter. + let _reader = k.read().expect("reader"); + let info = k.info().expect("info"); + + assert_eq!(info.num_readers(), 1); +} + +#[test] +fn test_load_ratio() { + let root = Builder::new().prefix("test_load_ratio").tempdir().expect("tempdir"); + fs::create_dir_all(root.path()).expect("dir created"); + + let k = Rkv::new::<Lmdb>(root.path()).expect("new succeeded"); + let sk = k.open_single("sk", StoreOptions::create()).expect("opened"); + + let mut writer = k.write().expect("writer"); + sk.put(&mut writer, "foo", &Value::Str("bar")).expect("wrote"); + writer.commit().expect("commited"); + let ratio = k.load_ratio().expect("ratio").unwrap(); + assert!(ratio > 0.0_f32 && ratio < 1.0_f32); + + // Put data to database should increase the load ratio. 
+ let mut writer = k.write().expect("writer"); + sk.put(&mut writer, "bar", &Value::Str(&"more-than-4KB".repeat(1000))).expect("wrote"); + writer.commit().expect("commited"); + let new_ratio = k.load_ratio().expect("ratio").unwrap(); + assert!(new_ratio > ratio); + + // Clear the database so that all the used pages should go to freelist, hence the ratio + // should decrease. + let mut writer = k.write().expect("writer"); + sk.clear(&mut writer).expect("clear"); + writer.commit().expect("commited"); + let after_clear_ratio = k.load_ratio().expect("ratio").unwrap(); + assert!(after_clear_ratio < new_ratio); +} + +#[test] +fn test_set_map_size() { + let root = Builder::new().prefix("test_size_map_size").tempdir().expect("tempdir"); + fs::create_dir_all(root.path()).expect("dir created"); + + let k = Rkv::new::<Lmdb>(root.path()).expect("new succeeded"); + let sk = k.open_single("sk", StoreOptions::create()).expect("opened"); + + assert_eq!(k.info().expect("info").map_size(), DEFAULT_SIZE); + + k.set_map_size(2 * DEFAULT_SIZE).expect("resized"); + + // Should be able to write. + let mut writer = k.write().expect("writer"); + sk.put(&mut writer, "foo", &Value::Str("bar")).expect("wrote"); + writer.commit().expect("commited"); + + assert_eq!(k.info().expect("info").map_size(), 2 * DEFAULT_SIZE); +} + +#[test] +fn test_iter() { + let root = Builder::new().prefix("test_iter").tempdir().expect("tempdir"); + fs::create_dir_all(root.path()).expect("dir created"); + + let k = Rkv::new::<Lmdb>(root.path()).expect("new succeeded"); + let sk = k.open_single("sk", StoreOptions::create()).expect("opened"); + + // An iterator over an empty store returns no values. 
+ { + let reader = k.read().unwrap(); + let mut iter = sk.iter_start(&reader).unwrap(); + assert!(iter.next().is_none()); + } + + let mut writer = k.write().expect("writer"); + sk.put(&mut writer, "foo", &Value::I64(1234)).expect("wrote"); + sk.put(&mut writer, "noo", &Value::F64(1234.0.into())).expect("wrote"); + sk.put(&mut writer, "bar", &Value::Bool(true)).expect("wrote"); + sk.put(&mut writer, "baz", &Value::Str("héllo, yöu")).expect("wrote"); + sk.put(&mut writer, "héllò, töűrîst", &Value::Str("Emil.RuleZ!")).expect("wrote"); + sk.put(&mut writer, "你好,遊客", &Value::Str("米克規則")).expect("wrote"); + writer.commit().expect("committed"); + + let reader = k.read().unwrap(); + + // Reader.iter() returns (key, value) tuples ordered by key. + let mut iter = sk.iter_start(&reader).unwrap(); + let (key, val) = iter.next().unwrap().unwrap(); + assert_eq!(str::from_utf8(key).expect("key"), "bar"); + assert_eq!(val, Value::Bool(true)); + let (key, val) = iter.next().unwrap().unwrap(); + assert_eq!(str::from_utf8(key).expect("key"), "baz"); + assert_eq!(val, Value::Str("héllo, yöu")); + let (key, val) = iter.next().unwrap().unwrap(); + assert_eq!(str::from_utf8(key).expect("key"), "foo"); + assert_eq!(val, Value::I64(1234)); + let (key, val) = iter.next().unwrap().unwrap(); + assert_eq!(str::from_utf8(key).expect("key"), "héllò, töűrîst"); + assert_eq!(val, Value::Str("Emil.RuleZ!")); + let (key, val) = iter.next().unwrap().unwrap(); + assert_eq!(str::from_utf8(key).expect("key"), "noo"); + assert_eq!(val, Value::F64(1234.0.into())); + let (key, val) = iter.next().unwrap().unwrap(); + assert_eq!(str::from_utf8(key).expect("key"), "你好,遊客"); + assert_eq!(val, Value::Str("米克規則")); + assert!(iter.next().is_none()); + + // Iterators don't loop. Once one returns None, additional calls + // to its next() method will always return None. + assert!(iter.next().is_none()); + + // Reader.iter_from() begins iteration at the first key equal to + // or greater than the given key. 
+ let mut iter = sk.iter_from(&reader, "moo").unwrap(); + let (key, val) = iter.next().unwrap().unwrap(); + assert_eq!(str::from_utf8(key).expect("key"), "noo"); + assert_eq!(val, Value::F64(1234.0.into())); + let (key, val) = iter.next().unwrap().unwrap(); + assert_eq!(str::from_utf8(key).expect("key"), "你好,遊客"); + assert_eq!(val, Value::Str("米克規則")); + assert!(iter.next().is_none()); + + // Reader.iter_from() works as expected when the given key is a prefix + // of a key in the store. + let mut iter = sk.iter_from(&reader, "no").unwrap(); + let (key, val) = iter.next().unwrap().unwrap(); + assert_eq!(str::from_utf8(key).expect("key"), "noo"); + assert_eq!(val, Value::F64(1234.0.into())); + let (key, val) = iter.next().unwrap().unwrap(); + assert_eq!(str::from_utf8(key).expect("key"), "你好,遊客"); + assert_eq!(val, Value::Str("米克規則")); + assert!(iter.next().is_none()); +} + +#[test] +fn test_iter_from_key_greater_than_existing() { + let root = Builder::new().prefix("test_iter_from_key_greater_than_existing").tempdir().expect("tempdir"); + fs::create_dir_all(root.path()).expect("dir created"); + let k = Rkv::new::<Lmdb>(root.path()).expect("new succeeded"); + let sk = k.open_single("sk", StoreOptions::create()).expect("opened"); + + let mut writer = k.write().expect("writer"); + sk.put(&mut writer, "foo", &Value::I64(1234)).expect("wrote"); + sk.put(&mut writer, "noo", &Value::F64(1234.0.into())).expect("wrote"); + sk.put(&mut writer, "bar", &Value::Bool(true)).expect("wrote"); + sk.put(&mut writer, "baz", &Value::Str("héllo, yöu")).expect("wrote"); + writer.commit().expect("committed"); + + let reader = k.read().unwrap(); + let mut iter = sk.iter_from(&reader, "nuu").unwrap(); + assert!(iter.next().is_none()); +} + +#[test] +fn test_multiple_store_read_write() { + let root = Builder::new().prefix("test_multiple_store_read_write").tempdir().expect("tempdir"); + fs::create_dir_all(root.path()).expect("dir created"); + + let k = Rkv::new::<Lmdb>(root.path()).expect("new 
succeeded"); + let s1 = k.open_single("store_1", StoreOptions::create()).expect("opened"); + let s2 = k.open_single("store_2", StoreOptions::create()).expect("opened"); + let s3 = k.open_single("store_3", StoreOptions::create()).expect("opened"); + + let mut writer = k.write().expect("writer"); + s1.put(&mut writer, "foo", &Value::Str("bar")).expect("wrote"); + s2.put(&mut writer, "foo", &Value::I64(123)).expect("wrote"); + s3.put(&mut writer, "foo", &Value::Bool(true)).expect("wrote"); + + assert_eq!(s1.get(&writer, "foo").expect("read"), Some(Value::Str("bar"))); + assert_eq!(s2.get(&writer, "foo").expect("read"), Some(Value::I64(123))); + assert_eq!(s3.get(&writer, "foo").expect("read"), Some(Value::Bool(true))); + + writer.commit().expect("committed"); + + let reader = k.read().expect("unbound_reader"); + assert_eq!(s1.get(&reader, "foo").expect("read"), Some(Value::Str("bar"))); + assert_eq!(s2.get(&reader, "foo").expect("read"), Some(Value::I64(123))); + assert_eq!(s3.get(&reader, "foo").expect("read"), Some(Value::Bool(true))); + reader.abort(); + + // test delete across multiple stores + let mut writer = k.write().expect("writer"); + s1.delete(&mut writer, "foo").expect("deleted"); + s2.delete(&mut writer, "foo").expect("deleted"); + s3.delete(&mut writer, "foo").expect("deleted"); + writer.commit().expect("committed"); + + let reader = k.read().expect("reader"); + assert_eq!(s1.get(&reader, "key").expect("value"), None); + assert_eq!(s2.get(&reader, "key").expect("value"), None); + assert_eq!(s3.get(&reader, "key").expect("value"), None); +} + +#[test] +fn test_multiple_store_iter() { + let root = Builder::new().prefix("test_multiple_store_iter").tempdir().expect("tempdir"); + fs::create_dir_all(root.path()).expect("dir created"); + + let k = Rkv::new::<Lmdb>(root.path()).expect("new succeeded"); + let s1 = k.open_single("store_1", StoreOptions::create()).expect("opened"); + let s2 = k.open_single("store_2", StoreOptions::create()).expect("opened"); + + 
let mut writer = k.write().expect("writer"); + // Write to "s1" + s1.put(&mut writer, "foo", &Value::I64(1234)).expect("wrote"); + s1.put(&mut writer, "noo", &Value::F64(1234.0.into())).expect("wrote"); + s1.put(&mut writer, "bar", &Value::Bool(true)).expect("wrote"); + s1.put(&mut writer, "baz", &Value::Str("héllo, yöu")).expect("wrote"); + s1.put(&mut writer, "héllò, töűrîst", &Value::Str("Emil.RuleZ!")).expect("wrote"); + s1.put(&mut writer, "你好,遊客", &Value::Str("米克規則")).expect("wrote"); + // &mut writer to "s2" + s2.put(&mut writer, "foo", &Value::I64(1234)).expect("wrote"); + s2.put(&mut writer, "noo", &Value::F64(1234.0.into())).expect("wrote"); + s2.put(&mut writer, "bar", &Value::Bool(true)).expect("wrote"); + s2.put(&mut writer, "baz", &Value::Str("héllo, yöu")).expect("wrote"); + s2.put(&mut writer, "héllò, töűrîst", &Value::Str("Emil.RuleZ!")).expect("wrote"); + s2.put(&mut writer, "你好,遊客", &Value::Str("米克規則")).expect("wrote"); + writer.commit().expect("committed"); + + let reader = k.read().unwrap(); + + // Iterate through the whole store in "s1" + let mut iter = s1.iter_start(&reader).unwrap(); + let (key, val) = iter.next().unwrap().unwrap(); + assert_eq!(str::from_utf8(key).expect("key"), "bar"); + assert_eq!(val, Value::Bool(true)); + let (key, val) = iter.next().unwrap().unwrap(); + assert_eq!(str::from_utf8(key).expect("key"), "baz"); + assert_eq!(val, Value::Str("héllo, yöu")); + let (key, val) = iter.next().unwrap().unwrap(); + assert_eq!(str::from_utf8(key).expect("key"), "foo"); + assert_eq!(val, Value::I64(1234)); + let (key, val) = iter.next().unwrap().unwrap(); + assert_eq!(str::from_utf8(key).expect("key"), "héllò, töűrîst"); + assert_eq!(val, Value::Str("Emil.RuleZ!")); + let (key, val) = iter.next().unwrap().unwrap(); + assert_eq!(str::from_utf8(key).expect("key"), "noo"); + assert_eq!(val, Value::F64(1234.0.into())); + let (key, val) = iter.next().unwrap().unwrap(); + assert_eq!(str::from_utf8(key).expect("key"), "你好,遊客"); + 
assert_eq!(val, Value::Str("米克規則")); + assert!(iter.next().is_none()); + + // Iterate through the whole store in "s2" + let mut iter = s2.iter_start(&reader).unwrap(); + let (key, val) = iter.next().unwrap().unwrap(); + assert_eq!(str::from_utf8(key).expect("key"), "bar"); + assert_eq!(val, Value::Bool(true)); + let (key, val) = iter.next().unwrap().unwrap(); + assert_eq!(str::from_utf8(key).expect("key"), "baz"); + assert_eq!(val, Value::Str("héllo, yöu")); + let (key, val) = iter.next().unwrap().unwrap(); + assert_eq!(str::from_utf8(key).expect("key"), "foo"); + assert_eq!(val, Value::I64(1234)); + let (key, val) = iter.next().unwrap().unwrap(); + assert_eq!(str::from_utf8(key).expect("key"), "héllò, töűrîst"); + assert_eq!(val, Value::Str("Emil.RuleZ!")); + let (key, val) = iter.next().unwrap().unwrap(); + assert_eq!(str::from_utf8(key).expect("key"), "noo"); + assert_eq!(val, Value::F64(1234.0.into())); + let (key, val) = iter.next().unwrap().unwrap(); + assert_eq!(str::from_utf8(key).expect("key"), "你好,遊客"); + assert_eq!(val, Value::Str("米克規則")); + assert!(iter.next().is_none()); + + // Iterate from a given key in "s1" + let mut iter = s1.iter_from(&reader, "moo").unwrap(); + let (key, val) = iter.next().unwrap().unwrap(); + assert_eq!(str::from_utf8(key).expect("key"), "noo"); + assert_eq!(val, Value::F64(1234.0.into())); + let (key, val) = iter.next().unwrap().unwrap(); + assert_eq!(str::from_utf8(key).expect("key"), "你好,遊客"); + assert_eq!(val, Value::Str("米克規則")); + assert!(iter.next().is_none()); + + // Iterate from a given key in "s2" + let mut iter = s2.iter_from(&reader, "moo").unwrap(); + let (key, val) = iter.next().unwrap().unwrap(); + assert_eq!(str::from_utf8(key).expect("key"), "noo"); + assert_eq!(val, Value::F64(1234.0.into())); + let (key, val) = iter.next().unwrap().unwrap(); + assert_eq!(str::from_utf8(key).expect("key"), "你好,遊客"); + assert_eq!(val, Value::Str("米克規則")); + assert!(iter.next().is_none()); + + // Iterate from a given prefix in 
"s1" + let mut iter = s1.iter_from(&reader, "no").unwrap(); + let (key, val) = iter.next().unwrap().unwrap(); + assert_eq!(str::from_utf8(key).expect("key"), "noo"); + assert_eq!(val, Value::F64(1234.0.into())); + let (key, val) = iter.next().unwrap().unwrap(); + assert_eq!(str::from_utf8(key).expect("key"), "你好,遊客"); + assert_eq!(val, Value::Str("米克規則")); + assert!(iter.next().is_none()); + + // Iterate from a given prefix in "s2" + let mut iter = s2.iter_from(&reader, "no").unwrap(); + let (key, val) = iter.next().unwrap().unwrap(); + assert_eq!(str::from_utf8(key).expect("key"), "noo"); + assert_eq!(val, Value::F64(1234.0.into())); + let (key, val) = iter.next().unwrap().unwrap(); + assert_eq!(str::from_utf8(key).expect("key"), "你好,遊客"); + assert_eq!(val, Value::Str("米克規則")); + assert!(iter.next().is_none()); +} + +#[test] +fn test_store_multiple_thread() { + let root = Builder::new().prefix("test_multiple_thread").tempdir().expect("tempdir"); + fs::create_dir_all(root.path()).expect("dir created"); + + let rkv_arc = Arc::new(RwLock::new(Rkv::new::<Lmdb>(root.path()).expect("new succeeded"))); + let store = rkv_arc.read().unwrap().open_single("test", StoreOptions::create()).expect("opened"); + + let num_threads = 10; + let mut write_handles = Vec::with_capacity(num_threads as usize); + let mut read_handles = Vec::with_capacity(num_threads as usize); + + // Note that this isn't intended to demonstrate a good use of threads. + // For this shape of data, it would be more performant to write/read + // all values using one transaction in a single thread. The point here + // is just to confirm that a store can be shared by multiple threads. + + // For each KV pair, spawn a thread that writes it to the store. 
+ for i in 0..num_threads { + let rkv_arc = rkv_arc.clone(); + write_handles.push(thread::spawn(move || { + let rkv = rkv_arc.write().expect("rkv"); + let mut writer = rkv.write().expect("writer"); + store.put(&mut writer, i.to_string(), &Value::U64(i)).expect("written"); + writer.commit().unwrap(); + })); + } + for handle in write_handles { + handle.join().expect("joined"); + } + + // For each KV pair, spawn a thread that reads it from the store + // and returns its value. + for i in 0..num_threads { + let rkv_arc = rkv_arc.clone(); + read_handles.push(thread::spawn(move || { + let rkv = rkv_arc.read().expect("rkv"); + let reader = rkv.read().expect("reader"); + let value = match store.get(&reader, i.to_string()) { + Ok(Some(Value::U64(value))) => value, + Ok(Some(_)) => panic!("value type unexpected"), + Ok(None) => panic!("value not found"), + Err(err) => panic!(err), + }; + assert_eq!(value, i); + value + })); + } + + // Sum the values returned from the threads and confirm that they're + // equal to the sum of values written to the threads. + let thread_sum: u64 = read_handles.into_iter().map(|handle| handle.join().expect("value")).sum(); + assert_eq!(thread_sum, (0..num_threads).sum()); +} + +#[test] +fn test_use_value_as_key() { + let root = Builder::new().prefix("test_use_value_as_key").tempdir().expect("tempdir"); + let rkv = Rkv::new::<Lmdb>(root.path()).expect("new succeeded"); + let store = rkv.open_single("store", StoreOptions::create()).expect("opened"); + + { + let mut writer = rkv.write().expect("writer"); + store.put(&mut writer, "foo", &Value::Str("bar")).expect("wrote"); + store.put(&mut writer, "bar", &Value::Str("baz")).expect("wrote"); + writer.commit().expect("committed"); + } + + // It's possible to retrieve a value with a Reader and then use it + // as a key with a Writer. 
+ { + let reader = &rkv.read().unwrap(); + if let Some(Value::Str(key)) = store.get(reader, "foo").expect("read") { + let mut writer = rkv.write().expect("writer"); + store.delete(&mut writer, key).expect("deleted"); + writer.commit().expect("committed"); + } + } + + { + let mut writer = rkv.write().expect("writer"); + store.put(&mut writer, "bar", &Value::Str("baz")).expect("wrote"); + writer.commit().expect("committed"); + } + + // You can also retrieve a Value with a Writer and then use it as a key + // with the same Writer if you copy the value to an owned type + // so the Writer isn't still being borrowed by the retrieved value + // when you try to borrow the Writer again to modify that value. + { + let mut writer = rkv.write().expect("writer"); + if let Some(Value::Str(value)) = store.get(&writer, "foo").expect("read") { + let key = value.to_owned(); + store.delete(&mut writer, key).expect("deleted"); + writer.commit().expect("committed"); + } + } + + { + let name1 = rkv.open_single("name1", StoreOptions::create()).expect("opened"); + let name2 = rkv.open_single("name2", StoreOptions::create()).expect("opened"); + let mut writer = rkv.write().expect("writer"); + name1.put(&mut writer, "key1", &Value::Str("bar")).expect("wrote"); + name1.put(&mut writer, "bar", &Value::Str("baz")).expect("wrote"); + name2.put(&mut writer, "key2", &Value::Str("bar")).expect("wrote"); + name2.put(&mut writer, "bar", &Value::Str("baz")).expect("wrote"); + writer.commit().expect("committed"); + } + + // You can also iterate (store, key) pairs to retrieve foreign keys, + // then iterate those foreign keys to modify/delete them. + // + // You need to open the stores in advance, since opening a store + // uses a write transaction internally, so opening them while a writer + // is extant will hang. 
+ // + // And you need to copy the values to an owned type so the Writer isn't + // still being borrowed by a retrieved value when you try to borrow + // the Writer again to modify another value. + let fields = vec![ + (rkv.open_single("name1", StoreOptions::create()).expect("opened"), "key1"), + (rkv.open_single("name2", StoreOptions::create()).expect("opened"), "key2"), + ]; + { + let mut foreignkeys = Vec::new(); + let mut writer = rkv.write().expect("writer"); + for (store, key) in fields.iter() { + if let Some(Value::Str(value)) = store.get(&writer, key).expect("read") { + foreignkeys.push((store, value.to_owned())); + } + } + for (store, key) in foreignkeys.iter() { + store.delete(&mut writer, key).expect("deleted"); + } + writer.commit().expect("committed"); + } +} diff --git a/third_party/rust/rkv/tests/env-migration.rs b/third_party/rust/rkv/tests/env-migration.rs new file mode 100644 index 0000000000..835f2a645f --- /dev/null +++ b/third_party/rust/rkv/tests/env-migration.rs @@ -0,0 +1,471 @@ +// Copyright 2018-2019 Mozilla +// +// Licensed under the Apache License, Version 2.0 (the "License"); you may not use +// this file except in compliance with the License. You may obtain a copy of the +// License at http://www.apache.org/licenses/LICENSE-2.0 +// Unless required by applicable law or agreed to in writing, software distributed +// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR +// CONDITIONS OF ANY KIND, either express or implied. See the License for the +// specific language governing permissions and limitations under the License. + +use std::{ + fs, + path::Path, +}; + +use tempfile::Builder; + +use rkv::{ + backend::{ + Lmdb, + LmdbEnvironment, + SafeMode, + SafeModeEnvironment, + }, + Manager, + Migrator, + Rkv, + StoreOptions, + Value, +}; + +macro_rules! 
populate_store { + ($env:expr) => { + let store = $env.open_single("store", StoreOptions::create()).expect("opened"); + let mut writer = $env.write().expect("writer"); + store.put(&mut writer, "foo", &Value::I64(1234)).expect("wrote"); + store.put(&mut writer, "bar", &Value::Bool(true)).expect("wrote"); + store.put(&mut writer, "baz", &Value::Str("héllo, yöu")).expect("wrote"); + writer.commit().expect("committed"); + }; +} + +#[test] +fn test_open_migrator_lmdb_to_safe() { + let root = Builder::new().prefix("test_open_migrator_lmdb_to_safe").tempdir().expect("tempdir"); + fs::create_dir_all(root.path()).expect("dir created"); + + // Populate source environment and persist to disk. + { + let src_env = Rkv::new::<Lmdb>(root.path()).expect("new succeeded"); + populate_store!(&src_env); + src_env.sync(true).expect("synced"); + } + // Check if the files were written to disk. + { + let mut datamdb = root.path().to_path_buf(); + let mut lockmdb = root.path().to_path_buf(); + datamdb.push("data.mdb"); + lockmdb.push("lock.mdb"); + assert!(datamdb.exists()); + assert!(lockmdb.exists()); + } + // Verify that database was written to disk. + { + let src_env = Rkv::new::<Lmdb>(root.path()).expect("new succeeded"); + let store = src_env.open_single("store", StoreOptions::default()).expect("opened"); + let reader = src_env.read().expect("reader"); + assert_eq!(store.get(&reader, "foo").expect("read"), Some(Value::I64(1234))); + assert_eq!(store.get(&reader, "bar").expect("read"), Some(Value::Bool(true))); + assert_eq!(store.get(&reader, "baz").expect("read"), Some(Value::Str("héllo, yöu"))); + } + // Open and migrate. + { + let dst_env = Rkv::new::<SafeMode>(root.path()).expect("new succeeded"); + Migrator::open_and_migrate_lmdb_to_safe_mode(root.path(), |builder| builder, &dst_env).expect("migrated"); + } + // Verify that the database was indeed migrated. 
+ { + let dst_env = Rkv::new::<SafeMode>(root.path()).expect("new succeeded"); + let store = dst_env.open_single("store", StoreOptions::default()).expect("opened"); + let reader = dst_env.read().expect("reader"); + assert_eq!(store.get(&reader, "foo").expect("read"), Some(Value::I64(1234))); + assert_eq!(store.get(&reader, "bar").expect("read"), Some(Value::Bool(true))); + assert_eq!(store.get(&reader, "baz").expect("read"), Some(Value::Str("héllo, yöu"))); + } + // Check if the old files were deleted from disk. + { + let mut datamdb = root.path().to_path_buf(); + let mut lockmdb = root.path().to_path_buf(); + datamdb.push("data.mdb"); + lockmdb.push("lock.mdb"); + assert!(!datamdb.exists()); + assert!(!lockmdb.exists()); + } +} + +#[test] +fn test_open_migrator_safe_to_lmdb() { + let root = Builder::new().prefix("test_open_migrator_safe_to_lmdb").tempdir().expect("tempdir"); + fs::create_dir_all(root.path()).expect("dir created"); + + // Populate source environment and persist to disk. + { + let src_env = Rkv::new::<SafeMode>(root.path()).expect("new succeeded"); + populate_store!(&src_env); + src_env.sync(true).expect("synced"); + } + // Check if the files were written to disk. + { + let mut safebin = root.path().to_path_buf(); + safebin.push("data.safe.bin"); + assert!(safebin.exists()); + } + // Verify that database was written to disk. + { + let src_env = Rkv::new::<SafeMode>(root.path()).expect("new succeeded"); + let store = src_env.open_single("store", StoreOptions::default()).expect("opened"); + let reader = src_env.read().expect("reader"); + assert_eq!(store.get(&reader, "foo").expect("read"), Some(Value::I64(1234))); + assert_eq!(store.get(&reader, "bar").expect("read"), Some(Value::Bool(true))); + assert_eq!(store.get(&reader, "baz").expect("read"), Some(Value::Str("héllo, yöu"))); + } + // Open and migrate. 
+ { + let dst_env = Rkv::new::<Lmdb>(root.path()).expect("new succeeded"); + Migrator::open_and_migrate_safe_mode_to_lmdb(root.path(), |builder| builder, &dst_env).expect("migrated"); + } + // Verify that the database was indeed migrated. + { + let dst_env = Rkv::new::<Lmdb>(root.path()).expect("new succeeded"); + let store = dst_env.open_single("store", StoreOptions::default()).expect("opened"); + let reader = dst_env.read().expect("reader"); + assert_eq!(store.get(&reader, "foo").expect("read"), Some(Value::I64(1234))); + assert_eq!(store.get(&reader, "bar").expect("read"), Some(Value::Bool(true))); + assert_eq!(store.get(&reader, "baz").expect("read"), Some(Value::Str("héllo, yöu"))); + } + // Check if the old files were deleted from disk. + { + let mut safebin = root.path().to_path_buf(); + safebin.push("data.safe.bin"); + assert!(!safebin.exists()); + } +} + +#[test] +fn test_open_migrator_round_trip() { + let root = Builder::new().prefix("test_open_migrator_lmdb_to_safe").tempdir().expect("tempdir"); + fs::create_dir_all(root.path()).expect("dir created"); + + // Populate source environment and persist to disk. + { + let src_env = Rkv::new::<Lmdb>(root.path()).expect("new succeeded"); + populate_store!(&src_env); + src_env.sync(true).expect("synced"); + } + // Open and migrate. + { + let dst_env = Rkv::new::<SafeMode>(root.path()).expect("new succeeded"); + Migrator::open_and_migrate_lmdb_to_safe_mode(root.path(), |builder| builder, &dst_env).expect("migrated"); + } + // Open and migrate back. + { + let dst_env = Rkv::new::<Lmdb>(root.path()).expect("new succeeded"); + Migrator::open_and_migrate_safe_mode_to_lmdb(root.path(), |builder| builder, &dst_env).expect("migrated"); + } + // Verify that the database was indeed migrated twice. 
+ { + let dst_env = Rkv::new::<Lmdb>(root.path()).expect("new succeeded"); + let store = dst_env.open_single("store", StoreOptions::default()).expect("opened"); + let reader = dst_env.read().expect("reader"); + assert_eq!(store.get(&reader, "foo").expect("read"), Some(Value::I64(1234))); + assert_eq!(store.get(&reader, "bar").expect("read"), Some(Value::Bool(true))); + assert_eq!(store.get(&reader, "baz").expect("read"), Some(Value::Str("héllo, yöu"))); + } + // Check if the right files are finally present on disk. + { + let mut datamdb = root.path().to_path_buf(); + let mut lockmdb = root.path().to_path_buf(); + let mut safebin = root.path().to_path_buf(); + datamdb.push("data.mdb"); + lockmdb.push("lock.mdb"); + safebin.push("data.safe.bin"); + assert!(datamdb.exists()); + assert!(lockmdb.exists()); + assert!(!safebin.exists()); + } +} + +#[test] +fn test_easy_migrator_no_dir_1() { + let root = Builder::new().prefix("test_easy_migrator_no_dir").tempdir().expect("tempdir"); + fs::create_dir_all(root.path()).expect("dir created"); + + // This won't fail with IoError even though the path is a bogus path, because this + // is the "easy mode" migration which automatically handles (ignores) this error. 
+ let dst_env = Rkv::new::<SafeMode>(root.path()).expect("new succeeded"); + Migrator::easy_migrate_lmdb_to_safe_mode(Path::new("bogus"), &dst_env).expect("migrated"); + + let mut datamdb = root.path().to_path_buf(); + let mut lockmdb = root.path().to_path_buf(); + let mut safebin = root.path().to_path_buf(); + datamdb.push("data.mdb"); + lockmdb.push("lock.mdb"); + safebin.push("data.safe.bin"); + assert!(!datamdb.exists()); + assert!(!lockmdb.exists()); + assert!(!safebin.exists()); // safe mode doesn't write an empty db to disk +} + +#[test] +fn test_easy_migrator_no_dir_2() { + let root = Builder::new().prefix("test_easy_migrator_no_dir").tempdir().expect("tempdir"); + fs::create_dir_all(root.path()).expect("dir created"); + + // This won't fail with IoError even though the path is a bogus path, because this + // is the "easy mode" migration which automatically handles (ignores) this error. + let dst_env = Rkv::new::<Lmdb>(root.path()).expect("new succeeded"); + Migrator::easy_migrate_safe_mode_to_lmdb(Path::new("bogus"), &dst_env).expect("migrated"); + + let mut datamdb = root.path().to_path_buf(); + let mut lockmdb = root.path().to_path_buf(); + let mut safebin = root.path().to_path_buf(); + datamdb.push("data.mdb"); + lockmdb.push("lock.mdb"); + safebin.push("data.safe.bin"); + assert!(datamdb.exists()); // lmdb writes an empty db to disk + assert!(lockmdb.exists()); + assert!(!safebin.exists()); +} + +#[test] +fn test_easy_migrator_invalid_1() { + let root = Builder::new().prefix("test_easy_migrator_invalid").tempdir().expect("tempdir"); + fs::create_dir_all(root.path()).expect("dir created"); + + let dbfile = root.path().join("data.mdb"); + fs::write(dbfile, "bogus").expect("dbfile created"); + + // This won't fail with FileInvalid even though the database is a bogus file, because this + // is the "easy mode" migration which automatically handles (ignores) this error. 
+ let dst_env = Rkv::new::<SafeMode>(root.path()).expect("new succeeded"); + Migrator::easy_migrate_lmdb_to_safe_mode(root.path(), &dst_env).expect("migrated"); + + let mut datamdb = root.path().to_path_buf(); + let mut lockmdb = root.path().to_path_buf(); + let mut safebin = root.path().to_path_buf(); + datamdb.push("data.mdb"); + lockmdb.push("lock.mdb"); + safebin.push("data.safe.bin"); + assert!(datamdb.exists()); // corrupted db isn't deleted + assert!(lockmdb.exists()); + assert!(!safebin.exists()); +} + +#[test] +fn test_easy_migrator_invalid_2() { + let root = Builder::new().prefix("test_easy_migrator_invalid").tempdir().expect("tempdir"); + fs::create_dir_all(root.path()).expect("dir created"); + + let dbfile = root.path().join("data.safe.bin"); + fs::write(dbfile, "bogus").expect("dbfile created"); + + // This won't fail with FileInvalid even though the database is a bogus file, because this + // is the "easy mode" migration which automatically handles (ignores) this error. + let dst_env = Rkv::new::<Lmdb>(root.path()).expect("new succeeded"); + Migrator::easy_migrate_safe_mode_to_lmdb(root.path(), &dst_env).expect("migrated"); + + let mut datamdb = root.path().to_path_buf(); + let mut lockmdb = root.path().to_path_buf(); + let mut safebin = root.path().to_path_buf(); + datamdb.push("data.mdb"); + lockmdb.push("lock.mdb"); + safebin.push("data.safe.bin"); + assert!(datamdb.exists()); // lmdb writes an empty db to disk + assert!(lockmdb.exists()); + assert!(safebin.exists()); // corrupted db isn't deleted +} + +#[test] +#[should_panic(expected = "migrated: SourceEmpty")] +fn test_migrator_lmdb_to_safe_1() { + let root = Builder::new().prefix("test_migrate_lmdb_to_safe").tempdir().expect("tempdir"); + fs::create_dir_all(root.path()).expect("dir created"); + + let src_env = Rkv::new::<Lmdb>(root.path()).expect("new succeeded"); + let dst_env = Rkv::new::<SafeMode>(root.path()).expect("new succeeded"); + Migrator::migrate_lmdb_to_safe_mode(&src_env, 
&dst_env).expect("migrated"); +} + +#[test] +#[should_panic(expected = "migrated: DestinationNotEmpty")] +fn test_migrator_lmdb_to_safe_2() { + let root = Builder::new().prefix("test_migrate_lmdb_to_safe").tempdir().expect("tempdir"); + fs::create_dir_all(root.path()).expect("dir created"); + + let src_env = Rkv::new::<Lmdb>(root.path()).expect("new succeeded"); + populate_store!(&src_env); + let dst_env = Rkv::new::<SafeMode>(root.path()).expect("new succeeded"); + populate_store!(&dst_env); + Migrator::migrate_lmdb_to_safe_mode(&src_env, &dst_env).expect("migrated"); +} + +#[test] +fn test_migrator_lmdb_to_safe_3() { + let root = Builder::new().prefix("test_migrate_lmdb_to_safe").tempdir().expect("tempdir"); + fs::create_dir_all(root.path()).expect("dir created"); + + let src_env = Rkv::new::<Lmdb>(root.path()).expect("new succeeded"); + populate_store!(&src_env); + let dst_env = Rkv::new::<SafeMode>(root.path()).expect("new succeeded"); + Migrator::migrate_lmdb_to_safe_mode(&src_env, &dst_env).expect("migrated"); + + let store = dst_env.open_single("store", StoreOptions::default()).expect("opened"); + let reader = dst_env.read().expect("reader"); + assert_eq!(store.get(&reader, "foo").expect("read"), Some(Value::I64(1234))); + assert_eq!(store.get(&reader, "bar").expect("read"), Some(Value::Bool(true))); + assert_eq!(store.get(&reader, "baz").expect("read"), Some(Value::Str("héllo, yöu"))); +} + +#[test] +#[should_panic(expected = "migrated: SourceEmpty")] +fn test_migrator_safe_to_lmdb_1() { + let root = Builder::new().prefix("test_migrate_safe_to_lmdb").tempdir().expect("tempdir"); + fs::create_dir_all(root.path()).expect("dir created"); + + let src_env = Rkv::new::<SafeMode>(root.path()).expect("new succeeded"); + let dst_env = Rkv::new::<Lmdb>(root.path()).expect("new succeeded"); + Migrator::migrate_safe_mode_to_lmdb(&src_env, &dst_env).expect("migrated"); +} + +#[test] +#[should_panic(expected = "migrated: DestinationNotEmpty")] +fn 
test_migrator_safe_to_lmdb_2() { + let root = Builder::new().prefix("test_migrate_safe_to_lmdb").tempdir().expect("tempdir"); + fs::create_dir_all(root.path()).expect("dir created"); + + let src_env = Rkv::new::<SafeMode>(root.path()).expect("new succeeded"); + populate_store!(&src_env); + let dst_env = Rkv::new::<Lmdb>(root.path()).expect("new succeeded"); + populate_store!(&dst_env); + Migrator::migrate_safe_mode_to_lmdb(&src_env, &dst_env).expect("migrated"); +} + +#[test] +fn test_migrator_safe_to_lmdb_3() { + let root = Builder::new().prefix("test_migrate_safe_to_lmdb").tempdir().expect("tempdir"); + fs::create_dir_all(root.path()).expect("dir created"); + + let src_env = Rkv::new::<SafeMode>(root.path()).expect("new succeeded"); + populate_store!(&src_env); + let dst_env = Rkv::new::<Lmdb>(root.path()).expect("new succeeded"); + Migrator::migrate_safe_mode_to_lmdb(&src_env, &dst_env).expect("migrated"); + + let store = dst_env.open_single("store", StoreOptions::default()).expect("opened"); + let reader = dst_env.read().expect("reader"); + assert_eq!(store.get(&reader, "foo").expect("read"), Some(Value::I64(1234))); + assert_eq!(store.get(&reader, "bar").expect("read"), Some(Value::Bool(true))); + assert_eq!(store.get(&reader, "baz").expect("read"), Some(Value::Str("héllo, yöu"))); +} + +#[test] +fn test_easy_migrator_failed_migration_1() { + let root = Builder::new().prefix("test_easy_migrator_failed_migration_1").tempdir().expect("tempdir"); + fs::create_dir_all(root.path()).expect("dir created"); + + let dbfile = root.path().join("data.mdb"); + fs::write(&dbfile, "bogus").expect("bogus dbfile created"); + + // This won't fail with FileInvalid even though the database is a bogus file, because this + // is the "easy mode" migration which automatically handles (ignores) this error. 
+ let dst_env = Rkv::new::<SafeMode>(root.path()).expect("new succeeded"); + Migrator::easy_migrate_lmdb_to_safe_mode(root.path(), &dst_env).expect("migrated"); + + // Populate destination environment and persist to disk. + populate_store!(&dst_env); + dst_env.sync(true).expect("synced"); + + // Delete bogus file and create a valid source environment in its place. + fs::remove_file(&dbfile).expect("bogus dbfile removed"); + let src_env = Rkv::new::<Lmdb>(root.path()).expect("new succeeded"); + populate_store!(&src_env); + src_env.sync(true).expect("synced"); + + // Attempt to migrate again. This should *NOT* fail with DestinationNotEmpty. + Migrator::easy_migrate_lmdb_to_safe_mode(root.path(), &dst_env).expect("migrated"); +} + +#[test] +fn test_easy_migrator_failed_migration_2() { + let root = Builder::new().prefix("test_easy_migrator_failed_migration_2").tempdir().expect("tempdir"); + fs::create_dir_all(root.path()).expect("dir created"); + + let dbfile = root.path().join("data.safe.bin"); + fs::write(&dbfile, "bogus").expect("bogus dbfile created"); + + // This won't fail with FileInvalid even though the database is a bogus file, because this + // is the "easy mode" migration which automatically handles (ignores) this error. + let dst_env = Rkv::new::<Lmdb>(root.path()).expect("new succeeded"); + Migrator::easy_migrate_safe_mode_to_lmdb(root.path(), &dst_env).expect("migrated"); + + // Populate destination environment and persist to disk. + populate_store!(&dst_env); + dst_env.sync(true).expect("synced"); + + // Delete bogus file and create a valid source environment in its place. + fs::remove_file(&dbfile).expect("bogus dbfile removed"); + let src_env = Rkv::new::<SafeMode>(root.path()).expect("new succeeded"); + populate_store!(&src_env); + src_env.sync(true).expect("synced"); + + // Attempt to migrate again. This should *NOT* fail with DestinationNotEmpty. 
+ Migrator::easy_migrate_safe_mode_to_lmdb(root.path(), &dst_env).expect("migrated"); +} + +fn test_easy_migrator_from_manager_failed_migration_1() { + let root = Builder::new().prefix("test_easy_migrator_from_manager_failed_migration_1").tempdir().expect("tempdir"); + fs::create_dir_all(root.path()).expect("dir created"); + + { + let mut src_manager = Manager::<LmdbEnvironment>::singleton().write().unwrap(); + let created_src_arc = src_manager.get_or_create(root.path(), Rkv::new::<Lmdb>).unwrap(); + let src_env = created_src_arc.read().unwrap(); + populate_store!(&src_env); + src_env.sync(true).expect("synced"); + } + { + let mut dst_manager = Manager::<SafeModeEnvironment>::singleton().write().unwrap(); + let created_dst_arc_1 = dst_manager.get_or_create(root.path(), Rkv::new::<SafeMode>).unwrap(); + let dst_env_1 = created_dst_arc_1.read().unwrap(); + populate_store!(&dst_env_1); + dst_env_1.sync(true).expect("synced"); + } + + // Attempt to migrate again in a new env. This should *NOT* fail with DestinationNotEmpty. 
+ let dst_manager = Manager::<SafeModeEnvironment>::singleton().read().unwrap(); + let created_dst_arc_2 = dst_manager.get(root.path()).unwrap().unwrap(); + let dst_env_2 = created_dst_arc_2.read().unwrap(); + Migrator::easy_migrate_lmdb_to_safe_mode(root.path(), dst_env_2).expect("migrated"); +} + +fn test_easy_migrator_from_manager_failed_migration_2() { + let root = Builder::new().prefix("test_easy_migrator_from_manager_failed_migration_2").tempdir().expect("tempdir"); + fs::create_dir_all(root.path()).expect("dir created"); + + { + let mut src_manager = Manager::<SafeModeEnvironment>::singleton().write().unwrap(); + let created_src_arc = src_manager.get_or_create(root.path(), Rkv::new::<SafeMode>).unwrap(); + let src_env = created_src_arc.read().unwrap(); + populate_store!(&src_env); + src_env.sync(true).expect("synced"); + } + { + let mut dst_manager = Manager::<LmdbEnvironment>::singleton().write().unwrap(); + let created_dst_arc_1 = dst_manager.get_or_create(root.path(), Rkv::new::<Lmdb>).unwrap(); + let dst_env_1 = created_dst_arc_1.read().unwrap(); + populate_store!(&dst_env_1); + dst_env_1.sync(true).expect("synced"); + } + + // Attempt to migrate again in a new env. This should *NOT* fail with DestinationNotEmpty. 
+ let dst_manager = Manager::<LmdbEnvironment>::singleton().read().unwrap(); + let created_dst_arc_2 = dst_manager.get(root.path()).unwrap().unwrap(); + let dst_env_2 = created_dst_arc_2.read().unwrap(); + Migrator::easy_migrate_safe_mode_to_lmdb(root.path(), dst_env_2).expect("migrated"); +} + +#[test] +fn test_easy_migrator_from_manager_failed_migration() { + test_easy_migrator_from_manager_failed_migration_1(); + test_easy_migrator_from_manager_failed_migration_2(); +} diff --git a/third_party/rust/rkv/tests/env-safe.rs b/third_party/rust/rkv/tests/env-safe.rs new file mode 100644 index 0000000000..65dad4014e --- /dev/null +++ b/third_party/rust/rkv/tests/env-safe.rs @@ -0,0 +1,1090 @@ +// Copyright 2018-2019 Mozilla +// +// Licensed under the Apache License, Version 2.0 (the "License"); you may not use +// this file except in compliance with the License. You may obtain a copy of the +// License at http://www.apache.org/licenses/LICENSE-2.0 +// Unless required by applicable law or agreed to in writing, software distributed +// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR +// CONDITIONS OF ANY KIND, either express or implied. See the License for the +// specific language governing permissions and limitations under the License. + +// TODO: change this back to `clippy::cognitive_complexity` when Clippy stable +// deprecates `clippy::cyclomatic_complexity`. 
+#![allow(clippy::complexity)] + +use std::{ + fs, + path::Path, + str, + sync::{ + Arc, + RwLock, + }, + thread, +}; + +use byteorder::{ + ByteOrder, + LittleEndian, +}; +use tempfile::Builder; + +use rkv::{ + backend::{ + BackendEnvironmentBuilder, + SafeMode, + SafeModeDatabase, + SafeModeEnvironment, + SafeModeRwTransaction, + }, + Rkv, + SingleStore, + StoreError, + StoreOptions, + Value, + Writer, +}; + +fn check_rkv(k: &Rkv<SafeModeEnvironment>) { + let _ = k.open_single(None, StoreOptions::create()).expect("created default"); + + let s = k.open_single("s", StoreOptions::create()).expect("opened"); + let reader = k.read().expect("reader"); + + let result = s.get(&reader, "foo"); + assert_eq!(None, result.expect("success but no value")); +} + +/// We can't open a directory that doesn't exist. +#[test] +fn test_open_fails_safe() { + let root = Builder::new().prefix("test_open_fails_safe").tempdir().expect("tempdir"); + assert!(root.path().exists()); + + let nope = root.path().join("nope/"); + assert!(!nope.exists()); + + let pb = nope.to_path_buf(); + match Rkv::new::<SafeMode>(nope.as_path()).err() { + Some(StoreError::UnsuitableEnvironmentPath(p)) => { + assert_eq!(pb, p); + }, + _ => panic!("expected error"), + }; +} + +#[test] +fn test_open_safe() { + let root = Builder::new().prefix("test_open_safe").tempdir().expect("tempdir"); + println!("Root path: {:?}", root.path()); + fs::create_dir_all(root.path()).expect("dir created"); + assert!(root.path().is_dir()); + + let k = Rkv::new::<SafeMode>(root.path()).expect("new succeeded"); + check_rkv(&k); +} + +#[test] +fn test_open_from_builder_safe() { + let root = Builder::new().prefix("test_open_from_builder_safe").tempdir().expect("tempdir"); + println!("Root path: {:?}", root.path()); + fs::create_dir_all(root.path()).expect("dir created"); + assert!(root.path().is_dir()); + + let mut builder = Rkv::environment_builder::<SafeMode>(); + builder.set_max_dbs(2); + + let k = Rkv::from_builder(root.path(), 
builder).expect("rkv"); + check_rkv(&k); +} + +#[test] +fn test_open_from_builder_with_dir_safe_1() { + let root = Builder::new().prefix("test_open_from_builder_safe").tempdir().expect("tempdir"); + println!("Root path: {:?}", root.path()); + + let mut builder = Rkv::environment_builder::<SafeMode>(); + builder.set_max_dbs(2); + builder.set_make_dir_if_needed(true); + + let k = Rkv::from_builder(root.path(), builder).expect("rkv"); + check_rkv(&k); +} + +#[test] +#[should_panic(expected = "rkv: UnsuitableEnvironmentPath(\"bogus\")")] +fn test_open_from_builder_with_dir_safe_2() { + let root = Path::new("bogus"); + println!("Root path: {:?}", root); + assert!(!root.is_dir()); + + let mut builder = Rkv::environment_builder::<SafeMode>(); + builder.set_max_dbs(2); + + let k = Rkv::from_builder(root, builder).expect("rkv"); + check_rkv(&k); +} + +#[test] +#[should_panic(expected = "opened: DbsFull")] +fn test_create_with_capacity_safe_1() { + let root = Builder::new().prefix("test_create_with_capacity_safe").tempdir().expect("tempdir"); + println!("Root path: {:?}", root.path()); + fs::create_dir_all(root.path()).expect("dir created"); + assert!(root.path().is_dir()); + + let k = Rkv::with_capacity::<SafeMode>(root.path(), 1).expect("rkv"); + check_rkv(&k); + + // This errors with "opened: DbsFull" because we specified a capacity of one (database), + // and check_rkv already opened one (plus the default database, which doesn't count + // against the limit). 
+ let _zzz = k.open_single("zzz", StoreOptions::create()).expect("opened"); +} + +#[test] +fn test_create_with_capacity_safe_2() { + let root = Builder::new().prefix("test_create_with_capacity_safe").tempdir().expect("tempdir"); + println!("Root path: {:?}", root.path()); + fs::create_dir_all(root.path()).expect("dir created"); + assert!(root.path().is_dir()); + + let k = Rkv::with_capacity::<SafeMode>(root.path(), 1).expect("rkv"); + check_rkv(&k); + + // This doesn't error with "opened: DbsFull" because even though we specified a capacity + // of one (database), and check_rkv already opened one, the default database doesn't + // count against the limit). + let _zzz = k.open_single(None, StoreOptions::create()).expect("opened"); +} + +#[test] +#[should_panic(expected = "opened: SafeModeError(DbNotFoundError)")] +fn test_open_with_capacity_safe_1() { + let root = Builder::new().prefix("test_open_with_capacity_safe").tempdir().expect("tempdir"); + println!("Root path: {:?}", root.path()); + fs::create_dir_all(root.path()).expect("dir created"); + assert!(root.path().is_dir()); + + let k = Rkv::with_capacity::<SafeMode>(root.path(), 1).expect("rkv"); + check_rkv(&k); + + let _zzz = k.open_single("zzz", StoreOptions::default()).expect("opened"); +} + +#[test] +fn test_open_with_capacity_safe_2() { + let root = Builder::new().prefix("test_open_with_capacity_safe").tempdir().expect("tempdir"); + println!("Root path: {:?}", root.path()); + fs::create_dir_all(root.path()).expect("dir created"); + assert!(root.path().is_dir()); + + let k = Rkv::with_capacity::<SafeMode>(root.path(), 1).expect("rkv"); + check_rkv(&k); + + let _zzz = k.open_single(None, StoreOptions::default()).expect("opened"); +} + +#[test] +fn test_list_dbs_safe_1() { + let root = Builder::new().prefix("test_list_dbs_safe").tempdir().expect("tempdir"); + println!("Root path: {:?}", root.path()); + fs::create_dir_all(root.path()).expect("dir created"); + assert!(root.path().is_dir()); + + let k = 
Rkv::with_capacity::<SafeMode>(root.path(), 1).expect("rkv"); + check_rkv(&k); + + let mut dbs = k.get_dbs().unwrap(); + dbs.sort(); + assert_eq!(dbs, vec![None, Some("s".to_owned())]); +} + +#[test] +fn test_list_dbs_safe_2() { + let root = Builder::new().prefix("test_list_dbs_safe").tempdir().expect("tempdir"); + println!("Root path: {:?}", root.path()); + fs::create_dir_all(root.path()).expect("dir created"); + assert!(root.path().is_dir()); + + let k = Rkv::with_capacity::<SafeMode>(root.path(), 2).expect("rkv"); + check_rkv(&k); + + let _ = k.open_single("zzz", StoreOptions::create()).expect("opened"); + + let mut dbs = k.get_dbs().unwrap(); + dbs.sort(); + assert_eq!(dbs, vec![None, Some("s".to_owned()), Some("zzz".to_owned())]); +} + +#[test] +fn test_list_dbs_safe_3() { + let root = Builder::new().prefix("test_list_dbs_safe").tempdir().expect("tempdir"); + println!("Root path: {:?}", root.path()); + fs::create_dir_all(root.path()).expect("dir created"); + assert!(root.path().is_dir()); + + let k = Rkv::with_capacity::<SafeMode>(root.path(), 0).expect("rkv"); + + let _ = k.open_single(None, StoreOptions::create()).expect("opened"); + + let mut dbs = k.get_dbs().unwrap(); + dbs.sort(); + assert_eq!(dbs, vec![None]); +} + +#[test] +fn test_round_trip_and_transactions_safe() { + let root = Builder::new().prefix("test_round_trip_and_transactions_safe").tempdir().expect("tempdir"); + fs::create_dir_all(root.path()).expect("dir created"); + + let k = Rkv::new::<SafeMode>(root.path()).expect("new succeeded"); + let sk = k.open_single("sk", StoreOptions::create()).expect("opened"); + + { + let mut writer = k.write().expect("writer"); + sk.put(&mut writer, "foo", &Value::I64(1234)).expect("wrote"); + sk.put(&mut writer, "noo", &Value::F64(1234.0.into())).expect("wrote"); + sk.put(&mut writer, "bar", &Value::Bool(true)).expect("wrote"); + sk.put(&mut writer, "baz", &Value::Str("héllo, yöu")).expect("wrote"); + assert_eq!(sk.get(&writer, "foo").expect("read"), 
Some(Value::I64(1234))); + assert_eq!(sk.get(&writer, "noo").expect("read"), Some(Value::F64(1234.0.into()))); + assert_eq!(sk.get(&writer, "bar").expect("read"), Some(Value::Bool(true))); + assert_eq!(sk.get(&writer, "baz").expect("read"), Some(Value::Str("héllo, yöu"))); + + // Isolation. Reads won't return values. + let r = &k.read().unwrap(); + assert_eq!(sk.get(r, "foo").expect("read"), None); + assert_eq!(sk.get(r, "bar").expect("read"), None); + assert_eq!(sk.get(r, "baz").expect("read"), None); + } + + // Dropped: tx rollback. Reads will still return nothing. + + { + let r = &k.read().unwrap(); + assert_eq!(sk.get(r, "foo").expect("read"), None); + assert_eq!(sk.get(r, "bar").expect("read"), None); + assert_eq!(sk.get(r, "baz").expect("read"), None); + } + + { + let mut writer = k.write().expect("writer"); + sk.put(&mut writer, "foo", &Value::I64(1234)).expect("wrote"); + sk.put(&mut writer, "bar", &Value::Bool(true)).expect("wrote"); + sk.put(&mut writer, "baz", &Value::Str("héllo, yöu")).expect("wrote"); + assert_eq!(sk.get(&writer, "foo").expect("read"), Some(Value::I64(1234))); + assert_eq!(sk.get(&writer, "bar").expect("read"), Some(Value::Bool(true))); + assert_eq!(sk.get(&writer, "baz").expect("read"), Some(Value::Str("héllo, yöu"))); + + writer.commit().expect("committed"); + } + + // Committed. Reads will succeed. 
+ + { + let r = k.read().unwrap(); + assert_eq!(sk.get(&r, "foo").expect("read"), Some(Value::I64(1234))); + assert_eq!(sk.get(&r, "bar").expect("read"), Some(Value::Bool(true))); + assert_eq!(sk.get(&r, "baz").expect("read"), Some(Value::Str("héllo, yöu"))); + } + + { + let mut writer = k.write().expect("writer"); + sk.delete(&mut writer, "foo").expect("deleted"); + sk.delete(&mut writer, "bar").expect("deleted"); + sk.delete(&mut writer, "baz").expect("deleted"); + assert_eq!(sk.get(&writer, "foo").expect("read"), None); + assert_eq!(sk.get(&writer, "bar").expect("read"), None); + assert_eq!(sk.get(&writer, "baz").expect("read"), None); + + // Isolation. Reads still return values. + let r = k.read().unwrap(); + assert_eq!(sk.get(&r, "foo").expect("read"), Some(Value::I64(1234))); + assert_eq!(sk.get(&r, "bar").expect("read"), Some(Value::Bool(true))); + assert_eq!(sk.get(&r, "baz").expect("read"), Some(Value::Str("héllo, yöu"))); + } + + // Dropped: tx rollback. Reads will still return values. + + { + let r = k.read().unwrap(); + assert_eq!(sk.get(&r, "foo").expect("read"), Some(Value::I64(1234))); + assert_eq!(sk.get(&r, "bar").expect("read"), Some(Value::Bool(true))); + assert_eq!(sk.get(&r, "baz").expect("read"), Some(Value::Str("héllo, yöu"))); + } + + { + let mut writer = k.write().expect("writer"); + sk.delete(&mut writer, "foo").expect("deleted"); + sk.delete(&mut writer, "bar").expect("deleted"); + sk.delete(&mut writer, "baz").expect("deleted"); + assert_eq!(sk.get(&writer, "foo").expect("read"), None); + assert_eq!(sk.get(&writer, "bar").expect("read"), None); + assert_eq!(sk.get(&writer, "baz").expect("read"), None); + + writer.commit().expect("committed"); + } + + // Committed. Reads will succeed but return None to indicate a missing value. 
+ + { + let r = k.read().unwrap(); + assert_eq!(sk.get(&r, "foo").expect("read"), None); + assert_eq!(sk.get(&r, "bar").expect("read"), None); + assert_eq!(sk.get(&r, "baz").expect("read"), None); + } +} + +#[test] +fn test_single_store_clear_safe() { + let root = Builder::new().prefix("test_single_store_clear_safe").tempdir().expect("tempdir"); + fs::create_dir_all(root.path()).expect("dir created"); + + let k = Rkv::new::<SafeMode>(root.path()).expect("new succeeded"); + let sk = k.open_single("sk", StoreOptions::create()).expect("opened"); + + { + let mut writer = k.write().expect("writer"); + sk.put(&mut writer, "foo", &Value::I64(1234)).expect("wrote"); + sk.put(&mut writer, "bar", &Value::Bool(true)).expect("wrote"); + sk.put(&mut writer, "baz", &Value::Str("héllo, yöu")).expect("wrote"); + writer.commit().expect("committed"); + } + + { + let mut writer = k.write().expect("writer"); + sk.clear(&mut writer).expect("cleared"); + writer.commit().expect("committed"); + } + + { + let r = k.read().unwrap(); + let iter = sk.iter_start(&r).expect("iter"); + assert_eq!(iter.count(), 0); + } +} + +#[test] +#[should_panic(expected = "KeyValuePairNotFound")] +fn test_single_store_delete_nonexistent_safe() { + let root = Builder::new().prefix("test_single_store_delete_nonexistent_safe").tempdir().expect("tempdir"); + fs::create_dir_all(root.path()).expect("dir created"); + + let k = Rkv::new::<SafeMode>(root.path()).expect("new succeeded"); + let sk = k.open_single("sk", StoreOptions::create()).expect("opened"); + + let mut writer = k.write().expect("writer"); + sk.delete(&mut writer, "bogus").unwrap(); +} + +#[test] +#[cfg(feature = "db-dup-sort")] +fn test_multi_put_get_del_safe() { + let root = Builder::new().prefix("test_multi_put_get_del_safe").tempdir().expect("tempdir"); + fs::create_dir_all(root.path()).expect("dir created"); + + let k = Rkv::new::<SafeMode>(root.path()).expect("new succeeded"); + let multistore = k.open_multi("multistore", 
StoreOptions::create()).unwrap(); + + let mut writer = k.write().unwrap(); + multistore.put(&mut writer, "str1", &Value::Str("str1 foo")).unwrap(); + multistore.put(&mut writer, "str1", &Value::Str("str1 bar")).unwrap(); + multistore.put(&mut writer, "str2", &Value::Str("str2 foo")).unwrap(); + multistore.put(&mut writer, "str2", &Value::Str("str2 bar")).unwrap(); + multistore.put(&mut writer, "str3", &Value::Str("str3 foo")).unwrap(); + multistore.put(&mut writer, "str3", &Value::Str("str3 bar")).unwrap(); + writer.commit().unwrap(); + + let writer = k.write().unwrap(); + { + let mut iter = multistore.get(&writer, "str1").unwrap(); + let (id, val) = iter.next().unwrap().unwrap(); + assert_eq!((id, val), (&b"str1"[..], Value::Str("str1 bar"))); + let (id, val) = iter.next().unwrap().unwrap(); + assert_eq!((id, val), (&b"str1"[..], Value::Str("str1 foo"))); + } + writer.commit().unwrap(); + + let mut writer = k.write().unwrap(); + multistore.delete(&mut writer, "str1", &Value::Str("str1 foo")).unwrap(); + assert_eq!(multistore.get_first(&writer, "str1").unwrap(), Some(Value::Str("str1 bar"))); + multistore.delete(&mut writer, "str2", &Value::Str("str2 bar")).unwrap(); + assert_eq!(multistore.get_first(&writer, "str2").unwrap(), Some(Value::Str("str2 foo"))); + multistore.delete_all(&mut writer, "str3").unwrap(); + assert_eq!(multistore.get_first(&writer, "str3").unwrap(), None); + writer.commit().unwrap(); +} + +#[test] +#[cfg(feature = "db-dup-sort")] +fn test_multiple_store_clear_safe() { + let root = Builder::new().prefix("test_multiple_store_clear_safe").tempdir().expect("tempdir"); + fs::create_dir_all(root.path()).expect("dir created"); + + let k = Rkv::new::<SafeMode>(root.path()).expect("new succeeded"); + let multistore = k.open_multi("multistore", StoreOptions::create()).expect("opened"); + + { + let mut writer = k.write().expect("writer"); + multistore.put(&mut writer, "str1", &Value::Str("str1 foo")).unwrap(); + multistore.put(&mut writer, "str1", 
&Value::Str("str1 bar")).unwrap(); + multistore.put(&mut writer, "str2", &Value::Str("str2 foo")).unwrap(); + multistore.put(&mut writer, "str2", &Value::Str("str2 bar")).unwrap(); + multistore.put(&mut writer, "str3", &Value::Str("str3 foo")).unwrap(); + multistore.put(&mut writer, "str3", &Value::Str("str3 bar")).unwrap(); + writer.commit().expect("committed"); + } + + { + let mut writer = k.write().expect("writer"); + multistore.clear(&mut writer).expect("cleared"); + writer.commit().expect("committed"); + } + + { + let r = k.read().unwrap(); + assert_eq!(multistore.get_first(&r, "str1").expect("read"), None); + assert_eq!(multistore.get_first(&r, "str2").expect("read"), None); + assert_eq!(multistore.get_first(&r, "str3").expect("read"), None); + } +} + +#[test] +fn test_open_store_for_read_safe() { + let root = Builder::new().prefix("test_open_store_for_read_safe").tempdir().expect("tempdir"); + fs::create_dir_all(root.path()).expect("dir created"); + + let k = Rkv::new::<SafeMode>(root.path()).expect("new succeeded"); + + // First create the store, and start a write transaction on it. + let sk = k.open_single("sk", StoreOptions::create()).expect("opened"); + let mut writer = k.write().expect("writer"); + sk.put(&mut writer, "foo", &Value::Str("bar")).expect("write"); + + // Open the same store for read, note that the write transaction is still in progress, + // it should not block the reader though. + let sk_readonly = k.open_single("sk", StoreOptions::default()).expect("opened"); + writer.commit().expect("commit"); + + // Now the write transaction is committed, any followed reads should see its change. 
+ let reader = k.read().expect("reader"); + assert_eq!(sk_readonly.get(&reader, "foo").expect("read"), Some(Value::Str("bar"))); +} + +#[test] +#[should_panic(expected = "open a missing store")] +fn test_open_a_missing_store_safe() { + let root = Builder::new().prefix("test_open_a_missing_store_safe").tempdir().expect("tempdir"); + fs::create_dir_all(root.path()).expect("dir created"); + + let k = Rkv::new::<SafeMode>(root.path()).expect("new succeeded"); + let _sk = k.open_single("sk", StoreOptions::default()).expect("open a missing store"); +} + +#[test] +#[should_panic(expected = "new failed: FileInvalid")] +fn test_open_a_broken_store_safe() { + let root = Builder::new().prefix("test_open_a_missing_store_safe").tempdir().expect("tempdir"); + fs::create_dir_all(root.path()).expect("dir created"); + + let dbfile = root.path().join("data.safe.bin"); + fs::write(dbfile, "bogus").expect("dbfile created"); + + let _ = Rkv::new::<SafeMode>(root.path()).expect("new failed"); +} + +#[test] +fn test_open_fail_with_badrslot_safe() { + let root = Builder::new().prefix("test_open_fail_with_badrslot_safe").tempdir().expect("tempdir"); + fs::create_dir_all(root.path()).expect("dir created"); + + let k = Rkv::new::<SafeMode>(root.path()).expect("new succeeded"); + + // First create the store + let _sk = k.open_single("sk", StoreOptions::create()).expect("opened"); + + // Open a reader on this store + let _reader = k.read().expect("reader"); + + // Open the same store for read while the reader is in progress will panic + let store = k.open_single("sk", StoreOptions::default()); + match store { + Err(StoreError::OpenAttemptedDuringTransaction(_thread_id)) => (), + _ => panic!("should panic"), + } +} + +#[test] +fn test_create_fail_with_badrslot_safe() { + let root = Builder::new().prefix("test_create_fail_with_badrslot_safe").tempdir().expect("tempdir"); + fs::create_dir_all(root.path()).expect("dir created"); + + let k = Rkv::new::<SafeMode>(root.path()).expect("new 
succeeded"); + + // First create the store + let _sk = k.open_single("sk", StoreOptions::create()).expect("opened"); + + // Open a reader on this store + let _reader = k.read().expect("reader"); + + // Open the same store for read while the reader is in progress will panic + let store = k.open_single("sk", StoreOptions::create()); + match store { + Err(StoreError::OpenAttemptedDuringTransaction(_thread_id)) => (), + _ => panic!("should panic"), + } +} + +#[test] +fn test_read_before_write_num_safe() { + let root = Builder::new().prefix("test_read_before_write_num_safe").tempdir().expect("tempdir"); + fs::create_dir_all(root.path()).expect("dir created"); + + let k = Rkv::new::<SafeMode>(root.path()).expect("new succeeded"); + let sk = k.open_single("sk", StoreOptions::create()).expect("opened"); + + // Test reading a number, modifying it, and then writing it back. + // We have to be done with the Value::I64 before calling Writer::put, + // as the Value::I64 borrows an immutable reference to the Writer. + // So we extract and copy its primitive value. 
+ + fn get_existing_foo(store: SingleStore<SafeModeDatabase>, writer: &Writer<SafeModeRwTransaction>) -> Option<i64> { + match store.get(writer, "foo").expect("read") { + Some(Value::I64(val)) => Some(val), + _ => None, + } + } + + let mut writer = k.write().expect("writer"); + let mut existing = get_existing_foo(sk, &writer).unwrap_or(99); + existing += 1; + sk.put(&mut writer, "foo", &Value::I64(existing)).expect("success"); + + let updated = get_existing_foo(sk, &writer).unwrap_or(99); + assert_eq!(updated, 100); + writer.commit().expect("commit"); +} + +#[test] +fn test_read_before_write_str_safe() { + let root = Builder::new().prefix("test_read_before_write_str_safe").tempdir().expect("tempdir"); + fs::create_dir_all(root.path()).expect("dir created"); + + let k = Rkv::new::<SafeMode>(root.path()).expect("new succeeded"); + let sk = k.open_single("sk", StoreOptions::create()).expect("opened"); + + // Test reading a string, modifying it, and then writing it back. + // We have to be done with the Value::Str before calling Writer::put, + // as the Value::Str (and its underlying &str) borrows an immutable + // reference to the Writer. So we copy it to a String. 
+ + fn get_existing_foo( + store: SingleStore<SafeModeDatabase>, + writer: &Writer<SafeModeRwTransaction>, + ) -> Option<String> { + match store.get(writer, "foo").expect("read") { + Some(Value::Str(val)) => Some(val.to_string()), + _ => None, + } + } + + let mut writer = k.write().expect("writer"); + let mut existing = get_existing_foo(sk, &writer).unwrap_or_default(); + existing.push('…'); + sk.put(&mut writer, "foo", &Value::Str(&existing)).expect("write"); + + let updated = get_existing_foo(sk, &writer).unwrap_or_default(); + assert_eq!(updated, "…"); + writer.commit().expect("commit"); +} + +#[test] +fn test_isolation_safe() { + let root = Builder::new().prefix("test_isolation_safe").tempdir().expect("tempdir"); + fs::create_dir_all(root.path()).expect("dir created"); + + let k = Rkv::new::<SafeMode>(root.path()).expect("new succeeded"); + let s = k.open_single("s", StoreOptions::create()).expect("opened"); + + // Add one field. + { + let mut writer = k.write().expect("writer"); + s.put(&mut writer, "foo", &Value::I64(1234)).expect("wrote"); + writer.commit().expect("committed"); + } + + { + let reader = k.read().unwrap(); + assert_eq!(s.get(&reader, "foo").expect("read"), Some(Value::I64(1234))); + } + + // Establish a long-lived reader that outlasts a writer. + let reader = k.read().expect("reader"); + assert_eq!(s.get(&reader, "foo").expect("read"), Some(Value::I64(1234))); + + // Start a write transaction. + let mut writer = k.write().expect("writer"); + s.put(&mut writer, "foo", &Value::I64(999)).expect("wrote"); + + // The reader and writer are isolated. + assert_eq!(s.get(&reader, "foo").expect("read"), Some(Value::I64(1234))); + assert_eq!(s.get(&writer, "foo").expect("read"), Some(Value::I64(999))); + + // If we commit the writer, we still have isolation. + writer.commit().expect("committed"); + assert_eq!(s.get(&reader, "foo").expect("read"), Some(Value::I64(1234))); + + // A new reader sees the committed value. 
Note that LMDB doesn't allow two + // read transactions to exist in the same thread, so we abort the previous one. + reader.abort(); + let reader = k.read().expect("reader"); + assert_eq!(s.get(&reader, "foo").expect("read"), Some(Value::I64(999))); +} + +#[test] +fn test_blob_safe() { + let root = Builder::new().prefix("test_round_trip_blob_safe").tempdir().expect("tempdir"); + fs::create_dir_all(root.path()).expect("dir created"); + + let k = Rkv::new::<SafeMode>(root.path()).expect("new succeeded"); + let sk = k.open_single("sk", StoreOptions::create()).expect("opened"); + + let mut writer = k.write().expect("writer"); + assert_eq!(sk.get(&writer, "foo").expect("read"), None); + sk.put(&mut writer, "foo", &Value::Blob(&[1, 2, 3, 4])).expect("wrote"); + assert_eq!(sk.get(&writer, "foo").expect("read"), Some(Value::Blob(&[1, 2, 3, 4]))); + + fn u16_to_u8(src: &[u16]) -> Vec<u8> { + let mut dst = vec![0; 2 * src.len()]; + LittleEndian::write_u16_into(src, &mut dst); + dst + } + + fn u8_to_u16(src: &[u8]) -> Vec<u16> { + let mut dst = vec![0; src.len() / 2]; + LittleEndian::read_u16_into(src, &mut dst); + dst + } + + // When storing UTF-16 strings as blobs, we'll need to convert + // their [u16] backing storage to [u8]. Test that converting, writing, + // reading, and converting back works as expected. 
+ let u16_array = [1000, 10000, 54321, 65535]; + assert_eq!(sk.get(&writer, "bar").expect("read"), None); + sk.put(&mut writer, "bar", &Value::Blob(&u16_to_u8(&u16_array))).expect("wrote"); + let u8_array = match sk.get(&writer, "bar").expect("read") { + Some(Value::Blob(val)) => val, + _ => &[], + }; + assert_eq!(u8_to_u16(u8_array), u16_array); +} + +#[test] +fn test_sync_safe() { + let root = Builder::new().prefix("test_sync_safe").tempdir().expect("tempdir"); + fs::create_dir_all(root.path()).expect("dir created"); + + let mut builder = Rkv::environment_builder::<SafeMode>(); + builder.set_max_dbs(1); + { + let k = Rkv::from_builder(root.path(), builder).expect("new succeeded"); + let sk = k.open_single("sk", StoreOptions::create()).expect("opened"); + { + let mut writer = k.write().expect("writer"); + sk.put(&mut writer, "foo", &Value::I64(1234)).expect("wrote"); + writer.commit().expect("committed"); + k.sync(true).expect("synced"); + } + } + let k = Rkv::from_builder(root.path(), builder).expect("new succeeded"); + let sk = k.open_single("sk", StoreOptions::default()).expect("opened"); + let reader = k.read().expect("reader"); + assert_eq!(sk.get(&reader, "foo").expect("read"), Some(Value::I64(1234))); +} + +#[test] +fn test_iter_safe() { + let root = Builder::new().prefix("test_iter_safe").tempdir().expect("tempdir"); + fs::create_dir_all(root.path()).expect("dir created"); + + let k = Rkv::new::<SafeMode>(root.path()).expect("new succeeded"); + let sk = k.open_single("sk", StoreOptions::create()).expect("opened"); + + // An iterator over an empty store returns no values. 
+ { + let reader = k.read().unwrap(); + let mut iter = sk.iter_start(&reader).unwrap(); + assert!(iter.next().is_none()); + } + + let mut writer = k.write().expect("writer"); + sk.put(&mut writer, "foo", &Value::I64(1234)).expect("wrote"); + sk.put(&mut writer, "noo", &Value::F64(1234.0.into())).expect("wrote"); + sk.put(&mut writer, "bar", &Value::Bool(true)).expect("wrote"); + sk.put(&mut writer, "baz", &Value::Str("héllo, yöu")).expect("wrote"); + sk.put(&mut writer, "héllò, töűrîst", &Value::Str("Emil.RuleZ!")).expect("wrote"); + sk.put(&mut writer, "你好,遊客", &Value::Str("米克規則")).expect("wrote"); + writer.commit().expect("committed"); + + let reader = k.read().unwrap(); + + // Reader.iter() returns (key, value) tuples ordered by key. + let mut iter = sk.iter_start(&reader).unwrap(); + let (key, val) = iter.next().unwrap().unwrap(); + assert_eq!(str::from_utf8(key).expect("key"), "bar"); + assert_eq!(val, Value::Bool(true)); + let (key, val) = iter.next().unwrap().unwrap(); + assert_eq!(str::from_utf8(key).expect("key"), "baz"); + assert_eq!(val, Value::Str("héllo, yöu")); + let (key, val) = iter.next().unwrap().unwrap(); + assert_eq!(str::from_utf8(key).expect("key"), "foo"); + assert_eq!(val, Value::I64(1234)); + let (key, val) = iter.next().unwrap().unwrap(); + assert_eq!(str::from_utf8(key).expect("key"), "héllò, töűrîst"); + assert_eq!(val, Value::Str("Emil.RuleZ!")); + let (key, val) = iter.next().unwrap().unwrap(); + assert_eq!(str::from_utf8(key).expect("key"), "noo"); + assert_eq!(val, Value::F64(1234.0.into())); + let (key, val) = iter.next().unwrap().unwrap(); + assert_eq!(str::from_utf8(key).expect("key"), "你好,遊客"); + assert_eq!(val, Value::Str("米克規則")); + assert!(iter.next().is_none()); + + // Iterators don't loop. Once one returns None, additional calls + // to its next() method will always return None. + assert!(iter.next().is_none()); + + // Reader.iter_from() begins iteration at the first key equal to + // or greater than the given key. 
+ let mut iter = sk.iter_from(&reader, "moo").unwrap(); + let (key, val) = iter.next().unwrap().unwrap(); + assert_eq!(str::from_utf8(key).expect("key"), "noo"); + assert_eq!(val, Value::F64(1234.0.into())); + let (key, val) = iter.next().unwrap().unwrap(); + assert_eq!(str::from_utf8(key).expect("key"), "你好,遊客"); + assert_eq!(val, Value::Str("米克規則")); + assert!(iter.next().is_none()); + + // Reader.iter_from() works as expected when the given key is a prefix + // of a key in the store. + let mut iter = sk.iter_from(&reader, "no").unwrap(); + let (key, val) = iter.next().unwrap().unwrap(); + assert_eq!(str::from_utf8(key).expect("key"), "noo"); + assert_eq!(val, Value::F64(1234.0.into())); + let (key, val) = iter.next().unwrap().unwrap(); + assert_eq!(str::from_utf8(key).expect("key"), "你好,遊客"); + assert_eq!(val, Value::Str("米克規則")); + assert!(iter.next().is_none()); +} + +#[test] +fn test_iter_from_key_greater_than_existing_safe() { + let root = Builder::new().prefix("test_iter_from_key_greater_than_existing_safe").tempdir().expect("tempdir"); + fs::create_dir_all(root.path()).expect("dir created"); + + let k = Rkv::new::<SafeMode>(root.path()).expect("new succeeded"); + let sk = k.open_single("sk", StoreOptions::create()).expect("opened"); + + let mut writer = k.write().expect("writer"); + sk.put(&mut writer, "foo", &Value::I64(1234)).expect("wrote"); + sk.put(&mut writer, "noo", &Value::F64(1234.0.into())).expect("wrote"); + sk.put(&mut writer, "bar", &Value::Bool(true)).expect("wrote"); + sk.put(&mut writer, "baz", &Value::Str("héllo, yöu")).expect("wrote"); + writer.commit().expect("committed"); + + let reader = k.read().unwrap(); + let mut iter = sk.iter_from(&reader, "nuu").unwrap(); + assert!(iter.next().is_none()); +} + +#[test] +fn test_multiple_store_read_write_safe() { + let root = Builder::new().prefix("test_multiple_store_read_write_safe").tempdir().expect("tempdir"); + fs::create_dir_all(root.path()).expect("dir created"); + + let k = 
Rkv::new::<SafeMode>(root.path()).expect("new succeeded"); + let s1 = k.open_single("store_1", StoreOptions::create()).expect("opened"); + let s2 = k.open_single("store_2", StoreOptions::create()).expect("opened"); + let s3 = k.open_single("store_3", StoreOptions::create()).expect("opened"); + + let mut writer = k.write().expect("writer"); + s1.put(&mut writer, "foo", &Value::Str("bar")).expect("wrote"); + s2.put(&mut writer, "foo", &Value::I64(123)).expect("wrote"); + s3.put(&mut writer, "foo", &Value::Bool(true)).expect("wrote"); + + assert_eq!(s1.get(&writer, "foo").expect("read"), Some(Value::Str("bar"))); + assert_eq!(s2.get(&writer, "foo").expect("read"), Some(Value::I64(123))); + assert_eq!(s3.get(&writer, "foo").expect("read"), Some(Value::Bool(true))); + + writer.commit().expect("committed"); + + let reader = k.read().expect("unbound_reader"); + assert_eq!(s1.get(&reader, "foo").expect("read"), Some(Value::Str("bar"))); + assert_eq!(s2.get(&reader, "foo").expect("read"), Some(Value::I64(123))); + assert_eq!(s3.get(&reader, "foo").expect("read"), Some(Value::Bool(true))); + reader.abort(); + + // test delete across multiple stores + let mut writer = k.write().expect("writer"); + s1.delete(&mut writer, "foo").expect("deleted"); + s2.delete(&mut writer, "foo").expect("deleted"); + s3.delete(&mut writer, "foo").expect("deleted"); + writer.commit().expect("committed"); + + let reader = k.read().expect("reader"); + assert_eq!(s1.get(&reader, "key").expect("value"), None); + assert_eq!(s2.get(&reader, "key").expect("value"), None); + assert_eq!(s3.get(&reader, "key").expect("value"), None); +} + +#[test] +fn test_multiple_store_iter_safe() { + let root = Builder::new().prefix("test_multiple_store_iter_safe").tempdir().expect("tempdir"); + fs::create_dir_all(root.path()).expect("dir created"); + + let k = Rkv::new::<SafeMode>(root.path()).expect("new succeeded"); + let s1 = k.open_single("store_1", StoreOptions::create()).expect("opened"); + let s2 = 
k.open_single("store_2", StoreOptions::create()).expect("opened"); + + let mut writer = k.write().expect("writer"); + // Write to "s1" + s1.put(&mut writer, "foo", &Value::I64(1234)).expect("wrote"); + s1.put(&mut writer, "noo", &Value::F64(1234.0.into())).expect("wrote"); + s1.put(&mut writer, "bar", &Value::Bool(true)).expect("wrote"); + s1.put(&mut writer, "baz", &Value::Str("héllo, yöu")).expect("wrote"); + s1.put(&mut writer, "héllò, töűrîst", &Value::Str("Emil.RuleZ!")).expect("wrote"); + s1.put(&mut writer, "你好,遊客", &Value::Str("米克規則")).expect("wrote"); + // &mut writer to "s2" + s2.put(&mut writer, "foo", &Value::I64(1234)).expect("wrote"); + s2.put(&mut writer, "noo", &Value::F64(1234.0.into())).expect("wrote"); + s2.put(&mut writer, "bar", &Value::Bool(true)).expect("wrote"); + s2.put(&mut writer, "baz", &Value::Str("héllo, yöu")).expect("wrote"); + s2.put(&mut writer, "héllò, töűrîst", &Value::Str("Emil.RuleZ!")).expect("wrote"); + s2.put(&mut writer, "你好,遊客", &Value::Str("米克規則")).expect("wrote"); + writer.commit().expect("committed"); + + let reader = k.read().unwrap(); + + // Iterate through the whole store in "s1" + let mut iter = s1.iter_start(&reader).unwrap(); + let (key, val) = iter.next().unwrap().unwrap(); + assert_eq!(str::from_utf8(key).expect("key"), "bar"); + assert_eq!(val, Value::Bool(true)); + let (key, val) = iter.next().unwrap().unwrap(); + assert_eq!(str::from_utf8(key).expect("key"), "baz"); + assert_eq!(val, Value::Str("héllo, yöu")); + let (key, val) = iter.next().unwrap().unwrap(); + assert_eq!(str::from_utf8(key).expect("key"), "foo"); + assert_eq!(val, Value::I64(1234)); + let (key, val) = iter.next().unwrap().unwrap(); + assert_eq!(str::from_utf8(key).expect("key"), "héllò, töűrîst"); + assert_eq!(val, Value::Str("Emil.RuleZ!")); + let (key, val) = iter.next().unwrap().unwrap(); + assert_eq!(str::from_utf8(key).expect("key"), "noo"); + assert_eq!(val, Value::F64(1234.0.into())); + let (key, val) = iter.next().unwrap().unwrap(); 
+ assert_eq!(str::from_utf8(key).expect("key"), "你好,遊客"); + assert_eq!(val, Value::Str("米克規則")); + assert!(iter.next().is_none()); + + // Iterate through the whole store in "s2" + let mut iter = s2.iter_start(&reader).unwrap(); + let (key, val) = iter.next().unwrap().unwrap(); + assert_eq!(str::from_utf8(key).expect("key"), "bar"); + assert_eq!(val, Value::Bool(true)); + let (key, val) = iter.next().unwrap().unwrap(); + assert_eq!(str::from_utf8(key).expect("key"), "baz"); + assert_eq!(val, Value::Str("héllo, yöu")); + let (key, val) = iter.next().unwrap().unwrap(); + assert_eq!(str::from_utf8(key).expect("key"), "foo"); + assert_eq!(val, Value::I64(1234)); + let (key, val) = iter.next().unwrap().unwrap(); + assert_eq!(str::from_utf8(key).expect("key"), "héllò, töűrîst"); + assert_eq!(val, Value::Str("Emil.RuleZ!")); + let (key, val) = iter.next().unwrap().unwrap(); + assert_eq!(str::from_utf8(key).expect("key"), "noo"); + assert_eq!(val, Value::F64(1234.0.into())); + let (key, val) = iter.next().unwrap().unwrap(); + assert_eq!(str::from_utf8(key).expect("key"), "你好,遊客"); + assert_eq!(val, Value::Str("米克規則")); + assert!(iter.next().is_none()); + + // Iterate from a given key in "s1" + let mut iter = s1.iter_from(&reader, "moo").unwrap(); + let (key, val) = iter.next().unwrap().unwrap(); + assert_eq!(str::from_utf8(key).expect("key"), "noo"); + assert_eq!(val, Value::F64(1234.0.into())); + let (key, val) = iter.next().unwrap().unwrap(); + assert_eq!(str::from_utf8(key).expect("key"), "你好,遊客"); + assert_eq!(val, Value::Str("米克規則")); + assert!(iter.next().is_none()); + + // Iterate from a given key in "s2" + let mut iter = s2.iter_from(&reader, "moo").unwrap(); + let (key, val) = iter.next().unwrap().unwrap(); + assert_eq!(str::from_utf8(key).expect("key"), "noo"); + assert_eq!(val, Value::F64(1234.0.into())); + let (key, val) = iter.next().unwrap().unwrap(); + assert_eq!(str::from_utf8(key).expect("key"), "你好,遊客"); + assert_eq!(val, Value::Str("米克規則")); + 
assert!(iter.next().is_none()); + + // Iterate from a given prefix in "s1" + let mut iter = s1.iter_from(&reader, "no").unwrap(); + let (key, val) = iter.next().unwrap().unwrap(); + assert_eq!(str::from_utf8(key).expect("key"), "noo"); + assert_eq!(val, Value::F64(1234.0.into())); + let (key, val) = iter.next().unwrap().unwrap(); + assert_eq!(str::from_utf8(key).expect("key"), "你好,遊客"); + assert_eq!(val, Value::Str("米克規則")); + assert!(iter.next().is_none()); + + // Iterate from a given prefix in "s2" + let mut iter = s2.iter_from(&reader, "no").unwrap(); + let (key, val) = iter.next().unwrap().unwrap(); + assert_eq!(str::from_utf8(key).expect("key"), "noo"); + assert_eq!(val, Value::F64(1234.0.into())); + let (key, val) = iter.next().unwrap().unwrap(); + assert_eq!(str::from_utf8(key).expect("key"), "你好,遊客"); + assert_eq!(val, Value::Str("米克規則")); + assert!(iter.next().is_none()); +} + +#[test] +fn test_store_multiple_thread_safe() { + let root = Builder::new().prefix("test_multiple_thread_safe").tempdir().expect("tempdir"); + fs::create_dir_all(root.path()).expect("dir created"); + + let rkv_arc = Arc::new(RwLock::new(Rkv::new::<SafeMode>(root.path()).expect("new succeeded"))); + let store = rkv_arc.read().unwrap().open_single("test", StoreOptions::create()).expect("opened"); + + let num_threads = 10; + let mut write_handles = Vec::with_capacity(num_threads as usize); + let mut read_handles = Vec::with_capacity(num_threads as usize); + + // Note that this isn't intended to demonstrate a good use of threads. + // For this shape of data, it would be more performant to write/read + // all values using one transaction in a single thread. The point here + // is just to confirm that a store can be shared by multiple threads. + + // For each KV pair, spawn a thread that writes it to the store. 
+ for i in 0..num_threads { + let rkv_arc = rkv_arc.clone(); + write_handles.push(thread::spawn(move || { + let rkv = rkv_arc.write().expect("rkv"); + let mut writer = rkv.write().expect("writer"); + store.put(&mut writer, i.to_string(), &Value::U64(i)).expect("written"); + writer.commit().unwrap(); + })); + } + for handle in write_handles { + handle.join().expect("joined"); + } + + // For each KV pair, spawn a thread that reads it from the store + // and returns its value. + for i in 0..num_threads { + let rkv_arc = rkv_arc.clone(); + read_handles.push(thread::spawn(move || { + let rkv = rkv_arc.read().expect("rkv"); + let reader = rkv.read().expect("reader"); + let value = match store.get(&reader, i.to_string()) { + Ok(Some(Value::U64(value))) => value, + Ok(Some(_)) => panic!("value type unexpected"), + Ok(None) => panic!("value not found"), + Err(err) => panic!(err), + }; + assert_eq!(value, i); + value + })); + } + + // Sum the values returned from the threads and confirm that they're + // equal to the sum of values written to the threads. + let thread_sum: u64 = read_handles.into_iter().map(|handle| handle.join().expect("value")).sum(); + assert_eq!(thread_sum, (0..num_threads).sum()); +} + +#[test] +fn test_use_value_as_key_safe() { + let root = Builder::new().prefix("test_use_value_as_key_safe").tempdir().expect("tempdir"); + let rkv = Rkv::new::<SafeMode>(root.path()).expect("new succeeded"); + let store = rkv.open_single("store", StoreOptions::create()).expect("opened"); + + { + let mut writer = rkv.write().expect("writer"); + store.put(&mut writer, "foo", &Value::Str("bar")).expect("wrote"); + store.put(&mut writer, "bar", &Value::Str("baz")).expect("wrote"); + writer.commit().expect("committed"); + } + + // It's possible to retrieve a value with a Reader and then use it + // as a key with a Writer. 
+ { + let reader = &rkv.read().unwrap(); + if let Some(Value::Str(key)) = store.get(reader, "foo").expect("read") { + let mut writer = rkv.write().expect("writer"); + store.delete(&mut writer, key).expect("deleted"); + writer.commit().expect("committed"); + } + } + + { + let mut writer = rkv.write().expect("writer"); + store.put(&mut writer, "bar", &Value::Str("baz")).expect("wrote"); + writer.commit().expect("committed"); + } + + // You can also retrieve a Value with a Writer and then use it as a key + // with the same Writer if you copy the value to an owned type + // so the Writer isn't still being borrowed by the retrieved value + // when you try to borrow the Writer again to modify that value. + { + let mut writer = rkv.write().expect("writer"); + if let Some(Value::Str(value)) = store.get(&writer, "foo").expect("read") { + let key = value.to_owned(); + store.delete(&mut writer, key).expect("deleted"); + writer.commit().expect("committed"); + } + } + + { + let name1 = rkv.open_single("name1", StoreOptions::create()).expect("opened"); + let name2 = rkv.open_single("name2", StoreOptions::create()).expect("opened"); + let mut writer = rkv.write().expect("writer"); + name1.put(&mut writer, "key1", &Value::Str("bar")).expect("wrote"); + name1.put(&mut writer, "bar", &Value::Str("baz")).expect("wrote"); + name2.put(&mut writer, "key2", &Value::Str("bar")).expect("wrote"); + name2.put(&mut writer, "bar", &Value::Str("baz")).expect("wrote"); + writer.commit().expect("committed"); + } + + // You can also iterate (store, key) pairs to retrieve foreign keys, + // then iterate those foreign keys to modify/delete them. + // + // You need to open the stores in advance, since opening a store + // uses a write transaction internally, so opening them while a writer + // is extant will hang. 
+ // + // And you need to copy the values to an owned type so the Writer isn't + // still being borrowed by a retrieved value when you try to borrow + // the Writer again to modify another value. + let fields = vec![ + (rkv.open_single("name1", StoreOptions::create()).expect("opened"), "key1"), + (rkv.open_single("name2", StoreOptions::create()).expect("opened"), "key2"), + ]; + { + let mut foreignkeys = Vec::new(); + let mut writer = rkv.write().expect("writer"); + for (store, key) in fields.iter() { + if let Some(Value::Str(value)) = store.get(&writer, key).expect("read") { + foreignkeys.push((store, value.to_owned())); + } + } + for (store, key) in foreignkeys.iter() { + store.delete(&mut writer, key).expect("deleted"); + } + writer.commit().expect("committed"); + } +} diff --git a/third_party/rust/rkv/tests/integer-store.rs b/third_party/rust/rkv/tests/integer-store.rs new file mode 100644 index 0000000000..680afc1da3 --- /dev/null +++ b/third_party/rust/rkv/tests/integer-store.rs @@ -0,0 +1,83 @@ +// Copyright 2018-2019 Mozilla +// +// Licensed under the Apache License, Version 2.0 (the "License"); you may not use +// this file except in compliance with the License. You may obtain a copy of the +// License at http://www.apache.org/licenses/LICENSE-2.0 +// Unless required by applicable law or agreed to in writing, software distributed +// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR +// CONDITIONS OF ANY KIND, either express or implied. See the License for the +// specific language governing permissions and limitations under the License. 
+#![cfg(feature = "db-int-key")] +#![allow(clippy::many_single_char_names)] + +use std::fs; + +use serde_derive::Serialize; +use tempfile::Builder; + +use rkv::{ + backend::Lmdb, + PrimitiveInt, + Rkv, + StoreOptions, + Value, +}; + +#[test] +fn test_integer_keys() { + let root = Builder::new().prefix("test_integer_keys").tempdir().expect("tempdir"); + fs::create_dir_all(root.path()).expect("dir created"); + + let k = Rkv::new::<Lmdb>(root.path()).expect("new succeeded"); + let s = k.open_integer("s", StoreOptions::create()).expect("open"); + + macro_rules! test_integer_keys { + ($store:expr, $key:expr) => {{ + let mut writer = k.write().expect("writer"); + + $store.put(&mut writer, $key, &Value::Str("hello!")).expect("write"); + assert_eq!($store.get(&writer, $key).expect("read"), Some(Value::Str("hello!"))); + writer.commit().expect("committed"); + + let reader = k.read().expect("reader"); + assert_eq!($store.get(&reader, $key).expect("read"), Some(Value::Str("hello!"))); + }}; + } + + // The integer module provides only the u32 integer key variant + // of IntegerStore, so we can use it without further ado. + test_integer_keys!(s, std::u32::MIN); + test_integer_keys!(s, std::u32::MAX); + + // If you want to use another integer key variant, you need to implement + // a newtype, implement PrimitiveInt, and implement or derive Serialize + // for it. Here we do so for the i32 type. + + // DANGER! Doing this enables you to open a store with multiple, + // different integer key types, which may result in unexpected behavior. + // Make sure you know what you're doing! 
+ + let t = k.open_integer("s", StoreOptions::create()).expect("open"); + + #[derive(Serialize)] + struct I32(i32); + impl PrimitiveInt for I32 {} + test_integer_keys!(t, I32(std::i32::MIN)); + test_integer_keys!(t, I32(std::i32::MAX)); + + let u = k.open_integer("s", StoreOptions::create()).expect("open"); + + #[derive(Serialize)] + struct U16(u16); + impl PrimitiveInt for U16 {} + test_integer_keys!(u, U16(std::u16::MIN)); + test_integer_keys!(u, U16(std::u16::MAX)); + + let v = k.open_integer("s", StoreOptions::create()).expect("open"); + + #[derive(Serialize)] + struct U64(u64); + impl PrimitiveInt for U64 {} + test_integer_keys!(v, U64(std::u64::MIN)); + test_integer_keys!(v, U64(std::u64::MAX)); +} diff --git a/third_party/rust/rkv/tests/manager.rs b/third_party/rust/rkv/tests/manager.rs new file mode 100644 index 0000000000..c0c7d073e4 --- /dev/null +++ b/third_party/rust/rkv/tests/manager.rs @@ -0,0 +1,257 @@ +// Copyright 2018-2019 Mozilla +// +// Licensed under the Apache License, Version 2.0 (the "License"); you may not use +// this file except in compliance with the License. You may obtain a copy of the +// License at http://www.apache.org/licenses/LICENSE-2.0 +// Unless required by applicable law or agreed to in writing, software distributed +// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR +// CONDITIONS OF ANY KIND, either express or implied. See the License for the +// specific language governing permissions and limitations under the License. + +use std::{ + fs, + sync::Arc, +}; + +use tempfile::Builder; + +use rkv::{ + backend::{ + BackendEnvironmentBuilder, + Lmdb, + LmdbEnvironment, + SafeMode, + SafeModeEnvironment, + }, + CloseOptions, + Rkv, + StoreOptions, + Value, +}; + +/// Test that a manager can be created with simple type inference. 
+#[test] +#[allow(clippy::let_underscore_lock)] +fn test_simple() { + type Manager = rkv::Manager<LmdbEnvironment>; + + let _ = Manager::singleton().write().unwrap(); +} + +/// Test that a manager can be created with simple type inference. +#[test] +#[allow(clippy::let_underscore_lock)] +fn test_simple_safe() { + type Manager = rkv::Manager<SafeModeEnvironment>; + + let _ = Manager::singleton().write().unwrap(); +} + +/// Test that a shared Rkv instance can be created with simple type inference. +#[test] +fn test_simple_2() { + type Manager = rkv::Manager<LmdbEnvironment>; + + let root = Builder::new().prefix("test_simple_2").tempdir().expect("tempdir"); + fs::create_dir_all(root.path()).expect("dir created"); + + let mut manager = Manager::singleton().write().unwrap(); + let _ = manager.get_or_create(root.path(), Rkv::new::<Lmdb>).unwrap(); +} + +/// Test that a shared Rkv instance can be created with simple type inference. +#[test] +fn test_simple_safe_2() { + type Manager = rkv::Manager<SafeModeEnvironment>; + + let root = Builder::new().prefix("test_simple_safe_2").tempdir().expect("tempdir"); + fs::create_dir_all(root.path()).expect("dir created"); + + let mut manager = Manager::singleton().write().unwrap(); + let _ = manager.get_or_create(root.path(), Rkv::new::<SafeMode>).unwrap(); +} + +/// Test that the manager will return the same Rkv instance each time for each path. 
+#[test] +fn test_same() { + type Manager = rkv::Manager<LmdbEnvironment>; + + let root = Builder::new().prefix("test_same").tempdir().expect("tempdir"); + fs::create_dir_all(root.path()).expect("dir created"); + + let p = root.path(); + assert!(Manager::singleton().read().unwrap().get(p).expect("success").is_none()); + + let created_arc = Manager::singleton().write().unwrap().get_or_create(p, Rkv::new::<Lmdb>).expect("created"); + let fetched_arc = Manager::singleton().read().unwrap().get(p).expect("success").expect("existed"); + assert!(Arc::ptr_eq(&created_arc, &fetched_arc)); +} + +/// Test that the manager will return the same Rkv instance each time for each path. +#[test] +fn test_same_safe() { + type Manager = rkv::Manager<SafeModeEnvironment>; + + let root = Builder::new().prefix("test_same_safe").tempdir().expect("tempdir"); + fs::create_dir_all(root.path()).expect("dir created"); + + let p = root.path(); + assert!(Manager::singleton().read().unwrap().get(p).expect("success").is_none()); + + let created_arc = Manager::singleton().write().unwrap().get_or_create(p, Rkv::new::<SafeMode>).expect("created"); + let fetched_arc = Manager::singleton().read().unwrap().get(p).expect("success").expect("existed"); + assert!(Arc::ptr_eq(&created_arc, &fetched_arc)); +} + +/// Test that the manager will return the same Rkv instance each time for each path. 
+#[test] +fn test_same_with_capacity() { + type Manager = rkv::Manager<LmdbEnvironment>; + + let root = Builder::new().prefix("test_same_with_capacity").tempdir().expect("tempdir"); + fs::create_dir_all(root.path()).expect("dir created"); + + let mut manager = Manager::singleton().write().unwrap(); + + let p = root.path(); + assert!(manager.get(p).expect("success").is_none()); + + let created_arc = manager.get_or_create_with_capacity(p, 10, Rkv::with_capacity::<Lmdb>).expect("created"); + let fetched_arc = manager.get(p).expect("success").expect("existed"); + assert!(Arc::ptr_eq(&created_arc, &fetched_arc)); +} + +/// Test that the manager will return the same Rkv instance each time for each path. +#[test] +fn test_same_with_capacity_safe() { + type Manager = rkv::Manager<SafeModeEnvironment>; + + let root = Builder::new().prefix("test_same_with_capacity_safe").tempdir().expect("tempdir"); + fs::create_dir_all(root.path()).expect("dir created"); + + let mut manager = Manager::singleton().write().unwrap(); + + let p = root.path(); + assert!(manager.get(p).expect("success").is_none()); + + let created_arc = manager.get_or_create_with_capacity(p, 10, Rkv::with_capacity::<SafeMode>).expect("created"); + let fetched_arc = manager.get(p).expect("success").expect("existed"); + assert!(Arc::ptr_eq(&created_arc, &fetched_arc)); +} + +/// Some storage drivers are able to discard when the database is corrupted at runtime. +/// Test how these managers can discard corrupted databases and re-open. +#[test] +fn test_safe_mode_corrupt_while_open_1() { + type Manager = rkv::Manager<SafeModeEnvironment>; + + let root = Builder::new().prefix("test_safe_mode_corrupt_while_open_1").tempdir().expect("tempdir"); + fs::create_dir_all(root.path()).expect("dir created"); + + // Create environment. 
+ let mut manager = Manager::singleton().write().unwrap(); + let shared_env = manager.get_or_create(root.path(), Rkv::new::<SafeMode>).expect("created"); + let env = shared_env.read().unwrap(); + + // Write some data. + let store = env.open_single("store", StoreOptions::create()).expect("opened"); + let mut writer = env.write().expect("writer"); + store.put(&mut writer, "foo", &Value::I64(1234)).expect("wrote"); + store.put(&mut writer, "bar", &Value::Bool(true)).expect("wrote"); + store.put(&mut writer, "baz", &Value::Str("héllo, yöu")).expect("wrote"); + writer.commit().expect("committed"); + env.sync(true).expect("synced"); + + // Verify it was flushed to disk. + let mut safebin = root.path().to_path_buf(); + safebin.push("data.safe.bin"); + assert!(safebin.exists()); + + // Oops, corruption. + fs::write(&safebin, "bogus").expect("dbfile corrupted"); + + // Close everything. + drop(env); + drop(shared_env); + manager.try_close(root.path(), CloseOptions::default()).expect("closed without deleting"); + assert!(manager.get(root.path()).expect("success").is_none()); + + // Recreating environment fails. + manager.get_or_create(root.path(), Rkv::new::<SafeMode>).expect_err("not created"); + assert!(manager.get(root.path()).expect("success").is_none()); + + // But we can use a builder and pass `discard_if_corrupted` to deal with it. + let mut builder = Rkv::environment_builder::<SafeMode>(); + builder.set_discard_if_corrupted(true); + manager.get_or_create_from_builder(root.path(), builder, Rkv::from_builder::<SafeMode>).expect("created"); + assert!(manager.get(root.path()).expect("success").is_some()); +} + +/// Some storage drivers are able to recover when the database is corrupted at runtime. +/// Test how these managers can recover corrupted databases while open. 
+#[test] +fn test_safe_mode_corrupt_while_open_2() { + type Manager = rkv::Manager<SafeModeEnvironment>; + + let root = Builder::new().prefix("test_safe_mode_corrupt_while_open_2").tempdir().expect("tempdir"); + fs::create_dir_all(root.path()).expect("dir created"); + + // Create environment. + let mut manager = Manager::singleton().write().unwrap(); + let shared_env = manager.get_or_create(root.path(), Rkv::new::<SafeMode>).expect("created"); + let env = shared_env.read().unwrap(); + + // Write some data. + let store = env.open_single("store", StoreOptions::create()).expect("opened"); + let mut writer = env.write().expect("writer"); + store.put(&mut writer, "foo", &Value::I64(1234)).expect("wrote"); + store.put(&mut writer, "bar", &Value::Bool(true)).expect("wrote"); + store.put(&mut writer, "baz", &Value::Str("héllo, yöu")).expect("wrote"); + writer.commit().expect("committed"); + env.sync(true).expect("synced"); + + // Verify it was flushed to disk. + let mut safebin = root.path().to_path_buf(); + safebin.push("data.safe.bin"); + assert!(safebin.exists()); + + // Oops, corruption. + fs::write(&safebin, "bogus").expect("dbfile corrupted"); + + // Reading still works. Magic. + let store = env.open_single("store", StoreOptions::default()).expect("opened"); + let reader = env.read().expect("reader"); + assert_eq!(store.get(&reader, "foo").expect("read"), Some(Value::I64(1234))); + assert_eq!(store.get(&reader, "bar").expect("read"), Some(Value::Bool(true))); + assert_eq!(store.get(&reader, "baz").expect("read"), Some(Value::Str("héllo, yöu"))); + reader.abort(); + + // Writing still works, dbfile will be un-corrupted. 
+ let store = env.open_single("store", StoreOptions::default()).expect("opened"); + let mut writer = env.write().expect("writer"); + store.put(&mut writer, "foo2", &Value::I64(5678)).expect("wrote"); + store.put(&mut writer, "bar2", &Value::Bool(false)).expect("wrote"); + store.put(&mut writer, "baz2", &Value::Str("byé, yöu")).expect("wrote"); + writer.commit().expect("committed"); + env.sync(true).expect("synced"); + + // Close everything. + drop(env); + drop(shared_env); + manager.try_close(root.path(), CloseOptions::default()).expect("closed without deleting"); + assert!(manager.get(root.path()).expect("success").is_none()); + + // Recreate environment. + let shared_env = manager.get_or_create(root.path(), Rkv::new::<SafeMode>).expect("created"); + let env = shared_env.read().unwrap(); + + // Verify that the dbfile is not corrupted. + let store = env.open_single("store", StoreOptions::default()).expect("opened"); + let reader = env.read().expect("reader"); + assert_eq!(store.get(&reader, "foo").expect("read"), Some(Value::I64(1234))); + assert_eq!(store.get(&reader, "bar").expect("read"), Some(Value::Bool(true))); + assert_eq!(store.get(&reader, "baz").expect("read"), Some(Value::Str("héllo, yöu"))); + assert_eq!(store.get(&reader, "foo2").expect("read"), Some(Value::I64(5678))); + assert_eq!(store.get(&reader, "bar2").expect("read"), Some(Value::Bool(false))); + assert_eq!(store.get(&reader, "baz2").expect("read"), Some(Value::Str("byé, yöu"))); +} diff --git a/third_party/rust/rkv/tests/multi-integer-store.rs b/third_party/rust/rkv/tests/multi-integer-store.rs new file mode 100644 index 0000000000..204d52f93d --- /dev/null +++ b/third_party/rust/rkv/tests/multi-integer-store.rs @@ -0,0 +1,97 @@ +// Copyright 2018 Mozilla +// +// Licensed under the Apache License, Version 2.0 (the "License"); you may not use +// this file except in compliance with the License. 
You may obtain a copy of the +// License at http://www.apache.org/licenses/LICENSE-2.0 +// Unless required by applicable law or agreed to in writing, software distributed +// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR +// CONDITIONS OF ANY KIND, either express or implied. See the License for the +// specific language governing permissions and limitations under the License. +#![cfg(all(feature = "db-dup-sort", feature = "db-int-key"))] +#![allow(clippy::many_single_char_names)] + +use std::fs; + +use serde_derive::Serialize; +use tempfile::Builder; + +use rkv::{ + backend::Lmdb, + PrimitiveInt, + Rkv, + StoreOptions, + Value, +}; + +#[test] +fn test_multi_integer_keys() { + let root = Builder::new().prefix("test_integer_keys").tempdir().expect("tempdir"); + fs::create_dir_all(root.path()).expect("dir created"); + + let k = Rkv::new::<Lmdb>(root.path()).expect("new succeeded"); + let s = k.open_multi_integer("s", StoreOptions::create()).expect("open"); + + macro_rules! 
test_integer_keys { + ($store:expr, $key:expr) => {{ + let mut writer = k.write().expect("writer"); + + $store.put(&mut writer, $key, &Value::Str("hello1")).expect("write"); + $store.put(&mut writer, $key, &Value::Str("hello2")).expect("write"); + $store.put(&mut writer, $key, &Value::Str("hello3")).expect("write"); + let vals = $store + .get(&writer, $key) + .expect("read") + .map(|result| result.expect("ok")) + .map(|(_, v)| v) + .collect::<Vec<Value>>(); + assert_eq!(vals, vec![Value::Str("hello1"), Value::Str("hello2"), Value::Str("hello3")]); + writer.commit().expect("committed"); + + let reader = k.read().expect("reader"); + let vals = $store + .get(&reader, $key) + .expect("read") + .map(|result| result.expect("ok")) + .map(|(_, v)| v) + .collect::<Vec<Value>>(); + assert_eq!(vals, vec![Value::Str("hello1"), Value::Str("hello2"), Value::Str("hello3")]); + }}; + } + + // The integer module provides only the u32 integer key variant + // of IntegerStore, so we can use it without further ado. + test_integer_keys!(s, std::u32::MIN); + test_integer_keys!(s, std::u32::MAX); + + // If you want to use another integer key variant, you need to implement + // a newtype, implement PrimitiveInt, and implement or derive Serialize + // for it. Here we do so for the i32 type. + + // DANGER! Doing this enables you to open a store with multiple, + // different integer key types, which may result in unexpected behavior. + // Make sure you know what you're doing! 
+ + let t = k.open_multi_integer("s", StoreOptions::create()).expect("open"); + + #[derive(Serialize)] + struct I32(i32); + impl PrimitiveInt for I32 {} + test_integer_keys!(t, I32(std::i32::MIN)); + test_integer_keys!(t, I32(std::i32::MAX)); + + let u = k.open_multi_integer("s", StoreOptions::create()).expect("open"); + + #[derive(Serialize)] + struct U16(u16); + impl PrimitiveInt for U16 {} + test_integer_keys!(u, U16(std::u16::MIN)); + test_integer_keys!(u, U16(std::u16::MAX)); + + let v = k.open_multi_integer("s", StoreOptions::create()).expect("open"); + + #[derive(Serialize)] + struct U64(u64); + impl PrimitiveInt for U64 {} + test_integer_keys!(v, U64(std::u64::MIN)); + test_integer_keys!(v, U64(std::u64::MAX)); +} diff --git a/third_party/rust/rkv/tests/test_txn.rs b/third_party/rust/rkv/tests/test_txn.rs new file mode 100644 index 0000000000..8e63924e0f --- /dev/null +++ b/third_party/rust/rkv/tests/test_txn.rs @@ -0,0 +1,132 @@ +// Copyright 2018-2019 Mozilla +// +// Licensed under the Apache License, Version 2.0 (the "License"); you may not use +// this file except in compliance with the License. You may obtain a copy of the +// License at http://www.apache.org/licenses/LICENSE-2.0 +// Unless required by applicable law or agreed to in writing, software distributed +// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR +// CONDITIONS OF ANY KIND, either express or implied. See the License for the +// specific language governing permissions and limitations under the License. 
+#![cfg(feature = "db-dup-sort")] + +use std::fs; + +use tempfile::Builder; + +use rkv::{ + backend::{ + Lmdb, + LmdbDatabase, + LmdbRoCursor, + LmdbRwTransaction, + }, + Readable, + Rkv, + StoreOptions, + Value, + Writer, +}; + +/// Consider a struct like this: +/// struct Sample { +/// id: u64, +/// value: String, +/// date: String, +/// } +/// We would like to index all of the fields so that we can search for the struct not only +/// by ID but also by value and date. When we index the fields individually in their own +/// tables, it is important that we run all operations within a single transaction to +/// ensure coherence of the indices. +/// This test features helper functions for reading and writing the parts of the struct. +/// Note that the reader functions take `Readable` because they might run within a Read +/// Transaction or a Write Transaction. The test demonstrates fetching values via both. + +type SingleStore = rkv::SingleStore<LmdbDatabase>; +type MultiStore = rkv::MultiStore<LmdbDatabase>; + +#[test] +fn read_many() { + let root = Builder::new().prefix("test_txns").tempdir().expect("tempdir"); + fs::create_dir_all(root.path()).expect("dir created"); + let k = Rkv::new::<Lmdb>(root.path()).expect("new succeeded"); + let samplestore = k.open_single("s", StoreOptions::create()).expect("open"); + let datestore = k.open_multi("m", StoreOptions::create()).expect("open"); + let valuestore = k.open_multi("m", StoreOptions::create()).expect("open"); + + { + let mut writer = k.write().expect("env write lock"); + + for id in 0..30_u64 { + let value = format!("value{}", id); + let date = format!("2019-06-{}", id); + put_id_field(&mut writer, datestore, &date, id); + put_id_field(&mut writer, valuestore, &value, id); + put_sample(&mut writer, samplestore, id, &value); + } + + // now we read in the same transaction + for id in 0..30_u64 { + let value = format!("value{}", id); + let date = format!("2019-06-{}", id); + let ids = get_ids_by_field(&writer, 
datestore, &date); + let ids2 = get_ids_by_field(&writer, valuestore, &value); + let samples = get_samples(&writer, samplestore, &ids); + let samples2 = get_samples(&writer, samplestore, &ids2); + println!("{:?}, {:?}", samples, samples2); + } + } + + { + let reader = k.read().expect("env read lock"); + for id in 0..30_u64 { + let value = format!("value{}", id); + let date = format!("2019-06-{}", id); + let ids = get_ids_by_field(&reader, datestore, &date); + let ids2 = get_ids_by_field(&reader, valuestore, &value); + let samples = get_samples(&reader, samplestore, &ids); + let samples2 = get_samples(&reader, samplestore, &ids2); + println!("{:?}, {:?}", samples, samples2); + } + } +} + +fn get_ids_by_field<'t, T>(txn: &'t T, store: MultiStore, field: &'t str) -> Vec<u64> +where + T: Readable<'t, Database = LmdbDatabase, RoCursor = LmdbRoCursor<'t>>, +{ + store + .get(txn, field) + .expect("get iterator") + .map(|id| { + match id.expect("field") { + (_, Value::U64(id)) => id, + _ => panic!("getting value in iter"), + } + }) + .collect::<Vec<u64>>() +} + +fn get_samples<'t, T>(txn: &'t T, samplestore: SingleStore, ids: &[u64]) -> Vec<String> +where + T: Readable<'t, Database = LmdbDatabase, RoCursor = LmdbRoCursor<'t>>, +{ + ids.iter() + .map(|id| { + let bytes = id.to_be_bytes(); + match samplestore.get(txn, &bytes).expect("fetch sample") { + Some(Value::Str(sample)) => String::from(sample), + Some(_) => panic!("wrong type"), + None => panic!("no sample for this id!"), + } + }) + .collect::<Vec<String>>() +} + +fn put_sample(txn: &mut Writer<LmdbRwTransaction>, samplestore: SingleStore, id: u64, value: &str) { + let idbytes = id.to_be_bytes(); + samplestore.put(txn, &idbytes, &Value::Str(value)).expect("put id"); +} + +fn put_id_field(txn: &mut Writer<LmdbRwTransaction>, store: MultiStore, field: &str, id: u64) { + store.put(txn, field, &Value::U64(id)).expect("put id"); +} |