From 10ee2acdd26a7f1298c6f6d6b7af9b469fe29b87 Mon Sep 17 00:00:00 2001
From: Daniel Baumann
Date: Sat, 4 May 2024 14:41:41 +0200
Subject: Merging upstream version 1.70.0+dfsg2.

Signed-off-by: Daniel Baumann
---
 vendor/crypto-bigint/.cargo-checksum.json     |   1 +
 vendor/crypto-bigint/CHANGELOG.md             | 273 ++++++++++++++
 vendor/crypto-bigint/Cargo.toml               | 107 ++++++
 vendor/crypto-bigint/LICENSE-APACHE           | 201 +++++++++++
 vendor/crypto-bigint/LICENSE-MIT              |  25 ++
 vendor/crypto-bigint/README.md                |  64 ++++
 vendor/crypto-bigint/src/array.rs             |  41 +++
 vendor/crypto-bigint/src/checked.rs           | 131 +++++++
 vendor/crypto-bigint/src/lib.rs               | 199 +++++++++++
 vendor/crypto-bigint/src/limb.rs              | 189 ++++++++++
 vendor/crypto-bigint/src/limb/add.rs          | 180 ++++++++++
 vendor/crypto-bigint/src/limb/bit_and.rs      |  21 ++
 vendor/crypto-bigint/src/limb/bit_not.rs      |  19 +
 vendor/crypto-bigint/src/limb/bit_or.rs       |  19 +
 vendor/crypto-bigint/src/limb/bit_xor.rs      |  19 +
 vendor/crypto-bigint/src/limb/bits.rs         |   8 +
 vendor/crypto-bigint/src/limb/cmp.rs          | 176 +++++++++
 vendor/crypto-bigint/src/limb/encoding.rs     |  67 ++++
 vendor/crypto-bigint/src/limb/from.rs         |  76 ++++
 vendor/crypto-bigint/src/limb/mul.rs          | 195 ++++++++++
 vendor/crypto-bigint/src/limb/rand.rs         |  45 +++
 vendor/crypto-bigint/src/limb/shl.rs          |  73 ++++
 vendor/crypto-bigint/src/limb/shr.rs          |  73 ++++
 vendor/crypto-bigint/src/limb/sub.rs          | 182 ++++++++++
 vendor/crypto-bigint/src/nlimbs.rs            |  29 ++
 vendor/crypto-bigint/src/non_zero.rs          | 385 ++++++++++++++++++++
 vendor/crypto-bigint/src/traits.rs            | 227 ++++++++++++
 vendor/crypto-bigint/src/uint.rs              | 484 +++++++++++++++++++++++++
 vendor/crypto-bigint/src/uint/add.rs          | 189 ++++++++++
 vendor/crypto-bigint/src/uint/add_mod.rs      | 139 ++++++++
 vendor/crypto-bigint/src/uint/array.rs        | 189 ++++++++++
 vendor/crypto-bigint/src/uint/bit_and.rs      | 145 ++++++++
 vendor/crypto-bigint/src/uint/bit_not.rs      |  48 +++
 vendor/crypto-bigint/src/uint/bit_or.rs       | 141 ++++++++
 vendor/crypto-bigint/src/uint/bit_xor.rs      | 141 ++++++++
 vendor/crypto-bigint/src/uint/bits.rs         |  55 +++
 vendor/crypto-bigint/src/uint/cmp.rs          | 196 ++++++++++
 vendor/crypto-bigint/src/uint/concat.rs       |  60 ++++
 vendor/crypto-bigint/src/uint/div.rs          | 496 ++++++++++++++++++++++++++
 vendor/crypto-bigint/src/uint/encoding.rs     | 278 +++++++++++++++
 vendor/crypto-bigint/src/uint/encoding/der.rs |  69 ++++
 vendor/crypto-bigint/src/uint/encoding/rlp.rs |  79 ++++
 vendor/crypto-bigint/src/uint/from.rs         | 238 ++++++++++++
 vendor/crypto-bigint/src/uint/inv_mod.rs      |  62 ++++
 vendor/crypto-bigint/src/uint/mul.rs          | 246 +++++++++++++
 vendor/crypto-bigint/src/uint/mul_mod.rs      | 131 +++++++
 vendor/crypto-bigint/src/uint/neg_mod.rs      |  68 ++++
 vendor/crypto-bigint/src/uint/rand.rs         |  92 +++++
 vendor/crypto-bigint/src/uint/resize.rs       |  37 ++
 vendor/crypto-bigint/src/uint/shl.rs          | 134 +++++++
 vendor/crypto-bigint/src/uint/shr.rs          |  93 +++++
 vendor/crypto-bigint/src/uint/split.rs        |  58 +++
 vendor/crypto-bigint/src/uint/sqrt.rs         | 145 ++++++++
 vendor/crypto-bigint/src/uint/sub.rs          | 192 ++++++++++
 vendor/crypto-bigint/src/uint/sub_mod.rs      | 182 ++++++++++
 vendor/crypto-bigint/src/wrapping.rs          | 108 ++++++
 vendor/crypto-bigint/tests/proptests.rs       | 209 +++++++++++
 57 files changed, 7729 insertions(+)
 create mode 100644 vendor/crypto-bigint/.cargo-checksum.json
 create mode 100644 vendor/crypto-bigint/CHANGELOG.md
 create mode 100644 vendor/crypto-bigint/Cargo.toml
 create mode 100644 vendor/crypto-bigint/LICENSE-APACHE
 create mode 100644 vendor/crypto-bigint/LICENSE-MIT
 create mode 100644 vendor/crypto-bigint/README.md
 create mode 100644 vendor/crypto-bigint/src/array.rs
 create mode 100644 vendor/crypto-bigint/src/checked.rs
 create mode 100644 vendor/crypto-bigint/src/lib.rs
 create mode 100644 vendor/crypto-bigint/src/limb.rs
 create mode 100644 vendor/crypto-bigint/src/limb/add.rs
 create mode 100644 vendor/crypto-bigint/src/limb/bit_and.rs
 create mode 100644 vendor/crypto-bigint/src/limb/bit_not.rs
 create mode 100644 vendor/crypto-bigint/src/limb/bit_or.rs
 create mode 100644 vendor/crypto-bigint/src/limb/bit_xor.rs
 create mode 100644 vendor/crypto-bigint/src/limb/bits.rs
 create mode 100644 vendor/crypto-bigint/src/limb/cmp.rs
 create mode 100644 vendor/crypto-bigint/src/limb/encoding.rs
 create mode 100644 vendor/crypto-bigint/src/limb/from.rs
 create mode 100644 vendor/crypto-bigint/src/limb/mul.rs
 create mode 100644 vendor/crypto-bigint/src/limb/rand.rs
 create mode 100644 vendor/crypto-bigint/src/limb/shl.rs
 create mode 100644 vendor/crypto-bigint/src/limb/shr.rs
 create mode 100644 vendor/crypto-bigint/src/limb/sub.rs
 create mode 100644 vendor/crypto-bigint/src/nlimbs.rs
 create mode 100644 vendor/crypto-bigint/src/non_zero.rs
 create mode 100644 vendor/crypto-bigint/src/traits.rs
 create mode 100644 vendor/crypto-bigint/src/uint.rs
 create mode 100644 vendor/crypto-bigint/src/uint/add.rs
 create mode 100644 vendor/crypto-bigint/src/uint/add_mod.rs
 create mode 100644 vendor/crypto-bigint/src/uint/array.rs
 create mode 100644 vendor/crypto-bigint/src/uint/bit_and.rs
 create mode 100644 vendor/crypto-bigint/src/uint/bit_not.rs
 create mode 100644 vendor/crypto-bigint/src/uint/bit_or.rs
 create mode 100644 vendor/crypto-bigint/src/uint/bit_xor.rs
 create mode 100644 vendor/crypto-bigint/src/uint/bits.rs
 create mode 100644 vendor/crypto-bigint/src/uint/cmp.rs
 create mode 100644 vendor/crypto-bigint/src/uint/concat.rs
 create mode 100644 vendor/crypto-bigint/src/uint/div.rs
 create mode 100644 vendor/crypto-bigint/src/uint/encoding.rs
 create mode 100644 vendor/crypto-bigint/src/uint/encoding/der.rs
 create mode 100644 vendor/crypto-bigint/src/uint/encoding/rlp.rs
 create mode 100644 vendor/crypto-bigint/src/uint/from.rs
 create mode 100644 vendor/crypto-bigint/src/uint/inv_mod.rs
 create mode 100644 vendor/crypto-bigint/src/uint/mul.rs
 create mode 100644 vendor/crypto-bigint/src/uint/mul_mod.rs
 create mode 100644 vendor/crypto-bigint/src/uint/neg_mod.rs
 create mode 100644 vendor/crypto-bigint/src/uint/rand.rs
 create mode 100644 vendor/crypto-bigint/src/uint/resize.rs
 create mode 100644 vendor/crypto-bigint/src/uint/shl.rs
 create mode 100644 vendor/crypto-bigint/src/uint/shr.rs
 create mode 100644 vendor/crypto-bigint/src/uint/split.rs
 create mode 100644 vendor/crypto-bigint/src/uint/sqrt.rs
 create mode 100644 vendor/crypto-bigint/src/uint/sub.rs
 create mode 100644 vendor/crypto-bigint/src/uint/sub_mod.rs
 create mode 100644 vendor/crypto-bigint/src/wrapping.rs
 create mode 100644 vendor/crypto-bigint/tests/proptests.rs
(limited to 'vendor/crypto-bigint')

diff --git a/vendor/crypto-bigint/.cargo-checksum.json b/vendor/crypto-bigint/.cargo-checksum.json
new file mode 100644
index 000000000..49e1f2cba
--- /dev/null
+++ b/vendor/crypto-bigint/.cargo-checksum.json
@@ -0,0 +1 @@
+{"files":{"CHANGELOG.md":"51183f8239b77b2cf3214eba6cd149953e42088e38eb638a0eeb963ab2514497","Cargo.toml":"d6e917974ab2117117fac4dbb2d14d994c462e85c01f3a85e04410b89467bc60","LICENSE-APACHE":"a9040321c3712d8fd0b09cf52b17445de04a23a10165049ae187cd39e5c86be5","LICENSE-MIT":"90c503b61dee04e1449c323ec34c229dfb68d7adcb96c7e140ee55f70fce2d8e","README.md":"201b065b69baf2ee4e055b0d342864788c5d32744de2d8a8bea1b3dbfa5bfdd6","src/array.rs":"242b83d3b3a89f68e2c7b172df28a30cf6a5a1176c272b0512a6518a138ad5a3","src/checked.rs":"b2bc6e7c8886c7002ab5daa4dcc5c1c7d82a7b41f45d3112c7b623aec07941a9","src/lib.rs":"9a72c010366b0f67f3ef8636c34da5b6891e8f0d7cd8bdaf3a0ed359b3cb1cfa","src/limb.rs":"9cf16fdeef8336bd765a5bb7ff3137cf20d3c61aca049771f5113a3bcaffa1e2","src/limb/add.rs":"a6d02510472feef5d51c9dbdc59f4f9da2e442c5510a488f50b7af62f9a1eb8a","src/limb/bit_and.rs":"019f888b11e16fafbd0518eb5b583094a5e8490f59f458195db9306d86e1adaa","src/limb/bit_not.rs":"0fa7cdec58e1d3058daee88ef26f07ad0ba9e545505c97a130b47ef3fdf46c5a","src/limb/bit_or.rs":"1b5f85eac6768d460070da05b4a1257cceafff2b612b63a609aff3ff48df8480","src/limb/bit_xor.rs":"06acf291a316aa1fa2311bedd1a7aca5170564735c1f0b1103ed9e2c6f24f096","src/limb/bits.rs":"8f21514d463196fc01c9d2e5d13ea77af4ea7e6ee267c6d71f28e5d616811859","src/limb/cmp.rs":"169984e53efd043af5dd9ad0535685ed27cb60e36a80e3da992d40dcca3d255b","src/limb/encoding.rs":"a8962dd9046510f692293be7b049bbeed7ea57d26dcfcdae35dd796cf14bae96","src/limb/from.rs":"6217abd3d2380e099b332946fe87ed856b3a59c0f7885f85e4f1f0aad89fe8d4","src/limb/mul.rs":"342e20b79749683ccf9b77ee90dbbd666a24c6d50c78c187d54f90ad5b032cdd","src/limb/rand.rs":"495d5426ae12072e86435eae765ee8bc6cdee297c0552096ce28bb589600cf94","src/limb/shl.rs":"8d0cd4029375c7a66c0b72e21a8634a805cd5cc8c78320c1466f16ee0b16dbc7","src/limb/shr.rs":"c60b47739213722f7b4e7473d40be2f207484efbf8344cb7920558870aa5169c","src/limb/sub.rs":"6fe15fc5a2dcac9a23a072a4976db432c35805d2afdbfec44f18530f164f03d0","src/nlimbs.rs":"8938fb529a3f71396372df9d651f93ae6e399398e17c391c6d2a0a7352b21512","src/non_zero.rs":"cee6eec57b7489cdf4bbf088dcff554d0f5d9a4932a418ebd0c5d0e5d79503dd","src/traits.rs":"629251720bd7d54fb891c328326cd22fff37963239fb21f6d1afa677d1f9fa44","src/uint.rs":"b5494b6da6ba4f4adae6e6c2eecc0ad1d27728b6ec36e211798a48b33a3e1cf3","src/uint/add.rs":"4613f40921c6f02da14a1b066d949d8b95ea5a47c8d04a72e656c4257d481328","src/uint/add_mod.rs":"e6d938be5315a635348c274e961b07783a4d45b5f510a680b798b31044380cc2","src/uint/array.rs":"87d4daa5eb5a0601a186eb8759261f826e939317f14202564031b7a25b62e799","src/uint/bit_and.rs":"0a863260b07c900c89d3664171e415e3e7889a09c3f368fa6c3dc3ad4e3ffb7b","src/uint/bit_not.rs":"19cd1a760810651d66e488592a4c4d3451207caa269fe14b6efb69ee1acfcc44","src/uint/bit_or.rs":"a44b27ebb8821e8b029f173c4582bd4ff6e487f65c7ecba6861d43110cb1761a","src/uint/bit_xor.rs":"8c2011b1e51fbc8efbb1529d148e29b7e487da2b067f258b279eb32eee4f8504","src/uint/bits.rs":"955f63ad4dc51aabd26ef5c545bd185d99b5e7f5c4164a8da1dc28f39ebce509","src/uint/cmp.rs":"0db7e6f8bb362ae41c9a11e4ed97c79d2c206a9569542b55ccad8d2b29b406f7","src/uint/concat.rs":"b7efee5bb9e0a91cf4b3f24e16c2757de6fcfaec8ed94d7fbebb1fe66d5324e4","src/uint/div.rs":"6a16e6382a12bcce20ed7ad510f346972327fed81e29016e802801780dc0c12c","src/uint/encoding.rs":"566a4a739880c8f83dd3550cf5bbbc1fde5c7f2fb816253ebb46fe36301aa55b","src/uint/encoding/der.rs":"55b26c697d0886cd19e6b32480bbfd0c4ea600f7d89d26721c436685e7941b9b","src/uint/encoding/rlp.rs":"965f485eef2cac0b1316136119b83955d93ab6cb7f71cf93dfc94cb6a7938464","src/uint/from.rs":"25e97ecb
e641560103ad611dbfbc5eb89fce08ae14a4a148e096f2b5fc882f7b","src/uint/inv_mod.rs":"e860561ab73822e01a03412589f2a064102830c3cf67b92b2e15ec14a6efebe2","src/uint/mul.rs":"1bbad1aa10897c7ebc90880009eee92896a6b6f0c490f93de7a6e3a97f3d0f18","src/uint/mul_mod.rs":"c0e6480b871606d61454f7e21459cd3264af69348909fcd9d2d761bce650083a","src/uint/neg_mod.rs":"8e37cd1d4f1d69f946be57fda2f8672731c09e334386478df0dcdab700abd0b6","src/uint/rand.rs":"a2b3f60a76e5117dee3320694e3a731fc123fd86c6685c7e63fd0f1578105131","src/uint/resize.rs":"f5c7a693492b02c4e2995f86cf814e87e47ed246bb2a8409df527318b163bec2","src/uint/shl.rs":"67850ed75e2e9acd82fb8dd88438938b7eda2093ddb18ce2ba320d94f0ae0d89","src/uint/shr.rs":"63f862573ece17b693dd20ff610557504b114512b194fed28bfab01961c87360","src/uint/split.rs":"f8038b0d00161a0ce6781b645b7de7fb7b0377dfb4e29c4faa150a685a0d8c67","src/uint/sqrt.rs":"35842add9b503a7f4694940ff5e975756c9f6c8ce0d99c8eecf8a710924c0ec0","src/uint/sub.rs":"37b87756e967b1ffdc4ba6284435940023974c49d4b3ec646d31c228da2e13e5","src/uint/sub_mod.rs":"9bf319958df7abfc8eaa2da740bde7072c41d263647b57312701fc597284f60f","src/wrapping.rs":"8d2ed844ce2978ac4f89d73eb2aca968664266ec811f53e8992717b499564a81","tests/proptests.rs":"58d142bf9666ae7b1b5eaf4a1a2e54d8d646a6a89c1aeaddeab5d22307111c22"},"package":"ef2b4b23cddf68b89b8f8069890e8c270d54e2d5fe1b143820234805e4cb17ef"} \ No newline at end of file diff --git a/vendor/crypto-bigint/CHANGELOG.md b/vendor/crypto-bigint/CHANGELOG.md new file mode 100644 index 000000000..58dcb4575 --- /dev/null +++ b/vendor/crypto-bigint/CHANGELOG.md @@ -0,0 +1,273 @@ +# Changelog +All notable changes to this project will be documented in this file. + +The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), +and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html). 
+ +## 0.4.9 (2022-10-11) +### Added +- `UInt::from_word` and `::from_wide_word` ([#105]) +- `UInt` modulo operations for special moduli ([#108]) +- Non-const `UInt` decoding from an array ([#110]) +- `const fn` impls of `concat` and `split` ([#111]) +- `Limb` left/right bitshifts ([#112]) +- `UInt::LIMBS` constant ([#114]) + +### Changed +- Optimize `UInt::neg_mod` by simply calling `::sub_mod` ([#106]) +- Relax bounds for `UInt::add_mod` and `::sub_mod` ([#104]) +- Always inline `Limb::bitand` ([#109]) +- Faster const decoding of UInt ([#113]) +- Optimize `UInt::neg_mod` ([#127]) +- Faster comparisons ([#128]) +- `UInt::resize` ([#129]) +- `UInt::bit` accessor methods ([#122]) + +### Fixed +- Constant-time behaviour for `ct_reduce`/`ct_div_rem` ([#117]) + +[#104]: https://github.com/RustCrypto/crypto-bigint/pull/104 +[#105]: https://github.com/RustCrypto/crypto-bigint/pull/105 +[#106]: https://github.com/RustCrypto/crypto-bigint/pull/106 +[#108]: https://github.com/RustCrypto/crypto-bigint/pull/108 +[#109]: https://github.com/RustCrypto/crypto-bigint/pull/109 +[#110]: https://github.com/RustCrypto/crypto-bigint/pull/110 +[#111]: https://github.com/RustCrypto/crypto-bigint/pull/111 +[#112]: https://github.com/RustCrypto/crypto-bigint/pull/112 +[#113]: https://github.com/RustCrypto/crypto-bigint/pull/113 +[#114]: https://github.com/RustCrypto/crypto-bigint/pull/114 +[#117]: https://github.com/RustCrypto/crypto-bigint/pull/117 +[#122]: https://github.com/RustCrypto/crypto-bigint/pull/122 +[#127]: https://github.com/RustCrypto/crypto-bigint/pull/127 +[#128]: https://github.com/RustCrypto/crypto-bigint/pull/128 +[#129]: https://github.com/RustCrypto/crypto-bigint/pull/129 + +## 0.4.8 (2022-06-30) +### Added +- `Word` as a replacement for `LimbUInt` ([#88]) +- `WideWord` as a replacement for `WideLimbUInt` ([#88]) +- `UInt::*_words` as a replacement for `UInt::*_uint_array` ([#88]) + +### Changed +- Deprecated `*LimbUInt` and `UInt::*_uint_array` ([#88]) + +[#88]: https://github.com/RustCrypto/crypto-bigint/pull/88 + +## 0.4.7 (2022-06-12) +### Added +- `Encoding` tests ([#93]) + +### Changed +- Use const generic impls of `*Mod` traits ([#98]) + +[#93]: https://github.com/RustCrypto/crypto-bigint/pull/93 +[#98]: https://github.com/RustCrypto/crypto-bigint/pull/98 + +## 0.4.6 (2022-06-12) +### Added +- Impl `ArrayEncoding` for `U576` ([#96]) + +[#96]: https://github.com/RustCrypto/crypto-bigint/pull/96 + +## 0.4.5 (2022-06-12) +### Added +- `serde` support ([#73]) +- `U576` type alias ([#94]) + +[#73]: https://github.com/RustCrypto/crypto-bigint/pull/73 +[#94]: https://github.com/RustCrypto/crypto-bigint/pull/94 + +## 0.4.4 (2022-06-02) +### Added +- `UInt::as_uint_array` ([#91]) + +[#91]: https://github.com/RustCrypto/crypto-bigint/pull/91 + +## 0.4.3 (2022-05-31) +### Added +- Impl `AsRef`/`AsMut<[LimbUInt]>` for `UInt` ([#89]) + +[#89]: https://github.com/RustCrypto/crypto-bigint/pull/89 + +## 0.4.2 (2022-05-18) +### Added +- `UInt::inv_mod2k` ([#86]) + +### Fixed +- Wrong results for remainder ([#84]) + +[#84]: https://github.com/RustCrypto/crypto-bigint/pull/84 +[#86]: https://github.com/RustCrypto/crypto-bigint/pull/86 + +## 0.4.1 (2022-05-10) +### Fixed +- Bug in `from_le_slice` ([#82]) + +[#82]: https://github.com/RustCrypto/crypto-bigint/pull/82 + +## 0.4.0 (2022-05-08) [YANKED] + +NOTE: this release was yanked due to [#82]. 
+ +### Added +- Const-friendly `NonZero` from `UInt` ([#56]) +- Optional `der` feature ([#61], [#80]) + +### Changed +- Use `const_panic`; MSRV 1.57 ([#60]) +- 2021 edition ([#60]) + +### Fixed +- Pad limbs with zeros when displaying hexadecimal representation ([#74]) + +[#56]: https://github.com/RustCrypto/crypto-bigint/pull/56 +[#60]: https://github.com/RustCrypto/crypto-bigint/pull/60 +[#61]: https://github.com/RustCrypto/crypto-bigint/pull/61 +[#74]: https://github.com/RustCrypto/crypto-bigint/pull/74 +[#80]: https://github.com/RustCrypto/crypto-bigint/pull/80 + +## 0.3.2 (2021-11-17) +### Added +- `Output = Self` to all bitwise ops on `Integer` trait ([#53]) + +[#53]: https://github.com/RustCrypto/crypto-bigint/pull/53 + +## 0.3.1 (2021-11-17) +### Added +- Bitwise ops to `Integer` trait ([#51]) + +[#51]: https://github.com/RustCrypto/crypto-bigint/pull/51 + +## 0.3.0 (2021-11-14) [YANKED] +### Added +- Bitwise `Xor`/`Not` operations ([#27]) +- `Zero` trait ([#35]) +- `Checked*` traits ([#41]) +- `prelude` module ([#45]) +- `saturating_*` ops ([#47]) + +### Changed +- Rust 2021 edition upgrade; MSRV 1.56 ([#33]) +- Reverse ordering of `UInt::mul_wide` return tuple ([#34]) +- Have `Div` and `Rem` impls always take `NonZero` args ([#39]) +- Rename `limb::Inner` to `LimbUInt` ([#40]) +- Make `limb` module private ([#40]) +- Use `Zero`/`Integer` traits for `is_zero`, `is_odd`, and `is_even` ([#46]) + +### Fixed +- `random_mod` performance for small moduli ([#36]) +- `NonZero` moduli ([#36]) + +### Removed +- Deprecated `LIMB_BYTES` constant ([#43]) + +[#27]: https://github.com/RustCrypto/crypto-bigint/pull/27 +[#33]: https://github.com/RustCrypto/crypto-bigint/pull/33 +[#34]: https://github.com/RustCrypto/crypto-bigint/pull/34 +[#35]: https://github.com/RustCrypto/crypto-bigint/pull/35 +[#36]: https://github.com/RustCrypto/crypto-bigint/pull/36 +[#39]: https://github.com/RustCrypto/crypto-bigint/pull/39 +[#40]: https://github.com/RustCrypto/crypto-bigint/pull/40 +[#41]: https://github.com/RustCrypto/crypto-bigint/pull/41 +[#43]: https://github.com/RustCrypto/crypto-bigint/pull/43 +[#45]: https://github.com/RustCrypto/crypto-bigint/pull/45 +[#46]: https://github.com/RustCrypto/crypto-bigint/pull/46 +[#47]: https://github.com/RustCrypto/crypto-bigint/pull/47 + +## 0.2.11 (2021-10-16) +### Added +- `AddMod` proptests ([#24]) +- Bitwise `And`/`Or` operations ([#25]) + +[#24]: https://github.com/RustCrypto/crypto-bigint/pull/24 +[#25]: https://github.com/RustCrypto/crypto-bigint/pull/25 + +## 0.2.10 (2021-09-21) +### Added +- `ArrayDecoding` trait ([#12]) +- `NonZero` wrapper ([#13], [#16]) +- Impl `Div`/`Rem` for `NonZero` ([#14]) + +[#12]: https://github.com/RustCrypto/crypto-bigint/pull/12 +[#13]: https://github.com/RustCrypto/crypto-bigint/pull/13 +[#14]: https://github.com/RustCrypto/crypto-bigint/pull/14 +[#16]: https://github.com/RustCrypto/crypto-bigint/pull/16 + +## 0.2.9 (2021-09-16) +### Added +- `UInt::sqrt` ([#9]) + +### Changed +- Make `UInt` division similar to other interfaces ([#8]) + +[#8]: https://github.com/RustCrypto/crypto-bigint/pull/8 +[#9]: https://github.com/RustCrypto/crypto-bigint/pull/9 + +## 0.2.8 (2021-09-14) [YANKED] +### Added +- Implement constant-time division and modulo operations + +### Changed +- Moved from RustCrypto/utils to RustCrypto/crypto-bigint repo ([#2]) + +[#2]: https://github.com/RustCrypto/crypto-bigint/pull/2 + +## 0.2.7 (2021-09-12) +### Added +- `UInt::shl_vartime` + +### Fixed +- `add_mod` overflow handling + +## 0.2.6 (2021-09-08) +### 
Added +- `Integer` trait +- `ShrAssign` impl for `UInt` +- Recursive Length Prefix (RLP) encoding support for `UInt` + +## 0.2.5 (2021-09-02) +### Fixed +- `ConditionallySelectable` impl for `UInt` + +## 0.2.4 (2021-08-23) [YANKED] +### Added +- Expose `limb` module +- `[limb::Inner; LIMBS]` conversions for `UInt` +- Bitwise right shift support for `UInt` ([#586], [#590]) + +## 0.2.3 (2021-08-16) [YANKED] +### Fixed +- `UInt::wrapping_mul` + +### Added +- Implement the `Hash` trait for `UInt` and `Limb` + +## 0.2.2 (2021-06-26) [YANKED] +### Added +- `Limb::is_odd` and `UInt::is_odd` +- `UInt::new` +- `rand` feature + +### Changed +- Deprecate `LIMB_BYTES` constant +- Make `Limb`'s `Inner` value public + +## 0.2.1 (2021-06-21) [YANKED] +### Added +- `Limb` newtype +- Target-specific rustdocs + +## 0.2.0 (2021-06-07) [YANKED] +### Added +- `ConstantTimeGreater`/`ConstantTimeLess` impls for UInt +- `From` conversions between `UInt` and limb arrays +- `zeroize` feature +- Additional `ArrayEncoding::ByteSize` bounds +- `UInt::into_limbs` +- `Encoding` trait + +### Removed +- `NumBits`/`NumBytes` traits; use `Encoding` instead + +## 0.1.0 (2021-05-30) +- Initial release diff --git a/vendor/crypto-bigint/Cargo.toml b/vendor/crypto-bigint/Cargo.toml new file mode 100644 index 000000000..20d8afd9f --- /dev/null +++ b/vendor/crypto-bigint/Cargo.toml @@ -0,0 +1,107 @@ +# THIS FILE IS AUTOMATICALLY GENERATED BY CARGO +# +# When uploading crates to the registry Cargo will automatically +# "normalize" Cargo.toml files for maximal compatibility +# with all versions of Cargo and also rewrite `path` dependencies +# to registry (e.g., crates.io) dependencies. +# +# If you are reading this file be aware that the original Cargo.toml +# will likely look very different (and much more reasonable). +# See Cargo.toml.orig for the original contents. + +[package] +edition = "2021" +rust-version = "1.57" +name = "crypto-bigint" +version = "0.4.9" +authors = ["RustCrypto Developers"] +description = """ +Pure Rust implementation of a big integer library which has been designed from +the ground-up for use in cryptographic applications. Provides constant-time, +no_std-friendly implementations of modern formulas using const generics. 
+""" +readme = "README.md" +keywords = [ + "arbitrary", + "crypto", + "bignum", + "integer", + "precision", +] +categories = [ + "algorithms", + "cryptography", + "data-structures", + "mathematics", + "no-std", +] +license = "Apache-2.0 OR MIT" +repository = "https://github.com/RustCrypto/crypto-bigint" +resolver = "2" + +[package.metadata.docs.rs] +all-features = true +rustdoc-args = [ + "--cfg", + "docsrs", +] + +[dependencies.der] +version = "0.6" +optional = true +default-features = false + +[dependencies.generic-array] +version = "0.14" +optional = true + +[dependencies.rand_core] +version = "0.6" +optional = true + +[dependencies.rlp] +version = "0.5" +optional = true +default-features = false + +[dependencies.serdect] +version = "0.1" +optional = true +default-features = false + +[dependencies.subtle] +version = "2.4" +default-features = false + +[dependencies.zeroize] +version = "1" +optional = true +default-features = false + +[dev-dependencies.bincode] +version = "1" + +[dev-dependencies.hex-literal] +version = "0.3" + +[dev-dependencies.num-bigint] +version = "0.4" + +[dev-dependencies.num-traits] +version = "0.2" + +[dev-dependencies.proptest] +version = "1" + +[dev-dependencies.rand_chacha] +version = "0.3" + +[dev-dependencies.rand_core] +version = "0.6" +features = ["std"] + +[features] +alloc = [] +default = ["rand"] +rand = ["rand_core/std"] +serde = ["serdect"] diff --git a/vendor/crypto-bigint/LICENSE-APACHE b/vendor/crypto-bigint/LICENSE-APACHE new file mode 100644 index 000000000..78173fa2e --- /dev/null +++ b/vendor/crypto-bigint/LICENSE-APACHE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + +TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + +1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. 
For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + +2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + +3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + +4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + +5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + +6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + +7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + +8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + +9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + +END OF TERMS AND CONDITIONS + +APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + +Copyright [yyyy] [name of copyright owner] + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. diff --git a/vendor/crypto-bigint/LICENSE-MIT b/vendor/crypto-bigint/LICENSE-MIT new file mode 100644 index 000000000..c869ada57 --- /dev/null +++ b/vendor/crypto-bigint/LICENSE-MIT @@ -0,0 +1,25 @@ +Copyright (c) 2021 The RustCrypto Project Developers + +Permission is hereby granted, free of charge, to any +person obtaining a copy of this software and associated +documentation files (the "Software"), to deal in the +Software without restriction, including without +limitation the rights to use, copy, modify, merge, +publish, distribute, sublicense, and/or sell copies of +the Software, and to permit persons to whom the Software +is furnished to do so, subject to the following +conditions: + +The above copyright notice and this permission notice +shall be included in all copies or substantial portions +of the Software. 
+ +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF +ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED +TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A +PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY +CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION +OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR +IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER +DEALINGS IN THE SOFTWARE. diff --git a/vendor/crypto-bigint/README.md b/vendor/crypto-bigint/README.md new file mode 100644 index 000000000..f4e3daba9 --- /dev/null +++ b/vendor/crypto-bigint/README.md @@ -0,0 +1,64 @@ +# [RustCrypto]: Cryptographic Big Integers + +[![crate][crate-image]][crate-link] +[![Docs][docs-image]][docs-link] +[![Build Status][build-image]][build-link] +![Apache2/MIT licensed][license-image] +![Rust Version][rustc-image] +[![Project Chat][chat-image]][chat-link] + +Pure Rust implementation of a big integer library which has been designed from +the ground-up for use in cryptographic applications. + +Provides constant-time, `no_std`-friendly implementations of modern formulas +using const generics. + +[Documentation][docs-link] + +## Goals + +- No heap allocations. `no_std`-friendly. +- Constant-time by default. Variable-time functions are explicitly marked as such. +- Leverage what is possible today with const generics on `stable` rust. +- Support `const fn` as much as possible, including decoding big integers from + bytes/hex and performing arithmetic operations on them, with the goal of + being able to compute values at compile-time. + +## Minimum Supported Rust Version + +This crate requires **Rust 1.57** at a minimum. + +We may change the MSRV in the future, but it will be accompanied by a minor +version bump. + +## License + +Licensed under either of: + +- [Apache License, Version 2.0](http://www.apache.org/licenses/LICENSE-2.0) +- [MIT license](http://opensource.org/licenses/MIT) + +at your option. + +### Contribution + +Unless you explicitly state otherwise, any contribution intentionally submitted +for inclusion in the work by you, as defined in the Apache-2.0 license, shall be +dual licensed as above, without any additional terms or conditions. + +[//]: # (badges) + +[crate-image]: https://buildstats.info/crate/crypto-bigint +[crate-link]: https://crates.io/crates/crypto-bigint +[docs-image]: https://docs.rs/crypto-bigint/badge.svg +[docs-link]: https://docs.rs/crypto-bigint/ +[build-image]: https://github.com/RustCrypto/crypto-bigint/actions/workflows/crypto-bigint.yml/badge.svg +[build-link]: https://github.com/RustCrypto/crypto-bigint/actions/workflows/crypto-bigint.yml +[license-image]: https://img.shields.io/badge/license-Apache2.0/MIT-blue.svg +[rustc-image]: https://img.shields.io/badge/rustc-1.57+-blue.svg +[chat-image]: https://img.shields.io/badge/zulip-join_chat-blue.svg +[chat-link]: https://rustcrypto.zulipchat.com/#narrow/stream/300602-crypto-bigint + +[//]: # (links) + +[RustCrypto]: https://github.com/rustcrypto diff --git a/vendor/crypto-bigint/src/array.rs b/vendor/crypto-bigint/src/array.rs new file mode 100644 index 000000000..ea4f437ef --- /dev/null +++ b/vendor/crypto-bigint/src/array.rs @@ -0,0 +1,41 @@ +//! Interop support for `generic-array` + +use crate::{Encoding, Integer}; +use core::ops::Add; +use generic_array::{typenum::Unsigned, ArrayLength, GenericArray}; + +/// Alias for a byte array whose size is defined by [`ArrayEncoding::ByteSize`]. 
+#[cfg_attr(docsrs, doc(cfg(feature = "generic-array")))] +pub type ByteArray = GenericArray::ByteSize>; + +/// Support for encoding a big integer as a `GenericArray`. +#[cfg_attr(docsrs, doc(cfg(feature = "generic-array")))] +pub trait ArrayEncoding: Encoding { + /// Size of a byte array which encodes a big integer. + type ByteSize: ArrayLength + Add + Eq + Ord + Unsigned; + + /// Deserialize from a big-endian byte array. + fn from_be_byte_array(bytes: ByteArray) -> Self; + + /// Deserialize from a little-endian byte array. + fn from_le_byte_array(bytes: ByteArray) -> Self; + + /// Serialize to a big-endian byte array. + fn to_be_byte_array(&self) -> ByteArray; + + /// Serialize to a little-endian byte array. + fn to_le_byte_array(&self) -> ByteArray; +} + +/// Support for decoding a `GenericArray` as a big integer. +#[cfg_attr(docsrs, doc(cfg(feature = "generic-array")))] +pub trait ArrayDecoding { + /// Big integer which decodes a `GenericArray`. + type Output: ArrayEncoding + Integer; + + /// Deserialize from a big-endian `GenericArray`. + fn into_uint_be(self) -> Self::Output; + + /// Deserialize from a little-endian `GenericArray`. + fn into_uint_le(self) -> Self::Output; +} diff --git a/vendor/crypto-bigint/src/checked.rs b/vendor/crypto-bigint/src/checked.rs new file mode 100644 index 000000000..e02317e32 --- /dev/null +++ b/vendor/crypto-bigint/src/checked.rs @@ -0,0 +1,131 @@ +//! Checked arithmetic. + +use subtle::{Choice, ConditionallySelectable, ConstantTimeEq, CtOption}; + +#[cfg(feature = "serde")] +use serdect::serde::{Deserialize, Deserializer, Serialize, Serializer}; + +/// Provides intentionally-checked arithmetic on `T`. +/// +/// Internally this leverages the [`CtOption`] type from the [`subtle`] crate +/// in order to handle overflows in constant time. +#[derive(Copy, Clone, Debug)] +pub struct Checked(pub CtOption); + +impl Checked { + /// Create a new checked arithmetic wrapper for the given value. 
+ pub fn new(val: T) -> Self { + Self(CtOption::new(val, Choice::from(1))) + } +} + +impl Default for Checked +where + T: Default, +{ + fn default() -> Self { + Self::new(T::default()) + } +} + +impl ConditionallySelectable for Checked { + #[inline] + fn conditional_select(a: &Self, b: &Self, choice: Choice) -> Self { + Self(CtOption::conditional_select(&a.0, &b.0, choice)) + } +} + +impl ConstantTimeEq for Checked { + #[inline] + fn ct_eq(&self, rhs: &Self) -> Choice { + self.0.ct_eq(&rhs.0) + } +} + +impl From> for CtOption { + fn from(checked: Checked) -> CtOption { + checked.0 + } +} + +impl From> for Checked { + fn from(ct_option: CtOption) -> Checked { + Checked(ct_option) + } +} + +impl From> for Option { + fn from(checked: Checked) -> Option { + checked.0.into() + } +} + +#[cfg(feature = "serde")] +#[cfg_attr(docsrs, doc(cfg(feature = "serde")))] +impl<'de, T: Default + Deserialize<'de>> Deserialize<'de> for Checked { + fn deserialize(deserializer: D) -> Result + where + D: Deserializer<'de>, + { + let value = Option::::deserialize(deserializer)?; + let choice = Choice::from(value.is_some() as u8); + Ok(Self(CtOption::new(value.unwrap_or_default(), choice))) + } +} + +#[cfg(feature = "serde")] +#[cfg_attr(docsrs, doc(cfg(feature = "serde")))] +impl<'de, T: Copy + Serialize> Serialize for Checked { + fn serialize(&self, serializer: S) -> Result + where + S: Serializer, + { + Option::::from(self.0).serialize(serializer) + } +} + +#[cfg(all(test, feature = "serde"))] +mod tests { + use crate::{Checked, U64}; + use subtle::{Choice, ConstantTimeEq, CtOption}; + + #[test] + fn serde() { + let test = Checked::new(U64::from_u64(0x0011223344556677)); + + let serialized = bincode::serialize(&test).unwrap(); + let deserialized: Checked = bincode::deserialize(&serialized).unwrap(); + + assert!(bool::from(test.ct_eq(&deserialized))); + + let test = Checked::new(U64::ZERO) - Checked::new(U64::ONE); + assert!(bool::from( + test.ct_eq(&Checked(CtOption::new(U64::ZERO, Choice::from(0)))) + )); + + let serialized = bincode::serialize(&test).unwrap(); + let deserialized: Checked = bincode::deserialize(&serialized).unwrap(); + + assert!(bool::from(test.ct_eq(&deserialized))); + } + + #[test] + fn serde_owned() { + let test = Checked::new(U64::from_u64(0x0011223344556677)); + + let serialized = bincode::serialize(&test).unwrap(); + let deserialized: Checked = bincode::deserialize_from(serialized.as_slice()).unwrap(); + + assert!(bool::from(test.ct_eq(&deserialized))); + + let test = Checked::new(U64::ZERO) - Checked::new(U64::ONE); + assert!(bool::from( + test.ct_eq(&Checked(CtOption::new(U64::ZERO, Choice::from(0)))) + )); + + let serialized = bincode::serialize(&test).unwrap(); + let deserialized: Checked = bincode::deserialize_from(serialized.as_slice()).unwrap(); + + assert!(bool::from(test.ct_eq(&deserialized))); + } +} diff --git a/vendor/crypto-bigint/src/lib.rs b/vendor/crypto-bigint/src/lib.rs new file mode 100644 index 000000000..4d376421c --- /dev/null +++ b/vendor/crypto-bigint/src/lib.rs @@ -0,0 +1,199 @@ +#![no_std] +#![cfg_attr(docsrs, feature(doc_cfg))] +#![doc = include_str!("../README.md")] +#![doc( + html_logo_url = "https://raw.githubusercontent.com/RustCrypto/meta/master/logo.svg", + html_favicon_url = "https://raw.githubusercontent.com/RustCrypto/meta/master/logo.svg" +)] +#![deny(unsafe_code)] +#![warn( + clippy::unwrap_used, + missing_docs, + missing_debug_implementations, + missing_copy_implementations, + rust_2018_idioms, + trivial_casts, + trivial_numeric_casts, + 
unused_qualifications +)] + +//! ## Usage +//! +//! This crate defines a [`UInt`] type which is const generic around an inner +//! [`Limb`] array, where a [`Limb`] is a newtype for a word-sized integer. +//! Thus large integers are represented as a arrays of smaller integers which +//! are sized appropriately for the CPU, giving us some assurances of how +//! arithmetic operations over those smaller integers will behave. +//! +//! To obtain appropriately sized integers regardless of what a given CPU's +//! word size happens to be, a number of portable type aliases are provided for +//! integer sizes commonly used in cryptography, for example: +//! [`U128`], [`U384`], [`U256`], [`U2048`], [`U3072`], [`U4096`]. +//! +//! ### `const fn` usage +//! +//! The [`UInt`] type provides a number of `const fn` inherent methods which +//! can be used for initializing and performing arithmetic on big integers in +//! const contexts: +//! +//! ``` +//! use crypto_bigint::U256; +//! +//! // Parse a constant from a big endian hexadecimal string. +//! pub const MODULUS: U256 = +//! U256::from_be_hex("ffffffff00000000ffffffffffffffffbce6faada7179e84f3b9cac2fc632551"); +//! +//! // Compute `MODULUS` shifted right by 1 at compile time +//! pub const MODULUS_SHR1: U256 = MODULUS.shr_vartime(1); +//! ``` +//! +//! ### Trait-based usage +//! +//! The [`UInt`] type itself does not implement the standard arithmetic traits +//! such as [`Add`], [`Sub`], [`Mul`], and [`Div`]. +//! +//! To use these traits you must first pick a wrapper type which determines +//! overflow behavior: [`Wrapping`] or [`Checked`]. +//! +//! #### Wrapping arithmetic +//! +//! ``` +//! use crypto_bigint::{U256, Wrapping}; +//! +//! let a = Wrapping(U256::MAX); +//! let b = Wrapping(U256::ONE); +//! let c = a + b; +//! +//! // `MAX` + 1 wraps back around to zero +//! assert_eq!(c.0, U256::ZERO); +//! ``` +//! +//! #### Checked arithmetic +//! +//! ``` +//! use crypto_bigint::{U256, Checked}; +//! +//! let a = Checked::new(U256::ONE); +//! let b = Checked::new(U256::from(2u8)); +//! let c = a + b; +//! assert_eq!(c.0.unwrap(), U256::from(3u8)) +//! ``` +//! +//! ### Modular arithmetic +//! +//! This library has initial support for modular arithmetic in the form of the +//! [`AddMod`], [`SubMod`], [`NegMod`], and [`MulMod`] traits, as well as the +//! support for the [`Rem`] trait when used with a [`NonZero`] operand. +//! +//! ``` +//! use crypto_bigint::{AddMod, U256}; +//! +//! // mod 3 +//! let modulus = U256::from(3u8); +//! +//! // 1 + 1 mod 3 = 2 +//! let a = U256::ONE.add_mod(&U256::ONE, &modulus); +//! assert_eq!(a, U256::from(2u8)); +//! +//! // 2 + 1 mod 3 = 0 +//! let b = a.add_mod(&U256::ONE, &modulus); +//! assert_eq!(b, U256::ZERO); +//! ``` +//! +//! ### Random number generation +//! +//! When the `rand_core` or `rand` features of this crate are enabled, it's +//! possible to generate random numbers using any [`CryptoRng`] by using the +//! [`Random`] trait: +//! +//! ``` +//! # #[cfg(feature = "rand")] +//! # { +//! use crypto_bigint::{Random, U256, rand_core::OsRng}; +//! +//! let n = U256::random(&mut OsRng); +//! # } +//! ``` +//! +//! #### Modular random number generation +//! +//! The [`RandomMod`] trait supports generating random numbers with a uniform +//! distribution around a given [`NonZero`] modulus. +//! +//! ``` +//! # #[cfg(feature = "rand")] +//! # { +//! use crypto_bigint::{NonZero, RandomMod, U256, rand_core::OsRng}; +//! +//! let modulus = NonZero::new(U256::from(3u8)).unwrap(); +//! 
let n = U256::random_mod(&mut OsRng, &modulus); +//! # } +//! ``` +//! +//! [`Add`]: core::ops::Add +//! [`Div`]: core::ops::Div +//! [`Mul`]: core::ops::Mul +//! [`Rem`]: core::ops::Rem +//! [`Sub`]: core::ops::Sub +//! [`CryptoRng`]: rand_core::CryptoRng + +#[cfg(all(feature = "alloc", test))] +extern crate alloc; + +#[macro_use] +mod nlimbs; + +#[cfg(feature = "generic-array")] +mod array; +mod checked; +mod limb; +mod non_zero; +mod traits; +mod uint; +mod wrapping; + +pub use crate::{ + checked::Checked, + limb::{Limb, WideWord, Word}, + non_zero::NonZero, + traits::*, + uint::*, + wrapping::Wrapping, +}; +pub use subtle; + +// TODO(tarcieri): remove these in the next breaking release +#[allow(deprecated)] +pub use crate::limb::{LimbUInt, WideLimbUInt}; + +pub(crate) use limb::{SignedWord, WideSignedWord}; + +#[cfg(feature = "generic-array")] +pub use { + crate::array::{ArrayDecoding, ArrayEncoding, ByteArray}, + generic_array::{self, typenum::consts}, +}; + +#[cfg(feature = "rand_core")] +pub use rand_core; + +#[cfg(feature = "rlp")] +pub use rlp; + +#[cfg(feature = "zeroize")] +pub use zeroize; + +/// Import prelude for this crate: includes important traits. +pub mod prelude { + pub use crate::traits::*; + + #[cfg(feature = "generic-array")] + pub use crate::array::{ArrayDecoding, ArrayEncoding}; +} + +#[cfg(sidefuzz)] +#[no_mangle] +pub extern "C" fn fuzz() { + let input = sidefuzz::fetch_input(32); // 32 bytes of of fuzzing input as a &[u8] + sidefuzz::black_box(my_hopefully_constant_fn(input)); +} diff --git a/vendor/crypto-bigint/src/limb.rs b/vendor/crypto-bigint/src/limb.rs new file mode 100644 index 000000000..df3ae7a72 --- /dev/null +++ b/vendor/crypto-bigint/src/limb.rs @@ -0,0 +1,189 @@ +//! Big integers are represented as an array of smaller CPU word-size integers +//! called "limbs". + +#![allow(clippy::derive_hash_xor_eq)] + +mod add; +mod bit_and; +mod bit_not; +mod bit_or; +mod bit_xor; +mod bits; +mod cmp; +mod encoding; +mod from; +mod mul; +mod shl; +mod shr; +mod sub; + +#[cfg(feature = "rand_core")] +mod rand; + +use crate::Zero; +use core::fmt; +use subtle::{Choice, ConditionallySelectable}; + +#[cfg(feature = "serde")] +use serdect::serde::{Deserialize, Deserializer, Serialize, Serializer}; + +#[cfg(not(any(target_pointer_width = "32", target_pointer_width = "64")))] +compile_error!("this crate builds on 32-bit and 64-bit platforms only"); + +// +// 32-bit definitions +// + +/// Inner integer type that the [`Limb`] newtype wraps. +#[cfg(target_pointer_width = "32")] +pub type Word = u32; + +/// Signed integer type that corresponds to [`Word`]. +#[cfg(target_pointer_width = "32")] +pub(crate) type SignedWord = i32; + +/// Unsigned wide integer type: double the width of [`Word`]. +#[cfg(target_pointer_width = "32")] +pub type WideWord = u64; + +/// Signed wide integer type: double the width of [`Limb`]. +#[cfg(target_pointer_width = "32")] +pub(crate) type WideSignedWord = i64; + +// +// 64-bit definitions +// + +/// Unsigned integer type that the [`Limb`] newtype wraps. +#[cfg(target_pointer_width = "64")] +pub type Word = u64; + +/// Signed integer type that corresponds to [`Word`]. +#[cfg(target_pointer_width = "64")] +pub(crate) type SignedWord = i64; + +/// Wide integer type: double the width of [`Word`]. +#[cfg(target_pointer_width = "64")] +pub type WideWord = u128; + +/// Signed wide integer type: double the width of [`SignedWord`]. 
+#[cfg(target_pointer_width = "64")] +pub(crate) type WideSignedWord = i128; + +// +// Deprecated legacy names +// + +// TODO(tarcieri): remove these in the next breaking release + +/// Deprecated: unsigned integer type that the [`Limb`] newtype wraps. +#[deprecated(since = "0.4.8", note = "please use `Word` instead")] +pub type LimbUInt = Word; + +/// Deprecated: wide integer type which is double the width of [`Word`]. +#[deprecated(since = "0.4.8", note = "please use `WideWord` instead")] +pub type WideLimbUInt = WideWord; + +/// Highest bit in a [`Limb`]. +pub(crate) const HI_BIT: usize = Limb::BIT_SIZE - 1; + +/// Big integers are represented as an array of smaller CPU word-size integers +/// called "limbs". +#[derive(Copy, Clone, Debug, Default, Hash)] +#[repr(transparent)] +pub struct Limb(pub Word); + +impl Limb { + /// The value `0`. + pub const ZERO: Self = Limb(0); + + /// The value `1`. + pub const ONE: Self = Limb(1); + + /// Maximum value this [`Limb`] can express. + pub const MAX: Self = Limb(Word::MAX); + + // 32-bit + + /// Size of the inner integer in bits. + #[cfg(target_pointer_width = "32")] + pub const BIT_SIZE: usize = 32; + /// Size of the inner integer in bytes. + #[cfg(target_pointer_width = "32")] + pub const BYTE_SIZE: usize = 4; + + // 64-bit + + /// Size of the inner integer in bits. + #[cfg(target_pointer_width = "64")] + pub const BIT_SIZE: usize = 64; + /// Size of the inner integer in bytes. + #[cfg(target_pointer_width = "64")] + pub const BYTE_SIZE: usize = 8; + + /// Return `a` if `c`==0 or `b` if `c`==`Word::MAX`. + /// + /// Const-friendly: we can't yet use `subtle` in `const fn` contexts. + #[inline] + pub(crate) const fn ct_select(a: Self, b: Self, c: Word) -> Self { + Self(a.0 ^ (c & (a.0 ^ b.0))) + } +} + +impl ConditionallySelectable for Limb { + #[inline] + fn conditional_select(a: &Self, b: &Self, choice: Choice) -> Self { + Self(Word::conditional_select(&a.0, &b.0, choice)) + } +} + +impl Zero for Limb { + const ZERO: Self = Self::ZERO; +} + +impl fmt::Display for Limb { + #[inline] + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + fmt::UpperHex::fmt(self, f) + } +} + +impl fmt::LowerHex for Limb { + #[inline] + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + write!(f, "{:0width$x}", &self.0, width = Self::BYTE_SIZE * 2) + } +} + +impl fmt::UpperHex for Limb { + #[inline] + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + write!(f, "{:0width$X}", &self.0, width = Self::BYTE_SIZE * 2) + } +} + +#[cfg(feature = "serde")] +#[cfg_attr(docsrs, doc(cfg(feature = "serde")))] +impl<'de> Deserialize<'de> for Limb { + fn deserialize(deserializer: D) -> Result + where + D: Deserializer<'de>, + { + Ok(Self(Word::deserialize(deserializer)?)) + } +} + +#[cfg(feature = "serde")] +#[cfg_attr(docsrs, doc(cfg(feature = "serde")))] +impl<'de> Serialize for Limb { + fn serialize(&self, serializer: S) -> Result + where + S: Serializer, + { + self.0.serialize(serializer) + } +} + +#[cfg(feature = "zeroize")] +#[cfg_attr(docsrs, doc(cfg(feature = "zeroize")))] +impl zeroize::DefaultIsZeroes for Limb {} diff --git a/vendor/crypto-bigint/src/limb/add.rs b/vendor/crypto-bigint/src/limb/add.rs new file mode 100644 index 000000000..ddeda9e67 --- /dev/null +++ b/vendor/crypto-bigint/src/limb/add.rs @@ -0,0 +1,180 @@ +//! 
Limb addition + +use crate::{Checked, CheckedAdd, Limb, WideWord, Word, Wrapping, Zero}; +use core::ops::{Add, AddAssign}; +use subtle::CtOption; + +impl Limb { + /// Computes `self + rhs + carry`, returning the result along with the new carry. + #[inline(always)] + pub const fn adc(self, rhs: Limb, carry: Limb) -> (Limb, Limb) { + let a = self.0 as WideWord; + let b = rhs.0 as WideWord; + let carry = carry.0 as WideWord; + let ret = a + b + carry; + (Limb(ret as Word), Limb((ret >> Self::BIT_SIZE) as Word)) + } + + /// Perform saturating addition. + #[inline] + pub const fn saturating_add(&self, rhs: Self) -> Self { + Limb(self.0.saturating_add(rhs.0)) + } + + /// Perform wrapping addition, discarding overflow. + #[inline(always)] + pub const fn wrapping_add(&self, rhs: Self) -> Self { + Limb(self.0.wrapping_add(rhs.0)) + } +} + +impl CheckedAdd for Limb { + type Output = Self; + + #[inline] + fn checked_add(&self, rhs: Self) -> CtOption { + let (result, carry) = self.adc(rhs, Limb::ZERO); + CtOption::new(result, carry.is_zero()) + } +} + +impl Add for Wrapping { + type Output = Self; + + fn add(self, rhs: Self) -> Wrapping { + Wrapping(self.0.wrapping_add(rhs.0)) + } +} + +impl Add<&Wrapping> for Wrapping { + type Output = Wrapping; + + fn add(self, rhs: &Wrapping) -> Wrapping { + Wrapping(self.0.wrapping_add(rhs.0)) + } +} + +impl Add> for &Wrapping { + type Output = Wrapping; + + fn add(self, rhs: Wrapping) -> Wrapping { + Wrapping(self.0.wrapping_add(rhs.0)) + } +} + +impl Add<&Wrapping> for &Wrapping { + type Output = Wrapping; + + fn add(self, rhs: &Wrapping) -> Wrapping { + Wrapping(self.0.wrapping_add(rhs.0)) + } +} + +impl AddAssign for Wrapping { + fn add_assign(&mut self, other: Self) { + *self = *self + other; + } +} + +impl AddAssign<&Wrapping> for Wrapping { + fn add_assign(&mut self, other: &Self) { + *self = *self + other; + } +} + +impl Add for Checked { + type Output = Self; + + fn add(self, rhs: Self) -> Checked { + Checked( + self.0 + .and_then(|lhs| rhs.0.and_then(|rhs| lhs.checked_add(rhs))), + ) + } +} + +impl Add<&Checked> for Checked { + type Output = Checked; + + fn add(self, rhs: &Checked) -> Checked { + Checked( + self.0 + .and_then(|lhs| rhs.0.and_then(|rhs| lhs.checked_add(rhs))), + ) + } +} + +impl Add> for &Checked { + type Output = Checked; + + fn add(self, rhs: Checked) -> Checked { + Checked( + self.0 + .and_then(|lhs| rhs.0.and_then(|rhs| lhs.checked_add(rhs))), + ) + } +} + +impl Add<&Checked> for &Checked { + type Output = Checked; + + fn add(self, rhs: &Checked) -> Checked { + Checked( + self.0 + .and_then(|lhs| rhs.0.and_then(|rhs| lhs.checked_add(rhs))), + ) + } +} + +impl AddAssign for Checked { + fn add_assign(&mut self, other: Self) { + *self = *self + other; + } +} + +impl AddAssign<&Checked> for Checked { + fn add_assign(&mut self, other: &Self) { + *self = *self + other; + } +} + +#[cfg(test)] +mod tests { + use crate::{CheckedAdd, Limb}; + + #[test] + fn adc_no_carry() { + let (res, carry) = Limb::ZERO.adc(Limb::ONE, Limb::ZERO); + assert_eq!(res, Limb::ONE); + assert_eq!(carry, Limb::ZERO); + } + + #[test] + fn adc_with_carry() { + let (res, carry) = Limb::MAX.adc(Limb::ONE, Limb::ZERO); + assert_eq!(res, Limb::ZERO); + assert_eq!(carry, Limb::ONE); + } + + #[test] + fn wrapping_add_no_carry() { + assert_eq!(Limb::ZERO.wrapping_add(Limb::ONE), Limb::ONE); + } + + #[test] + fn wrapping_add_with_carry() { + assert_eq!(Limb::MAX.wrapping_add(Limb::ONE), Limb::ZERO); + } + + #[test] + fn checked_add_ok() { + let result = 
Limb::ZERO.checked_add(Limb::ONE); + assert_eq!(result.unwrap(), Limb::ONE); + } + + #[test] + fn checked_add_overflow() { + let result = Limb::MAX.checked_add(Limb::ONE); + assert!(!bool::from(result.is_some())); + } +} diff --git a/vendor/crypto-bigint/src/limb/bit_and.rs b/vendor/crypto-bigint/src/limb/bit_and.rs new file mode 100644 index 000000000..3f0bfba0e --- /dev/null +++ b/vendor/crypto-bigint/src/limb/bit_and.rs @@ -0,0 +1,21 @@ +//! Limb bit and operations. + +use super::Limb; +use core::ops::BitAnd; + +impl Limb { + /// Calculates `a & b`. + #[inline(always)] + pub const fn bitand(self, rhs: Self) -> Self { + Limb(self.0 & rhs.0) + } +} + +impl BitAnd for Limb { + type Output = Limb; + + #[inline(always)] + fn bitand(self, rhs: Self) -> Self::Output { + self.bitand(rhs) + } +} diff --git a/vendor/crypto-bigint/src/limb/bit_not.rs b/vendor/crypto-bigint/src/limb/bit_not.rs new file mode 100644 index 000000000..26676d598 --- /dev/null +++ b/vendor/crypto-bigint/src/limb/bit_not.rs @@ -0,0 +1,19 @@ +//! Limb bit not operations. + +use super::Limb; +use core::ops::Not; + +impl Limb { + /// Calculates `!a`. + pub const fn not(self) -> Self { + Limb(!self.0) + } +} + +impl Not for Limb { + type Output = Limb; + + fn not(self) -> ::Output { + self.not() + } +} diff --git a/vendor/crypto-bigint/src/limb/bit_or.rs b/vendor/crypto-bigint/src/limb/bit_or.rs new file mode 100644 index 000000000..cafac18da --- /dev/null +++ b/vendor/crypto-bigint/src/limb/bit_or.rs @@ -0,0 +1,19 @@ +//! Limb bit or operations. + +use super::Limb; +use core::ops::BitOr; + +impl Limb { + /// Calculates `a | b`. + pub const fn bitor(self, rhs: Self) -> Self { + Limb(self.0 | rhs.0) + } +} + +impl BitOr for Limb { + type Output = Limb; + + fn bitor(self, rhs: Self) -> Self::Output { + self.bitor(rhs) + } +} diff --git a/vendor/crypto-bigint/src/limb/bit_xor.rs b/vendor/crypto-bigint/src/limb/bit_xor.rs new file mode 100644 index 000000000..a50782293 --- /dev/null +++ b/vendor/crypto-bigint/src/limb/bit_xor.rs @@ -0,0 +1,19 @@ +//! Limb bit xor operations. + +use super::Limb; +use core::ops::BitXor; + +impl Limb { + /// Calculates `a ^ b`. + pub const fn bitxor(self, rhs: Self) -> Self { + Limb(self.0 ^ rhs.0) + } +} + +impl BitXor for Limb { + type Output = Limb; + + fn bitxor(self, rhs: Self) -> Self::Output { + self.bitxor(rhs) + } +} diff --git a/vendor/crypto-bigint/src/limb/bits.rs b/vendor/crypto-bigint/src/limb/bits.rs new file mode 100644 index 000000000..c470ae9ab --- /dev/null +++ b/vendor/crypto-bigint/src/limb/bits.rs @@ -0,0 +1,8 @@ +use super::Limb; + +impl Limb { + /// Calculate the number of bits needed to represent this number. + pub const fn bits(self) -> usize { + Limb::BIT_SIZE - (self.0.leading_zeros() as usize) + } +} diff --git a/vendor/crypto-bigint/src/limb/cmp.rs b/vendor/crypto-bigint/src/limb/cmp.rs new file mode 100644 index 000000000..76cc4b07c --- /dev/null +++ b/vendor/crypto-bigint/src/limb/cmp.rs @@ -0,0 +1,176 @@ +//! Limb comparisons + +use super::{Limb, SignedWord, WideSignedWord, Word, HI_BIT}; +use core::cmp::Ordering; +use subtle::{Choice, ConstantTimeEq, ConstantTimeGreater, ConstantTimeLess}; + +impl Limb { + /// Is this limb an odd number? + #[inline] + pub fn is_odd(&self) -> Choice { + Choice::from(self.0 as u8 & 1) + } + + /// Perform a comparison of the inner value in variable-time. + /// + /// Note that the [`PartialOrd`] and [`Ord`] impls wrap constant-time + /// comparisons using the `subtle` crate. 
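+    ///
+    /// A minimal illustrative example (assuming the crate is in scope as `crypto_bigint`):
+    ///
+    /// ```
+    /// use core::cmp::Ordering;
+    /// use crypto_bigint::Limb;
+    ///
+    /// assert_eq!(Limb::ONE.cmp_vartime(&Limb::MAX), Ordering::Less);
+    /// assert_eq!(Limb::MAX.cmp_vartime(&Limb::MAX), Ordering::Equal);
+    /// ```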
+ pub fn cmp_vartime(&self, other: &Self) -> Ordering { + self.0.cmp(&other.0) + } + + /// Performs an equality check in variable-time. + pub const fn eq_vartime(&self, other: &Self) -> bool { + self.0 == other.0 + } + + /// Returns all 1's if `a`!=0 or 0 if `a`==0. + /// + /// Const-friendly: we can't yet use `subtle` in `const fn` contexts. + #[inline] + pub(crate) const fn is_nonzero(self) -> Word { + let inner = self.0 as SignedWord; + ((inner | inner.saturating_neg()) >> HI_BIT) as Word + } + + #[inline] + pub(crate) const fn ct_cmp(lhs: Self, rhs: Self) -> SignedWord { + let a = lhs.0 as WideSignedWord; + let b = rhs.0 as WideSignedWord; + let gt = ((b - a) >> Limb::BIT_SIZE) & 1; + let lt = ((a - b) >> Limb::BIT_SIZE) & 1 & !gt; + (gt as SignedWord) - (lt as SignedWord) + } +} + +impl ConstantTimeEq for Limb { + #[inline] + fn ct_eq(&self, other: &Self) -> Choice { + self.0.ct_eq(&other.0) + } +} + +impl ConstantTimeGreater for Limb { + #[inline] + fn ct_gt(&self, other: &Self) -> Choice { + self.0.ct_gt(&other.0) + } +} + +impl ConstantTimeLess for Limb { + #[inline] + fn ct_lt(&self, other: &Self) -> Choice { + self.0.ct_lt(&other.0) + } +} + +impl Eq for Limb {} + +impl Ord for Limb { + fn cmp(&self, other: &Self) -> Ordering { + let mut n = 0i8; + n -= self.ct_lt(other).unwrap_u8() as i8; + n += self.ct_gt(other).unwrap_u8() as i8; + + match n { + -1 => Ordering::Less, + 1 => Ordering::Greater, + _ => { + debug_assert_eq!(n, 0); + debug_assert!(bool::from(self.ct_eq(other))); + Ordering::Equal + } + } + } +} + +impl PartialOrd for Limb { + #[inline] + fn partial_cmp(&self, other: &Self) -> Option { + Some(self.cmp(other)) + } +} + +impl PartialEq for Limb { + #[inline] + fn eq(&self, other: &Self) -> bool { + self.ct_eq(other).into() + } +} + +#[cfg(test)] +mod tests { + use crate::{Limb, Zero}; + use core::cmp::Ordering; + use subtle::{ConstantTimeEq, ConstantTimeGreater, ConstantTimeLess}; + + #[test] + fn is_zero() { + assert!(bool::from(Limb::ZERO.is_zero())); + assert!(!bool::from(Limb::ONE.is_zero())); + assert!(!bool::from(Limb::MAX.is_zero())); + } + + #[test] + fn is_odd() { + assert!(!bool::from(Limb::ZERO.is_odd())); + assert!(bool::from(Limb::ONE.is_odd())); + assert!(bool::from(Limb::MAX.is_odd())); + } + + #[test] + fn ct_eq() { + let a = Limb::ZERO; + let b = Limb::MAX; + + assert!(bool::from(a.ct_eq(&a))); + assert!(!bool::from(a.ct_eq(&b))); + assert!(!bool::from(b.ct_eq(&a))); + assert!(bool::from(b.ct_eq(&b))); + } + + #[test] + fn ct_gt() { + let a = Limb::ZERO; + let b = Limb::ONE; + let c = Limb::MAX; + + assert!(bool::from(b.ct_gt(&a))); + assert!(bool::from(c.ct_gt(&a))); + assert!(bool::from(c.ct_gt(&b))); + + assert!(!bool::from(a.ct_gt(&a))); + assert!(!bool::from(b.ct_gt(&b))); + assert!(!bool::from(c.ct_gt(&c))); + + assert!(!bool::from(a.ct_gt(&b))); + assert!(!bool::from(a.ct_gt(&c))); + assert!(!bool::from(b.ct_gt(&c))); + } + + #[test] + fn ct_lt() { + let a = Limb::ZERO; + let b = Limb::ONE; + let c = Limb::MAX; + + assert!(bool::from(a.ct_lt(&b))); + assert!(bool::from(a.ct_lt(&c))); + assert!(bool::from(b.ct_lt(&c))); + + assert!(!bool::from(a.ct_lt(&a))); + assert!(!bool::from(b.ct_lt(&b))); + assert!(!bool::from(c.ct_lt(&c))); + + assert!(!bool::from(b.ct_lt(&a))); + assert!(!bool::from(c.ct_lt(&a))); + assert!(!bool::from(c.ct_lt(&b))); + } + + #[test] + fn cmp() { + assert_eq!(Limb::ZERO.cmp(&Limb::ONE), Ordering::Less); + assert_eq!(Limb::ONE.cmp(&Limb::ONE), Ordering::Equal); + assert_eq!(Limb::MAX.cmp(&Limb::ONE), Ordering::Greater); + 
} +} diff --git a/vendor/crypto-bigint/src/limb/encoding.rs b/vendor/crypto-bigint/src/limb/encoding.rs new file mode 100644 index 000000000..39974b061 --- /dev/null +++ b/vendor/crypto-bigint/src/limb/encoding.rs @@ -0,0 +1,67 @@ +//! Limb encoding + +use super::{Limb, Word}; +use crate::Encoding; + +impl Encoding for Limb { + const BIT_SIZE: usize = Self::BIT_SIZE; + const BYTE_SIZE: usize = Self::BYTE_SIZE; + + #[cfg(target_pointer_width = "32")] + type Repr = [u8; 4]; + #[cfg(target_pointer_width = "64")] + type Repr = [u8; 8]; + + #[inline] + fn from_be_bytes(bytes: Self::Repr) -> Self { + Limb(Word::from_be_bytes(bytes)) + } + + #[inline] + fn from_le_bytes(bytes: Self::Repr) -> Self { + Limb(Word::from_le_bytes(bytes)) + } + + #[inline] + fn to_be_bytes(&self) -> Self::Repr { + self.0.to_be_bytes() + } + + #[inline] + fn to_le_bytes(&self) -> Self::Repr { + self.0.to_le_bytes() + } +} + +#[cfg(test)] +mod test { + use super::*; + use proptest::prelude::*; + + prop_compose! { + fn limb()(inner in any::()) -> Limb { + Limb(inner) + } + } + + proptest! { + #[test] + fn roundtrip(a in limb()) { + assert_eq!(a, Limb::from_be_bytes(a.to_be_bytes())); + assert_eq!(a, Limb::from_le_bytes(a.to_le_bytes())); + } + } + + proptest! { + #[test] + fn reverse(a in limb()) { + let mut bytes = a.to_be_bytes(); + bytes.reverse(); + assert_eq!(a, Limb::from_le_bytes(bytes)); + + let mut bytes = a.to_le_bytes(); + bytes.reverse(); + assert_eq!(a, Limb::from_be_bytes(bytes)); + } + } +} diff --git a/vendor/crypto-bigint/src/limb/from.rs b/vendor/crypto-bigint/src/limb/from.rs new file mode 100644 index 000000000..fd2765bc9 --- /dev/null +++ b/vendor/crypto-bigint/src/limb/from.rs @@ -0,0 +1,76 @@ +//! `From`-like conversions for [`Limb`]. + +use super::{Limb, WideWord, Word}; + +impl Limb { + /// Create a [`Limb`] from a `u8` integer (const-friendly) + // TODO(tarcieri): replace with `const impl From` when stable + pub const fn from_u8(n: u8) -> Self { + Limb(n as Word) + } + + /// Create a [`Limb`] from a `u16` integer (const-friendly) + // TODO(tarcieri): replace with `const impl From` when stable + pub const fn from_u16(n: u16) -> Self { + Limb(n as Word) + } + + /// Create a [`Limb`] from a `u32` integer (const-friendly) + // TODO(tarcieri): replace with `const impl From` when stable + pub const fn from_u32(n: u32) -> Self { + #[allow(trivial_numeric_casts)] + Limb(n as Word) + } + + /// Create a [`Limb`] from a `u64` integer (const-friendly) + // TODO(tarcieri): replace with `const impl From` when stable + #[cfg(target_pointer_width = "64")] + #[cfg_attr(docsrs, doc(cfg(target_pointer_width = "64")))] + pub const fn from_u64(n: u64) -> Self { + Limb(n) + } +} + +impl From for Limb { + #[inline] + fn from(n: u8) -> Limb { + Limb(n.into()) + } +} + +impl From for Limb { + #[inline] + fn from(n: u16) -> Limb { + Limb(n.into()) + } +} + +impl From for Limb { + #[inline] + fn from(n: u32) -> Limb { + Limb(n.into()) + } +} + +#[cfg(target_pointer_width = "64")] +#[cfg_attr(docsrs, doc(cfg(target_pointer_width = "64")))] +impl From for Limb { + #[inline] + fn from(n: u64) -> Limb { + Limb(n) + } +} + +impl From for Word { + #[inline] + fn from(limb: Limb) -> Word { + limb.0 + } +} + +impl From for WideWord { + #[inline] + fn from(limb: Limb) -> WideWord { + limb.0.into() + } +} diff --git a/vendor/crypto-bigint/src/limb/mul.rs b/vendor/crypto-bigint/src/limb/mul.rs new file mode 100644 index 000000000..a342c6eae --- /dev/null +++ b/vendor/crypto-bigint/src/limb/mul.rs @@ -0,0 +1,195 @@ +//! 
Limb multiplication + +use crate::{Checked, CheckedMul, Limb, WideWord, Word, Wrapping, Zero}; +use core::ops::{Mul, MulAssign}; +use subtle::CtOption; + +impl Limb { + /// Computes `self + (b * c) + carry`, returning the result along with the new carry. + #[inline(always)] + pub const fn mac(self, b: Limb, c: Limb, carry: Limb) -> (Limb, Limb) { + let a = self.0 as WideWord; + let b = b.0 as WideWord; + let c = c.0 as WideWord; + let carry = carry.0 as WideWord; + let ret = a + (b * c) + carry; + (Limb(ret as Word), Limb((ret >> Self::BIT_SIZE) as Word)) + } + + /// Perform saturating multiplication. + #[inline] + pub const fn saturating_mul(&self, rhs: Self) -> Self { + Limb(self.0.saturating_mul(rhs.0)) + } + + /// Perform wrapping multiplication, discarding overflow. + #[inline(always)] + pub const fn wrapping_mul(&self, rhs: Self) -> Self { + Limb(self.0.wrapping_mul(rhs.0)) + } + + /// Compute "wide" multiplication, with a product twice the size of the input. + pub(crate) const fn mul_wide(&self, rhs: Self) -> WideWord { + (self.0 as WideWord) * (rhs.0 as WideWord) + } +} + +impl CheckedMul for Limb { + type Output = Self; + + #[inline] + fn checked_mul(&self, rhs: Self) -> CtOption { + let result = self.mul_wide(rhs); + let overflow = Limb((result >> Self::BIT_SIZE) as Word); + CtOption::new(Limb(result as Word), overflow.is_zero()) + } +} + +impl Mul for Wrapping { + type Output = Self; + + fn mul(self, rhs: Self) -> Wrapping { + Wrapping(self.0.wrapping_mul(rhs.0)) + } +} + +impl Mul<&Wrapping> for Wrapping { + type Output = Wrapping; + + fn mul(self, rhs: &Wrapping) -> Wrapping { + Wrapping(self.0.wrapping_mul(rhs.0)) + } +} + +impl Mul> for &Wrapping { + type Output = Wrapping; + + fn mul(self, rhs: Wrapping) -> Wrapping { + Wrapping(self.0.wrapping_mul(rhs.0)) + } +} + +impl Mul<&Wrapping> for &Wrapping { + type Output = Wrapping; + + fn mul(self, rhs: &Wrapping) -> Wrapping { + Wrapping(self.0.wrapping_mul(rhs.0)) + } +} + +impl MulAssign for Wrapping { + fn mul_assign(&mut self, other: Self) { + *self = *self * other; + } +} + +impl MulAssign<&Wrapping> for Wrapping { + fn mul_assign(&mut self, other: &Self) { + *self = *self * other; + } +} + +impl Mul for Checked { + type Output = Self; + + fn mul(self, rhs: Self) -> Checked { + Checked( + self.0 + .and_then(|lhs| rhs.0.and_then(|rhs| lhs.checked_mul(rhs))), + ) + } +} + +impl Mul<&Checked> for Checked { + type Output = Checked; + + fn mul(self, rhs: &Checked) -> Checked { + Checked( + self.0 + .and_then(|lhs| rhs.0.and_then(|rhs| lhs.checked_mul(rhs))), + ) + } +} + +impl Mul> for &Checked { + type Output = Checked; + + fn mul(self, rhs: Checked) -> Checked { + Checked( + self.0 + .and_then(|lhs| rhs.0.and_then(|rhs| lhs.checked_mul(rhs))), + ) + } +} + +impl Mul<&Checked> for &Checked { + type Output = Checked; + + fn mul(self, rhs: &Checked) -> Checked { + Checked( + self.0 + .and_then(|lhs| rhs.0.and_then(|rhs| lhs.checked_mul(rhs))), + ) + } +} + +impl MulAssign for Checked { + fn mul_assign(&mut self, other: Self) { + *self = *self * other; + } +} + +impl MulAssign<&Checked> for Checked { + fn mul_assign(&mut self, other: &Self) { + *self = *self * other; + } +} + +#[cfg(test)] +mod tests { + use super::{CheckedMul, Limb, WideWord}; + + #[test] + fn mul_wide_zero_and_one() { + assert_eq!(Limb::ZERO.mul_wide(Limb::ZERO), 0); + assert_eq!(Limb::ZERO.mul_wide(Limb::ONE), 0); + assert_eq!(Limb::ONE.mul_wide(Limb::ZERO), 0); + assert_eq!(Limb::ONE.mul_wide(Limb::ONE), 1); + } + + #[test] + fn mul_wide() { + let primes: 
&[u32] = &[3, 5, 17, 256, 65537]; + + for &a_int in primes { + for &b_int in primes { + let actual = Limb::from_u32(a_int).mul_wide(Limb::from_u32(b_int)); + let expected = a_int as WideWord * b_int as WideWord; + assert_eq!(actual, expected); + } + } + } + + #[test] + #[cfg(target_pointer_width = "32")] + fn checked_mul_ok() { + let n = Limb::from_u16(0xffff); + assert_eq!(n.checked_mul(n).unwrap(), Limb::from_u32(0xfffe_0001)); + } + + #[test] + #[cfg(target_pointer_width = "64")] + fn checked_mul_ok() { + let n = Limb::from_u32(0xffff_ffff); + assert_eq!( + n.checked_mul(n).unwrap(), + Limb::from_u64(0xffff_fffe_0000_0001) + ); + } + + #[test] + fn checked_mul_overflow() { + let n = Limb::MAX; + assert!(bool::from(n.checked_mul(n).is_none())); + } +} diff --git a/vendor/crypto-bigint/src/limb/rand.rs b/vendor/crypto-bigint/src/limb/rand.rs new file mode 100644 index 000000000..0bc8af31a --- /dev/null +++ b/vendor/crypto-bigint/src/limb/rand.rs @@ -0,0 +1,45 @@ +//! Random number generator support + +use super::Limb; +use crate::{Encoding, NonZero, Random, RandomMod}; +use rand_core::{CryptoRng, RngCore}; +use subtle::ConstantTimeLess; + +#[cfg_attr(docsrs, doc(cfg(feature = "rand_core")))] +impl Random for Limb { + #[cfg(target_pointer_width = "32")] + fn random(mut rng: impl CryptoRng + RngCore) -> Self { + Self(rng.next_u32()) + } + + #[cfg(target_pointer_width = "64")] + fn random(mut rng: impl CryptoRng + RngCore) -> Self { + Self(rng.next_u64()) + } +} + +#[cfg_attr(docsrs, doc(cfg(feature = "rand_core")))] +impl RandomMod for Limb { + fn random_mod(mut rng: impl CryptoRng + RngCore, modulus: &NonZero) -> Self { + let mut bytes = ::Repr::default(); + + // TODO(tarcieri): use `div_ceil` when available + // See: https://github.com/rust-lang/rust/issues/88581 + let mut n_bytes = modulus.bits() / 8; + + // Ensure the randomly generated value can always be larger than + // the modulus in order to ensure a uniform distribution + if n_bytes < Self::BYTE_SIZE { + n_bytes += 1; + } + + loop { + rng.fill_bytes(&mut bytes[..n_bytes]); + let n = Limb::from_le_bytes(bytes); + + if n.ct_lt(modulus).into() { + return n; + } + } + } +} diff --git a/vendor/crypto-bigint/src/limb/shl.rs b/vendor/crypto-bigint/src/limb/shl.rs new file mode 100644 index 000000000..c0003ddbb --- /dev/null +++ b/vendor/crypto-bigint/src/limb/shl.rs @@ -0,0 +1,73 @@ +//! Limb left bitshift + +use crate::{Limb, Word}; +use core::ops::{Shl, ShlAssign}; + +impl Limb { + /// Computes `self << rhs`. 
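+    ///
+    /// Illustrative example (mirroring the unit tests below, assuming the crate
+    /// is in scope as `crypto_bigint`):
+    ///
+    /// ```
+    /// use crypto_bigint::Limb;
+    ///
+    /// assert_eq!(Limb(1) << 2, Limb(4));
+    /// ```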
+ #[inline(always)] + pub const fn shl(self, rhs: Self) -> Self { + Limb(self.0 << rhs.0) + } +} + +impl Shl for Limb { + type Output = Self; + + #[inline(always)] + fn shl(self, rhs: Self) -> Self::Output { + self.shl(rhs) + } +} + +impl Shl for Limb { + type Output = Self; + + #[inline(always)] + fn shl(self, rhs: usize) -> Self::Output { + self.shl(Limb(rhs as Word)) + } +} + +impl ShlAssign for Limb { + #[inline(always)] + fn shl_assign(&mut self, other: Self) { + *self = self.shl(other); + } +} + +impl ShlAssign for Limb { + #[inline(always)] + fn shl_assign(&mut self, other: usize) { + *self = self.shl(Limb(other as Word)); + } +} + +#[cfg(test)] +mod tests { + use crate::Limb; + + #[test] + fn shl1() { + assert_eq!(Limb(1) << 1, Limb(2)); + } + + #[test] + fn shl2() { + assert_eq!(Limb(1) << 2, Limb(4)); + } + + #[test] + fn shl_assign1() { + let mut l = Limb(1); + l <<= 1; + assert_eq!(l, Limb(2)); + } + + #[test] + fn shl_assign2() { + let mut l = Limb(1); + l <<= 2; + assert_eq!(l, Limb(4)); + } +} diff --git a/vendor/crypto-bigint/src/limb/shr.rs b/vendor/crypto-bigint/src/limb/shr.rs new file mode 100644 index 000000000..29ee76353 --- /dev/null +++ b/vendor/crypto-bigint/src/limb/shr.rs @@ -0,0 +1,73 @@ +//! Limb right bitshift + +use crate::{Limb, Word}; +use core::ops::{Shr, ShrAssign}; + +impl Limb { + /// Computes `self >> rhs`. + #[inline(always)] + pub const fn shr(self, rhs: Self) -> Self { + Limb(self.0 >> rhs.0) + } +} + +impl Shr for Limb { + type Output = Self; + + #[inline(always)] + fn shr(self, rhs: Self) -> Self::Output { + self.shr(rhs) + } +} + +impl Shr for Limb { + type Output = Self; + + #[inline(always)] + fn shr(self, rhs: usize) -> Self::Output { + self.shr(Limb(rhs as Word)) + } +} + +impl ShrAssign for Limb { + #[inline(always)] + fn shr_assign(&mut self, other: Self) { + *self = self.shr(other); + } +} + +impl ShrAssign for Limb { + #[inline(always)] + fn shr_assign(&mut self, other: usize) { + *self = self.shr(Limb(other as Word)); + } +} + +#[cfg(test)] +mod tests { + use crate::Limb; + + #[test] + fn shr1() { + assert_eq!(Limb(2) >> 1, Limb(1)); + } + + #[test] + fn shr2() { + assert_eq!(Limb(16) >> 2, Limb(4)); + } + + #[test] + fn shr_assign1() { + let mut l = Limb::ONE; + l >>= 1; + assert_eq!(l, Limb::ZERO); + } + + #[test] + fn shr_assign2() { + let mut l = Limb(32); + l >>= 2; + assert_eq!(l, Limb(8)); + } +} diff --git a/vendor/crypto-bigint/src/limb/sub.rs b/vendor/crypto-bigint/src/limb/sub.rs new file mode 100644 index 000000000..1560a1b64 --- /dev/null +++ b/vendor/crypto-bigint/src/limb/sub.rs @@ -0,0 +1,182 @@ +//! Limb subtraction + +use crate::{Checked, CheckedSub, Limb, WideWord, Word, Wrapping, Zero}; +use core::ops::{Sub, SubAssign}; +use subtle::CtOption; + +impl Limb { + /// Computes `self - (rhs + borrow)`, returning the result along with the new borrow. + #[inline(always)] + pub const fn sbb(self, rhs: Limb, borrow: Limb) -> (Limb, Limb) { + let a = self.0 as WideWord; + let b = rhs.0 as WideWord; + let borrow = (borrow.0 >> (Self::BIT_SIZE - 1)) as WideWord; + let ret = a.wrapping_sub(b + borrow); + (Limb(ret as Word), Limb((ret >> Self::BIT_SIZE) as Word)) + } + + /// Perform saturating subtraction. + #[inline] + pub const fn saturating_sub(&self, rhs: Self) -> Self { + Limb(self.0.saturating_sub(rhs.0)) + } + + /// Perform wrapping subtraction, discarding underflow and wrapping around + /// the boundary of the type. 
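+    ///
+    /// Illustrative example (mirroring the unit tests below, assuming the crate
+    /// is in scope as `crypto_bigint`):
+    ///
+    /// ```
+    /// use crypto_bigint::Limb;
+    ///
+    /// assert_eq!(Limb::ZERO.wrapping_sub(Limb::ONE), Limb::MAX);
+    /// ```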
+ #[inline(always)] + pub const fn wrapping_sub(&self, rhs: Self) -> Self { + Limb(self.0.wrapping_sub(rhs.0)) + } +} + +impl CheckedSub for Limb { + type Output = Self; + + #[inline] + fn checked_sub(&self, rhs: Self) -> CtOption { + let (result, underflow) = self.sbb(rhs, Limb::ZERO); + CtOption::new(result, underflow.is_zero()) + } +} + +impl Sub for Wrapping { + type Output = Self; + + fn sub(self, rhs: Self) -> Wrapping { + Wrapping(self.0.wrapping_sub(rhs.0)) + } +} + +impl Sub<&Wrapping> for Wrapping { + type Output = Wrapping; + + fn sub(self, rhs: &Wrapping) -> Wrapping { + Wrapping(self.0.wrapping_sub(rhs.0)) + } +} + +impl Sub> for &Wrapping { + type Output = Wrapping; + + fn sub(self, rhs: Wrapping) -> Wrapping { + Wrapping(self.0.wrapping_sub(rhs.0)) + } +} + +impl Sub<&Wrapping> for &Wrapping { + type Output = Wrapping; + + fn sub(self, rhs: &Wrapping) -> Wrapping { + Wrapping(self.0.wrapping_sub(rhs.0)) + } +} + +impl SubAssign for Wrapping { + fn sub_assign(&mut self, other: Self) { + *self = *self - other; + } +} + +impl SubAssign<&Wrapping> for Wrapping { + fn sub_assign(&mut self, other: &Self) { + *self = *self - other; + } +} + +impl Sub for Checked { + type Output = Self; + + fn sub(self, rhs: Self) -> Checked { + Checked( + self.0 + .and_then(|lhs| rhs.0.and_then(|rhs| lhs.checked_sub(rhs))), + ) + } +} + +impl Sub<&Checked> for Checked { + type Output = Checked; + + fn sub(self, rhs: &Checked) -> Checked { + Checked( + self.0 + .and_then(|lhs| rhs.0.and_then(|rhs| lhs.checked_sub(rhs))), + ) + } +} + +impl Sub> for &Checked { + type Output = Checked; + + fn sub(self, rhs: Checked) -> Checked { + Checked( + self.0 + .and_then(|lhs| rhs.0.and_then(|rhs| lhs.checked_sub(rhs))), + ) + } +} + +impl Sub<&Checked> for &Checked { + type Output = Checked; + + fn sub(self, rhs: &Checked) -> Checked { + Checked( + self.0 + .and_then(|lhs| rhs.0.and_then(|rhs| lhs.checked_sub(rhs))), + ) + } +} + +impl SubAssign for Checked { + fn sub_assign(&mut self, other: Self) { + *self = *self - other; + } +} + +impl SubAssign<&Checked> for Checked { + fn sub_assign(&mut self, other: &Self) { + *self = *self - other; + } +} + +#[cfg(test)] +mod tests { + use crate::{CheckedSub, Limb}; + + #[test] + fn sbb_no_borrow() { + let (res, borrow) = Limb::ONE.sbb(Limb::ONE, Limb::ZERO); + assert_eq!(res, Limb::ZERO); + assert_eq!(borrow, Limb::ZERO); + } + + #[test] + fn sbb_with_borrow() { + let (res, borrow) = Limb::ZERO.sbb(Limb::ONE, Limb::ZERO); + + assert_eq!(res, Limb::MAX); + assert_eq!(borrow, Limb::MAX); + } + + #[test] + fn wrapping_sub_no_borrow() { + assert_eq!(Limb::ONE.wrapping_sub(Limb::ONE), Limb::ZERO); + } + + #[test] + fn wrapping_sub_with_borrow() { + assert_eq!(Limb::ZERO.wrapping_sub(Limb::ONE), Limb::MAX); + } + + #[test] + fn checked_sub_ok() { + let result = Limb::ONE.checked_sub(Limb::ONE); + assert_eq!(result.unwrap(), Limb::ZERO); + } + + #[test] + fn checked_sub_overflow() { + let result = Limb::ZERO.checked_sub(Limb::ONE); + assert!(!bool::from(result.is_some())); + } +} diff --git a/vendor/crypto-bigint/src/nlimbs.rs b/vendor/crypto-bigint/src/nlimbs.rs new file mode 100644 index 000000000..c3f91b9fe --- /dev/null +++ b/vendor/crypto-bigint/src/nlimbs.rs @@ -0,0 +1,29 @@ +/// Calculate the number of limbs required to represent the given number of bits. +// TODO(tarcieri): replace with `generic_const_exprs` (rust-lang/rust#76560) when stable +#[macro_export] +macro_rules! 
nlimbs { + ($bits:expr) => { + $bits / $crate::Limb::BIT_SIZE + }; +} + +#[cfg(test)] +mod tests { + #[cfg(target_pointer_width = "32")] + #[test] + fn nlimbs_for_bits_macro() { + assert_eq!(nlimbs!(64), 2); + assert_eq!(nlimbs!(128), 4); + assert_eq!(nlimbs!(192), 6); + assert_eq!(nlimbs!(256), 8); + } + + #[cfg(target_pointer_width = "64")] + #[test] + fn nlimbs_for_bits_macro() { + assert_eq!(nlimbs!(64), 1); + assert_eq!(nlimbs!(128), 2); + assert_eq!(nlimbs!(192), 3); + assert_eq!(nlimbs!(256), 4); + } +} diff --git a/vendor/crypto-bigint/src/non_zero.rs b/vendor/crypto-bigint/src/non_zero.rs new file mode 100644 index 000000000..fc3c9a84d --- /dev/null +++ b/vendor/crypto-bigint/src/non_zero.rs @@ -0,0 +1,385 @@ +//! Wrapper type for non-zero integers. + +use crate::{Encoding, Integer, Limb, UInt, Zero}; +use core::{ + fmt, + num::{NonZeroU128, NonZeroU16, NonZeroU32, NonZeroU64, NonZeroU8}, + ops::Deref, +}; +use subtle::{Choice, ConditionallySelectable, ConstantTimeEq, CtOption}; + +#[cfg(feature = "generic-array")] +use crate::{ArrayEncoding, ByteArray}; + +#[cfg(feature = "rand_core")] +use { + crate::Random, + rand_core::{CryptoRng, RngCore}, +}; + +#[cfg(feature = "serde")] +use serdect::serde::{ + de::{Error, Unexpected}, + Deserialize, Deserializer, Serialize, Serializer, +}; + +/// Wrapper type for non-zero integers. +#[derive(Copy, Clone, Debug, Default, Eq, PartialEq, PartialOrd, Ord)] +pub struct NonZero(T); + +impl NonZero +where + T: Zero, +{ + /// Create a new non-zero integer. + pub fn new(n: T) -> CtOption { + let is_zero = n.is_zero(); + CtOption::new(Self(n), !is_zero) + } +} + +impl NonZero +where + T: Integer, +{ + /// The value `1`. + pub const ONE: Self = Self(T::ONE); + + /// Maximum value this integer can express. + pub const MAX: Self = Self(T::MAX); +} + +impl NonZero +where + T: Encoding + Zero, +{ + /// Decode from big endian bytes. + pub fn from_be_bytes(bytes: T::Repr) -> CtOption { + Self::new(T::from_be_bytes(bytes)) + } + + /// Decode from little endian bytes. + pub fn from_le_bytes(bytes: T::Repr) -> CtOption { + Self::new(T::from_le_bytes(bytes)) + } +} + +#[cfg(feature = "generic-array")] +#[cfg_attr(docsrs, doc(cfg(feature = "generic-array")))] +impl NonZero +where + T: ArrayEncoding + Zero, +{ + /// Decode a non-zero integer from big endian bytes. + pub fn from_be_byte_array(bytes: ByteArray) -> CtOption { + Self::new(T::from_be_byte_array(bytes)) + } + + /// Decode a non-zero integer from big endian bytes. + pub fn from_le_byte_array(bytes: ByteArray) -> CtOption { + Self::new(T::from_be_byte_array(bytes)) + } +} + +impl AsRef for NonZero +where + T: Zero, +{ + fn as_ref(&self) -> &T { + &self.0 + } +} + +impl ConditionallySelectable for NonZero +where + T: ConditionallySelectable + Zero, +{ + fn conditional_select(a: &Self, b: &Self, choice: Choice) -> Self { + Self(T::conditional_select(&a.0, &b.0, choice)) + } +} + +impl ConstantTimeEq for NonZero +where + T: Zero, +{ + fn ct_eq(&self, other: &Self) -> Choice { + self.0.ct_eq(&other.0) + } +} + +impl Deref for NonZero +where + T: Zero, +{ + type Target = T; + + fn deref(&self) -> &T { + &self.0 + } +} + +#[cfg(feature = "rand_core")] +#[cfg_attr(docsrs, doc(cfg(feature = "rand_core")))] +impl Random for NonZero +where + T: Random + Zero, +{ + /// Generate a random `NonZero`. + fn random(mut rng: impl CryptoRng + RngCore) -> Self { + // Use rejection sampling to eliminate zero values. 
+ // While this method isn't constant-time, the attacker shouldn't learn + // anything about unrelated outputs so long as `rng` is a secure `CryptoRng`. + loop { + if let Some(result) = Self::new(T::random(&mut rng)).into() { + break result; + } + } + } +} + +impl fmt::Display for NonZero +where + T: fmt::Display + Zero, +{ + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + fmt::Display::fmt(&self.0, f) + } +} + +impl fmt::Binary for NonZero +where + T: fmt::Binary + Zero, +{ + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + fmt::Binary::fmt(&self.0, f) + } +} + +impl fmt::Octal for NonZero +where + T: fmt::Octal + Zero, +{ + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + fmt::Octal::fmt(&self.0, f) + } +} + +impl fmt::LowerHex for NonZero +where + T: fmt::LowerHex + Zero, +{ + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + fmt::LowerHex::fmt(&self.0, f) + } +} + +impl fmt::UpperHex for NonZero +where + T: fmt::UpperHex + Zero, +{ + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + fmt::UpperHex::fmt(&self.0, f) + } +} + +impl NonZero { + /// Create a [`NonZero`] from a [`NonZeroU8`] (const-friendly) + // TODO(tarcieri): replace with `const impl From` when stable + pub const fn from_u8(n: NonZeroU8) -> Self { + Self(Limb::from_u8(n.get())) + } + + /// Create a [`NonZero`] from a [`NonZeroU16`] (const-friendly) + // TODO(tarcieri): replace with `const impl From` when stable + pub const fn from_u16(n: NonZeroU16) -> Self { + Self(Limb::from_u16(n.get())) + } + + /// Create a [`NonZero`] from a [`NonZeroU32`] (const-friendly) + // TODO(tarcieri): replace with `const impl From` when stable + pub const fn from_u32(n: NonZeroU32) -> Self { + Self(Limb::from_u32(n.get())) + } + + /// Create a [`NonZero`] from a [`NonZeroU64`] (const-friendly) + // TODO(tarcieri): replace with `const impl From` when stable + #[cfg(target_pointer_width = "64")] + #[cfg_attr(docsrs, doc(cfg(target_pointer_width = "64")))] + pub const fn from_u64(n: NonZeroU64) -> Self { + Self(Limb::from_u64(n.get())) + } +} + +impl From for NonZero { + fn from(integer: NonZeroU8) -> Self { + Self::from_u8(integer) + } +} + +impl From for NonZero { + fn from(integer: NonZeroU16) -> Self { + Self::from_u16(integer) + } +} + +impl From for NonZero { + fn from(integer: NonZeroU32) -> Self { + Self::from_u32(integer) + } +} + +#[cfg(target_pointer_width = "64")] +#[cfg_attr(docsrs, doc(cfg(target_pointer_width = "64")))] +impl From for NonZero { + fn from(integer: NonZeroU64) -> Self { + Self::from_u64(integer) + } +} + +impl NonZero> { + /// Create a [`NonZero`] from a [`UInt`] (const-friendly) + pub const fn from_uint(n: UInt) -> Self { + let mut i = 0; + let mut found_non_zero = false; + while i < LIMBS { + if n.limbs()[i].0 != 0 { + found_non_zero = true; + } + i += 1; + } + assert!(found_non_zero, "found zero"); + Self(n) + } + + /// Create a [`NonZero`] from a [`NonZeroU8`] (const-friendly) + // TODO(tarcieri): replace with `const impl From` when stable + pub const fn from_u8(n: NonZeroU8) -> Self { + Self(UInt::from_u8(n.get())) + } + + /// Create a [`NonZero`] from a [`NonZeroU16`] (const-friendly) + // TODO(tarcieri): replace with `const impl From` when stable + pub const fn from_u16(n: NonZeroU16) -> Self { + Self(UInt::from_u16(n.get())) + } + + /// Create a [`NonZero`] from a [`NonZeroU32`] (const-friendly) + // TODO(tarcieri): replace with `const impl From` when stable + pub const fn from_u32(n: NonZeroU32) -> Self { + Self(UInt::from_u32(n.get())) + } + + /// 
Create a [`NonZero`] from a [`NonZeroU64`] (const-friendly) + // TODO(tarcieri): replace with `const impl From` when stable + pub const fn from_u64(n: NonZeroU64) -> Self { + Self(UInt::from_u64(n.get())) + } + + /// Create a [`NonZero`] from a [`NonZeroU128`] (const-friendly) + // TODO(tarcieri): replace with `const impl From` when stable + pub const fn from_u128(n: NonZeroU128) -> Self { + Self(UInt::from_u128(n.get())) + } +} + +impl From for NonZero> { + fn from(integer: NonZeroU8) -> Self { + Self::from_u8(integer) + } +} + +impl From for NonZero> { + fn from(integer: NonZeroU16) -> Self { + Self::from_u16(integer) + } +} + +impl From for NonZero> { + fn from(integer: NonZeroU32) -> Self { + Self::from_u32(integer) + } +} + +impl From for NonZero> { + fn from(integer: NonZeroU64) -> Self { + Self::from_u64(integer) + } +} + +impl From for NonZero> { + fn from(integer: NonZeroU128) -> Self { + Self::from_u128(integer) + } +} + +#[cfg(feature = "serde")] +#[cfg_attr(docsrs, doc(cfg(feature = "serde")))] +impl<'de, T: Deserialize<'de> + Zero> Deserialize<'de> for NonZero { + fn deserialize(deserializer: D) -> Result + where + D: Deserializer<'de>, + { + let value: T = T::deserialize(deserializer)?; + + if bool::from(value.is_zero()) { + Err(D::Error::invalid_value( + Unexpected::Other("zero"), + &"a non-zero value", + )) + } else { + Ok(Self(value)) + } + } +} + +#[cfg(feature = "serde")] +#[cfg_attr(docsrs, doc(cfg(feature = "serde")))] +impl<'de, T: Serialize + Zero> Serialize for NonZero { + fn serialize(&self, serializer: S) -> Result + where + S: Serializer, + { + self.0.serialize(serializer) + } +} + +#[cfg(all(test, feature = "serde"))] +mod tests { + use crate::{NonZero, U64}; + use bincode::ErrorKind; + + #[test] + fn serde() { + let test = + Option::>::from(NonZero::new(U64::from_u64(0x0011223344556677))).unwrap(); + + let serialized = bincode::serialize(&test).unwrap(); + let deserialized: NonZero = bincode::deserialize(&serialized).unwrap(); + + assert_eq!(test, deserialized); + + let serialized = bincode::serialize(&U64::ZERO).unwrap(); + assert!(matches!( + *bincode::deserialize::>(&serialized).unwrap_err(), + ErrorKind::Custom(message) if message == "invalid value: zero, expected a non-zero value" + )); + } + + #[test] + fn serde_owned() { + let test = + Option::>::from(NonZero::new(U64::from_u64(0x0011223344556677))).unwrap(); + + let serialized = bincode::serialize(&test).unwrap(); + let deserialized: NonZero = bincode::deserialize_from(serialized.as_slice()).unwrap(); + + assert_eq!(test, deserialized); + + let serialized = bincode::serialize(&U64::ZERO).unwrap(); + assert!(matches!( + *bincode::deserialize_from::<_, NonZero>(serialized.as_slice()).unwrap_err(), + ErrorKind::Custom(message) if message == "invalid value: zero, expected a non-zero value" + )); + } +} diff --git a/vendor/crypto-bigint/src/traits.rs b/vendor/crypto-bigint/src/traits.rs new file mode 100644 index 000000000..b89c70a9b --- /dev/null +++ b/vendor/crypto-bigint/src/traits.rs @@ -0,0 +1,227 @@ +//! Traits provided by this crate + +use crate::{Limb, NonZero}; +use core::fmt::Debug; +use core::ops::{BitAnd, BitOr, BitXor, Div, Not, Rem, Shl, Shr}; +use subtle::{ + Choice, ConditionallySelectable, ConstantTimeEq, ConstantTimeGreater, ConstantTimeLess, + CtOption, +}; + +#[cfg(feature = "rand_core")] +use rand_core::{CryptoRng, RngCore}; + +/// Integer type. 
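+///
+/// Illustrative sketch of the trait's parity helpers (assuming the crate is in
+/// scope as `crypto_bigint`):
+///
+/// ```
+/// use crypto_bigint::{Integer, U64};
+///
+/// assert!(bool::from(U64::ONE.is_odd()));
+/// assert!(bool::from(U64::ZERO.is_even()));
+/// ```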
+pub trait Integer: + 'static + + AsRef<[Limb]> + + BitAnd + + BitOr + + BitXor + + for<'a> CheckedAdd<&'a Self, Output = Self> + + for<'a> CheckedSub<&'a Self, Output = Self> + + for<'a> CheckedMul<&'a Self, Output = Self> + + Copy + + ConditionallySelectable + + ConstantTimeEq + + ConstantTimeGreater + + ConstantTimeLess + + Debug + + Default + + Div, Output = Self> + + Eq + + From + + Not + + Ord + + Rem, Output = Self> + + Send + + Sized + + Shl + + Shr + + Sync + + Zero +{ + /// The value `1`. + const ONE: Self; + + /// Maximum value this integer can express. + const MAX: Self; + + /// Is this integer value an odd number? + /// + /// # Returns + /// + /// If odd, returns `Choice(1)`. Otherwise, returns `Choice(0)`. + fn is_odd(&self) -> Choice; + + /// Is this integer value an even number? + /// + /// # Returns + /// + /// If even, returns `Choice(1)`. Otherwise, returns `Choice(0)`. + fn is_even(&self) -> Choice { + !self.is_odd() + } +} + +/// Zero values. +pub trait Zero: ConstantTimeEq + Sized { + /// The value `0`. + const ZERO: Self; + + /// Determine if this value is equal to zero. + /// + /// # Returns + /// + /// If zero, returns `Choice(1)`. Otherwise, returns `Choice(0)`. + fn is_zero(&self) -> Choice { + self.ct_eq(&Self::ZERO) + } +} + +/// Random number generation support. +#[cfg(feature = "rand_core")] +#[cfg_attr(docsrs, doc(cfg(feature = "rand_core")))] +pub trait Random: Sized { + /// Generate a cryptographically secure random value. + fn random(rng: impl CryptoRng + RngCore) -> Self; +} + +/// Modular random number generation support. +#[cfg(feature = "rand_core")] +#[cfg_attr(docsrs, doc(cfg(feature = "rand_core")))] +pub trait RandomMod: Sized + Zero { + /// Generate a cryptographically secure random number which is less than + /// a given `modulus`. + /// + /// This function uses rejection sampling, a method which produces an + /// unbiased distribution of in-range values provided the underlying + /// [`CryptoRng`] is unbiased, but runs in variable-time. + /// + /// The variable-time nature of the algorithm should not pose a security + /// issue so long as the underlying random number generator is truly a + /// [`CryptoRng`], where previous outputs are unrelated to subsequent + /// outputs and do not reveal information about the RNG's internal state. + fn random_mod(rng: impl CryptoRng + RngCore, modulus: &NonZero) -> Self; +} + +/// Compute `self + rhs mod p`. +pub trait AddMod { + /// Output type. + type Output; + + /// Compute `self + rhs mod p`. + /// + /// Assumes `self` and `rhs` are `< p`. + fn add_mod(&self, rhs: &Rhs, p: &Self) -> Self::Output; +} + +/// Compute `self - rhs mod p`. +pub trait SubMod { + /// Output type. + type Output; + + /// Compute `self - rhs mod p`. + /// + /// Assumes `self` and `rhs` are `< p`. + fn sub_mod(&self, rhs: &Rhs, p: &Self) -> Self::Output; +} + +/// Compute `-self mod p`. +pub trait NegMod { + /// Output type. + type Output; + + /// Compute `-self mod p`. + #[must_use] + fn neg_mod(&self, p: &Self) -> Self::Output; +} + +/// Compute `self * rhs mod p`. +/// +/// Requires `p_inv = -(p^{-1} mod 2^{BITS}) mod 2^{BITS}` to be provided for efficiency. +pub trait MulMod { + /// Output type. + type Output; + + /// Compute `self * rhs mod p`. + /// + /// Requires `p_inv = -(p^{-1} mod 2^{BITS}) mod 2^{BITS}` to be provided for efficiency. + fn mul_mod(&self, rhs: &Rhs, p: &Self, p_inv: Limb) -> Self::Output; +} + +/// Checked addition. +pub trait CheckedAdd: Sized { + /// Output type. 
+ type Output; + + /// Perform checked subtraction, returning a [`CtOption`] which `is_some` + /// only if the operation did not overflow. + fn checked_add(&self, rhs: Rhs) -> CtOption; +} + +/// Checked multiplication. +pub trait CheckedMul: Sized { + /// Output type. + type Output; + + /// Perform checked multiplication, returning a [`CtOption`] which `is_some` + /// only if the operation did not overflow. + fn checked_mul(&self, rhs: Rhs) -> CtOption; +} + +/// Checked substraction. +pub trait CheckedSub: Sized { + /// Output type. + type Output; + + /// Perform checked subtraction, returning a [`CtOption`] which `is_some` + /// only if the operation did not underflow. + fn checked_sub(&self, rhs: Rhs) -> CtOption; +} + +/// Concatenate two numbers into a "wide" twice-width value, using the `rhs` +/// value as the least significant value. +pub trait Concat { + /// Concatenated output: twice the width of `Self`. + type Output; + + /// Concatenate the two values, with `self` as most significant and `rhs` + /// as the least significant. + fn concat(&self, rhs: &Self) -> Self::Output; +} + +/// Split a number in half, returning the most significant half followed by +/// the least significant. +pub trait Split { + /// Split output: high/low components of the value. + type Output; + + /// Split this number in half, returning its high and low components + /// respectively. + fn split(&self) -> (Self::Output, Self::Output); +} + +/// Encoding support. +pub trait Encoding: Sized { + /// Size of this integer in bits. + const BIT_SIZE: usize; + + /// Size of this integer in bytes. + const BYTE_SIZE: usize; + + /// Byte array representation. + type Repr: AsRef<[u8]> + AsMut<[u8]> + Copy + Clone + Sized; + + /// Decode from big endian bytes. + fn from_be_bytes(bytes: Self::Repr) -> Self; + + /// Decode from little endian bytes. + fn from_le_bytes(bytes: Self::Repr) -> Self; + + /// Encode to big endian bytes. + fn to_be_bytes(&self) -> Self::Repr; + + /// Encode to little endian bytes. + fn to_le_bytes(&self) -> Self::Repr; +} diff --git a/vendor/crypto-bigint/src/uint.rs b/vendor/crypto-bigint/src/uint.rs new file mode 100644 index 000000000..5aac67c5a --- /dev/null +++ b/vendor/crypto-bigint/src/uint.rs @@ -0,0 +1,484 @@ +//! Big unsigned integers. + +#![allow( + clippy::needless_range_loop, + clippy::many_single_char_names, + clippy::derive_hash_xor_eq +)] + +#[macro_use] +mod concat; +#[macro_use] +mod split; + +mod add; +mod add_mod; +mod bit_and; +mod bit_not; +mod bit_or; +mod bit_xor; +mod bits; +mod cmp; +mod div; +mod encoding; +mod from; +mod inv_mod; +mod mul; +mod mul_mod; +mod neg_mod; +mod resize; +mod shl; +mod shr; +mod sqrt; +mod sub; +mod sub_mod; + +#[cfg(feature = "generic-array")] +mod array; + +#[cfg(feature = "rand_core")] +mod rand; + +use crate::{Concat, Encoding, Integer, Limb, Split, Word, Zero}; +use core::{fmt, mem}; +use subtle::{Choice, ConditionallySelectable}; + +#[cfg(feature = "serde")] +use serdect::serde::{Deserialize, Deserializer, Serialize, Serializer}; + +#[cfg(feature = "zeroize")] +use zeroize::DefaultIsZeroes; + +/// Big unsigned integer. +/// +/// Generic over the given number of `LIMBS` +/// +/// # Encoding support +/// This type supports many different types of encodings, either via the +/// [`Encoding`][`crate::Encoding`] trait or various `const fn` decoding and +/// encoding functions that can be used with [`UInt`] constants. 
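+///
+/// For example (illustrative; the value below is the NIST P-256 modulus also used
+/// in this crate's tests), a constant can be defined directly from a hex string:
+///
+/// ```
+/// use crypto_bigint::U256;
+///
+/// const N: U256 =
+///     U256::from_be_hex("ffffffff00000000ffffffffffffffffbce6faada7179e84f3b9cac2fc632551");
+/// ```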
+/// +/// Optional crate features for encoding (off-by-default): +/// - `generic-array`: enables [`ArrayEncoding`][`crate::ArrayEncoding`] trait which can be used to +/// [`UInt`] as `GenericArray` and a [`ArrayDecoding`][`crate::ArrayDecoding`] trait which +/// can be used to `GenericArray` as [`UInt`]. +/// - `rlp`: support for [Recursive Length Prefix (RLP)][RLP] encoding. +/// +/// [RLP]: https://eth.wiki/fundamentals/rlp +// TODO(tarcieri): make generic around a specified number of bits. +#[derive(Copy, Clone, Debug, Hash)] +pub struct UInt { + /// Inner limb array. Stored from least significant to most significant. + limbs: [Limb; LIMBS], +} + +impl UInt { + /// The value `0`. + pub const ZERO: Self = Self::from_u8(0); + + /// The value `1`. + pub const ONE: Self = Self::from_u8(1); + + /// The number of limbs used on this platform. + pub const LIMBS: usize = LIMBS; + + /// Maximum value this [`UInt`] can express. + pub const MAX: Self = Self { + limbs: [Limb::MAX; LIMBS], + }; + + /// Const-friendly [`UInt`] constructor. + pub const fn new(limbs: [Limb; LIMBS]) -> Self { + Self { limbs } + } + + /// Create a [`UInt`] from an array of [`Word`]s (i.e. word-sized unsigned + /// integers). + #[inline] + pub const fn from_words(arr: [Word; LIMBS]) -> Self { + let mut limbs = [Limb::ZERO; LIMBS]; + let mut i = 0; + + while i < LIMBS { + limbs[i] = Limb(arr[i]); + i += 1; + } + + Self { limbs } + } + + /// Create an array of [`Word`]s (i.e. word-sized unsigned integers) from + /// a [`UInt`]. + #[inline] + pub const fn to_words(self) -> [Word; LIMBS] { + let mut arr = [0; LIMBS]; + let mut i = 0; + + while i < LIMBS { + arr[i] = self.limbs[i].0; + i += 1; + } + + arr + } + + /// Borrow the inner limbs as an array of [`Word`]s. + pub const fn as_words(&self) -> &[Word; LIMBS] { + // SAFETY: `Limb` is a `repr(transparent)` newtype for `Word` + #[allow(unsafe_code)] + unsafe { + // TODO(tarcieri): use &*((&self.limbs as *const _) as *const [Word; LIMBS]) + mem::transmute(&self.limbs) + } + } + + /// Borrow the inner limbs as a mutable array of [`Word`]s. + pub fn as_words_mut(&mut self) -> &mut [Word; LIMBS] { + // SAFETY: `Limb` is a `repr(transparent)` newtype for `Word` + #[allow(trivial_casts, unsafe_code)] + unsafe { + &mut *((&mut self.limbs as *mut _) as *mut [Word; LIMBS]) + } + } + + /// Deprecated: borrow the inner limbs as an array of [`Word`]s. + #[deprecated(since = "0.4.8", note = "please use `as_words` instead")] + pub const fn as_uint_array(&self) -> &[Word; LIMBS] { + self.as_words() + } + + /// Deprecated: create a [`UInt`] from an array of [`Word`]s. + #[deprecated(since = "0.4.8", note = "please use `from_words` instead")] + pub const fn from_uint_array(words: [Word; LIMBS]) -> Self { + Self::from_words(words) + } + + /// Deprecated: create an array of [`Word`]s from a [`UInt`]. + #[deprecated(since = "0.4.8", note = "please use `to_words` instead")] + pub const fn to_uint_array(self) -> [Word; LIMBS] { + self.to_words() + } + + /// Borrow the limbs of this [`UInt`]. + // TODO(tarcieri): rename to `as_limbs` for consistency with `as_words` + pub const fn limbs(&self) -> &[Limb; LIMBS] { + &self.limbs + } + + /// Borrow the limbs of this [`UInt`] mutably. + // TODO(tarcieri): rename to `as_limbs_mut` for consistency with `as_words_mut` + pub fn limbs_mut(&mut self) -> &mut [Limb; LIMBS] { + &mut self.limbs + } + + /// Convert this [`UInt`] into its inner limbs. 
+ // TODO(tarcieri): rename to `to_limbs` for consistency with `to_words` + pub const fn into_limbs(self) -> [Limb; LIMBS] { + self.limbs + } +} + +impl AsRef<[Word; LIMBS]> for UInt { + fn as_ref(&self) -> &[Word; LIMBS] { + self.as_words() + } +} + +impl AsMut<[Word; LIMBS]> for UInt { + fn as_mut(&mut self) -> &mut [Word; LIMBS] { + self.as_words_mut() + } +} + +// TODO(tarcieri): eventually phase this out in favor of `limbs()`? +impl AsRef<[Limb]> for UInt { + fn as_ref(&self) -> &[Limb] { + self.limbs() + } +} + +// TODO(tarcieri): eventually phase this out in favor of `limbs_mut()`? +impl AsMut<[Limb]> for UInt { + fn as_mut(&mut self) -> &mut [Limb] { + self.limbs_mut() + } +} + +impl ConditionallySelectable for UInt { + fn conditional_select(a: &Self, b: &Self, choice: Choice) -> Self { + let mut limbs = [Limb::ZERO; LIMBS]; + + for i in 0..LIMBS { + limbs[i] = Limb::conditional_select(&a.limbs[i], &b.limbs[i], choice); + } + + Self { limbs } + } +} + +impl Default for UInt { + fn default() -> Self { + Self::ZERO + } +} + +impl Integer for UInt { + const ONE: Self = Self::ONE; + const MAX: Self = Self::MAX; + + fn is_odd(&self) -> Choice { + self.limbs + .first() + .map(|limb| limb.is_odd()) + .unwrap_or_else(|| Choice::from(0)) + } +} + +impl Zero for UInt { + const ZERO: Self = Self::ZERO; +} + +impl fmt::Display for UInt { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + fmt::UpperHex::fmt(self, f) + } +} + +impl fmt::LowerHex for UInt { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + for limb in self.limbs.iter().rev() { + fmt::LowerHex::fmt(limb, f)?; + } + Ok(()) + } +} + +impl fmt::UpperHex for UInt { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + for limb in self.limbs.iter().rev() { + fmt::UpperHex::fmt(limb, f)?; + } + Ok(()) + } +} + +#[cfg(feature = "serde")] +#[cfg_attr(docsrs, doc(cfg(feature = "serde")))] +impl<'de, const LIMBS: usize> Deserialize<'de> for UInt +where + UInt: Encoding, +{ + fn deserialize(deserializer: D) -> Result + where + D: Deserializer<'de>, + { + let mut buffer = Self::ZERO.to_le_bytes(); + serdect::array::deserialize_hex_or_bin(buffer.as_mut(), deserializer)?; + + Ok(Self::from_le_bytes(buffer)) + } +} + +#[cfg(feature = "serde")] +#[cfg_attr(docsrs, doc(cfg(feature = "serde")))] +impl<'de, const LIMBS: usize> Serialize for UInt +where + UInt: Encoding, +{ + fn serialize(&self, serializer: S) -> Result + where + S: Serializer, + { + serdect::array::serialize_hex_lower_or_bin(&Encoding::to_le_bytes(self), serializer) + } +} + +#[cfg(feature = "zeroize")] +#[cfg_attr(docsrs, doc(cfg(feature = "zeroize")))] +impl DefaultIsZeroes for UInt {} + +// TODO(tarcieri): use `const_evaluatable_checked` when stable to make generic around bits. +macro_rules! 
impl_uint_aliases { + ($(($name:ident, $bits:expr, $doc:expr)),+) => { + $( + #[doc = $doc] + #[doc="unsigned big integer."] + pub type $name = UInt<{nlimbs!($bits)}>; + + impl Encoding for $name { + const BIT_SIZE: usize = $bits; + const BYTE_SIZE: usize = $bits / 8; + + type Repr = [u8; $bits / 8]; + + #[inline] + fn from_be_bytes(bytes: Self::Repr) -> Self { + Self::from_be_slice(&bytes) + } + + #[inline] + fn from_le_bytes(bytes: Self::Repr) -> Self { + Self::from_le_slice(&bytes) + } + + #[inline] + fn to_be_bytes(&self) -> Self::Repr { + let mut result = [0u8; $bits / 8]; + self.write_be_bytes(&mut result); + result + } + + #[inline] + fn to_le_bytes(&self) -> Self::Repr { + let mut result = [0u8; $bits / 8]; + self.write_le_bytes(&mut result); + result + } + } + )+ + }; +} + +// TODO(tarcieri): use `const_evaluatable_checked` when stable to make generic around bits. +impl_uint_aliases! { + (U64, 64, "64-bit"), + (U128, 128, "128-bit"), + (U192, 192, "192-bit"), + (U256, 256, "256-bit"), + (U384, 384, "384-bit"), + (U448, 448, "448-bit"), + (U512, 512, "512-bit"), + (U576, 576, "576-bit"), + (U768, 768, "768-bit"), + (U896, 896, "896-bit"), + (U1024, 1024, "1024-bit"), + (U1536, 1536, "1536-bit"), + (U1792, 1792, "1792-bit"), + (U2048, 2048, "2048-bit"), + (U3072, 3072, "3072-bit"), + (U3584, 3584, "3584-bit"), + (U4096, 4096, "4096-bit"), + (U6144, 6144, "6144-bit"), + (U8192, 8192, "8192-bit") +} + +// TODO(tarcieri): use `const_evaluatable_checked` when stable to make generic around bits. +impl_concat! { + (U64, 64), + (U128, 128), + (U192, 192), + (U256, 256), + (U384, 384), + (U448, 448), + (U512, 512), + (U768, 768), + (U896, 896), + (U1024, 1024), + (U1536, 1536), + (U1792, 1792), + (U2048, 2048), + (U3072, 3072), + (U4096, 4096) +} + +// TODO(tarcieri): use `const_evaluatable_checked` when stable to make generic around bits. +impl_split! 
{ + (U128, 128), + (U192, 192), + (U256, 256), + (U384, 384), + (U448, 448), + (U512, 512), + (U768, 768), + (U896, 896), + (U1024, 1024), + (U1536, 1536), + (U1792, 1792), + (U2048, 2048), + (U3072, 3072), + (U3584, 3584), + (U4096, 4096), + (U6144, 6144), + (U8192, 8192) +} + +#[cfg(test)] +mod tests { + use crate::{Encoding, U128}; + use subtle::ConditionallySelectable; + + #[cfg(feature = "serde")] + use crate::U64; + + #[test] + #[cfg(feature = "alloc")] + fn display() { + let hex = "AAAAAAAABBBBBBBBCCCCCCCCDDDDDDDD"; + let n = U128::from_be_hex(hex); + + use alloc::string::ToString; + assert_eq!(hex, n.to_string()); + + let hex = "AAAAAAAABBBBBBBB0000000000000000"; + let n = U128::from_be_hex(hex); + assert_eq!(hex, n.to_string()); + + let hex = "AAAAAAAABBBBBBBB00000000DDDDDDDD"; + let n = U128::from_be_hex(hex); + assert_eq!(hex, n.to_string()); + + let hex = "AAAAAAAABBBBBBBB0CCCCCCCDDDDDDDD"; + let n = U128::from_be_hex(hex); + assert_eq!(hex, n.to_string()); + } + + #[test] + fn from_bytes() { + let a = U128::from_be_hex("AAAAAAAABBBBBBBB0CCCCCCCDDDDDDDD"); + + let be_bytes = a.to_be_bytes(); + let le_bytes = a.to_le_bytes(); + for i in 0..16 { + assert_eq!(le_bytes[i], be_bytes[15 - i]); + } + + let a_from_be = U128::from_be_bytes(be_bytes); + let a_from_le = U128::from_le_bytes(le_bytes); + assert_eq!(a_from_be, a_from_le); + assert_eq!(a_from_be, a); + } + + #[test] + fn conditional_select() { + let a = U128::from_be_hex("00002222444466668888AAAACCCCEEEE"); + let b = U128::from_be_hex("11113333555577779999BBBBDDDDFFFF"); + + let select_0 = U128::conditional_select(&a, &b, 0.into()); + assert_eq!(a, select_0); + + let select_1 = U128::conditional_select(&a, &b, 1.into()); + assert_eq!(b, select_1); + } + + #[test] + #[cfg(feature = "serde")] + fn serde() { + const TEST: U64 = U64::from_u64(0x0011223344556677); + + let serialized = bincode::serialize(&TEST).unwrap(); + let deserialized: U64 = bincode::deserialize(&serialized).unwrap(); + + assert_eq!(TEST, deserialized); + } + + #[test] + #[cfg(feature = "serde")] + fn serde_owned() { + const TEST: U64 = U64::from_u64(0x0011223344556677); + + let serialized = bincode::serialize(&TEST).unwrap(); + let deserialized: U64 = bincode::deserialize_from(serialized.as_slice()).unwrap(); + + assert_eq!(TEST, deserialized); + } +} diff --git a/vendor/crypto-bigint/src/uint/add.rs b/vendor/crypto-bigint/src/uint/add.rs new file mode 100644 index 000000000..2822e9e67 --- /dev/null +++ b/vendor/crypto-bigint/src/uint/add.rs @@ -0,0 +1,189 @@ +//! [`UInt`] addition operations. + +use crate::{Checked, CheckedAdd, Limb, UInt, Wrapping, Zero}; +use core::ops::{Add, AddAssign}; +use subtle::CtOption; + +impl UInt { + /// Computes `a + b + carry`, returning the result along with the new carry. + #[inline(always)] + pub const fn adc(&self, rhs: &Self, mut carry: Limb) -> (Self, Limb) { + let mut limbs = [Limb::ZERO; LIMBS]; + let mut i = 0; + + while i < LIMBS { + let (w, c) = self.limbs[i].adc(rhs.limbs[i], carry); + limbs[i] = w; + carry = c; + i += 1; + } + + (Self { limbs }, carry) + } + + /// Perform saturating addition, returning `MAX` on overflow. + pub const fn saturating_add(&self, rhs: &Self) -> Self { + let (res, overflow) = self.adc(rhs, Limb::ZERO); + + if overflow.0 == 0 { + res + } else { + Self::MAX + } + } + + /// Perform wrapping addition, discarding overflow. 
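+    ///
+    /// Illustrative example (mirroring the unit tests below, assuming the crate
+    /// is in scope as `crypto_bigint`):
+    ///
+    /// ```
+    /// use crypto_bigint::U128;
+    ///
+    /// assert_eq!(U128::MAX.wrapping_add(&U128::ONE), U128::ZERO);
+    /// ```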
+ pub const fn wrapping_add(&self, rhs: &Self) -> Self { + self.adc(rhs, Limb::ZERO).0 + } +} + +impl CheckedAdd<&UInt> for UInt { + type Output = Self; + + fn checked_add(&self, rhs: &Self) -> CtOption { + let (result, carry) = self.adc(rhs, Limb::ZERO); + CtOption::new(result, carry.is_zero()) + } +} + +impl Add for Wrapping> { + type Output = Self; + + fn add(self, rhs: Self) -> Wrapping> { + Wrapping(self.0.wrapping_add(&rhs.0)) + } +} + +impl Add<&Wrapping>> for Wrapping> { + type Output = Wrapping>; + + fn add(self, rhs: &Wrapping>) -> Wrapping> { + Wrapping(self.0.wrapping_add(&rhs.0)) + } +} + +impl Add>> for &Wrapping> { + type Output = Wrapping>; + + fn add(self, rhs: Wrapping>) -> Wrapping> { + Wrapping(self.0.wrapping_add(&rhs.0)) + } +} + +impl Add<&Wrapping>> for &Wrapping> { + type Output = Wrapping>; + + fn add(self, rhs: &Wrapping>) -> Wrapping> { + Wrapping(self.0.wrapping_add(&rhs.0)) + } +} + +impl AddAssign for Wrapping> { + fn add_assign(&mut self, other: Self) { + *self = *self + other; + } +} + +impl AddAssign<&Wrapping>> for Wrapping> { + fn add_assign(&mut self, other: &Self) { + *self = *self + other; + } +} + +impl Add for Checked> { + type Output = Self; + + fn add(self, rhs: Self) -> Checked> { + Checked( + self.0 + .and_then(|lhs| rhs.0.and_then(|rhs| lhs.checked_add(&rhs))), + ) + } +} + +impl Add<&Checked>> for Checked> { + type Output = Checked>; + + fn add(self, rhs: &Checked>) -> Checked> { + Checked( + self.0 + .and_then(|lhs| rhs.0.and_then(|rhs| lhs.checked_add(&rhs))), + ) + } +} + +impl Add>> for &Checked> { + type Output = Checked>; + + fn add(self, rhs: Checked>) -> Checked> { + Checked( + self.0 + .and_then(|lhs| rhs.0.and_then(|rhs| lhs.checked_add(&rhs))), + ) + } +} + +impl Add<&Checked>> for &Checked> { + type Output = Checked>; + + fn add(self, rhs: &Checked>) -> Checked> { + Checked( + self.0 + .and_then(|lhs| rhs.0.and_then(|rhs| lhs.checked_add(&rhs))), + ) + } +} + +impl AddAssign for Checked> { + fn add_assign(&mut self, other: Self) { + *self = *self + other; + } +} + +impl AddAssign<&Checked>> for Checked> { + fn add_assign(&mut self, other: &Self) { + *self = *self + other; + } +} + +#[cfg(test)] +mod tests { + use crate::{CheckedAdd, Limb, U128}; + + #[test] + fn adc_no_carry() { + let (res, carry) = U128::ZERO.adc(&U128::ONE, Limb::ZERO); + assert_eq!(res, U128::ONE); + assert_eq!(carry, Limb::ZERO); + } + + #[test] + fn adc_with_carry() { + let (res, carry) = U128::MAX.adc(&U128::ONE, Limb::ZERO); + assert_eq!(res, U128::ZERO); + assert_eq!(carry, Limb::ONE); + } + + #[test] + fn wrapping_add_no_carry() { + assert_eq!(U128::ZERO.wrapping_add(&U128::ONE), U128::ONE); + } + + #[test] + fn wrapping_add_with_carry() { + assert_eq!(U128::MAX.wrapping_add(&U128::ONE), U128::ZERO); + } + + #[test] + fn checked_add_ok() { + let result = U128::ZERO.checked_add(&U128::ONE); + assert_eq!(result.unwrap(), U128::ONE); + } + + #[test] + fn checked_add_overflow() { + let result = U128::MAX.checked_add(&U128::ONE); + assert!(!bool::from(result.is_some())); + } +} diff --git a/vendor/crypto-bigint/src/uint/add_mod.rs b/vendor/crypto-bigint/src/uint/add_mod.rs new file mode 100644 index 000000000..3486a0a57 --- /dev/null +++ b/vendor/crypto-bigint/src/uint/add_mod.rs @@ -0,0 +1,139 @@ +//! [`UInt`] addition modulus operations. + +use crate::{AddMod, Limb, UInt}; + +impl UInt { + /// Computes `self + rhs mod p` in constant time. + /// + /// Assumes `self + rhs` as unbounded integer is `< 2p`. 
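+    ///
+    /// Small illustrative example (values chosen here for demonstration rather
+    /// than taken from the crate's test vectors):
+    ///
+    /// ```
+    /// use crypto_bigint::U256;
+    ///
+    /// let p = U256::from_u8(7);
+    /// assert_eq!(U256::from_u8(5).add_mod(&U256::from_u8(4), &p), U256::from_u8(2));
+    /// ```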
+ pub const fn add_mod(&self, rhs: &UInt, p: &UInt) -> UInt { + let (w, carry) = self.adc(rhs, Limb::ZERO); + + // Attempt to subtract the modulus, to ensure the result is in the field. + let (w, borrow) = w.sbb(p, Limb::ZERO); + let (_, borrow) = carry.sbb(Limb::ZERO, borrow); + + // If underflow occurred on the final limb, borrow = 0xfff...fff, otherwise + // borrow = 0x000...000. Thus, we use it as a mask to conditionally add the + // modulus. + let mut i = 0; + let mut res = Self::ZERO; + let mut carry = Limb::ZERO; + + while i < LIMBS { + let rhs = p.limbs[i].bitand(borrow); + let (limb, c) = w.limbs[i].adc(rhs, carry); + res.limbs[i] = limb; + carry = c; + i += 1; + } + + res + } + + /// Computes `self + rhs mod p` in constant time for the special modulus + /// `p = MAX+1-c` where `c` is small enough to fit in a single [`Limb`]. + /// + /// Assumes `self + rhs` as unbounded integer is `< 2p`. + pub const fn add_mod_special(&self, rhs: &Self, c: Limb) -> Self { + // `UInt::adc` also works with a carry greater than 1. + let (out, carry) = self.adc(rhs, c); + + // If overflow occurred, then above addition of `c` already accounts + // for the overflow. Otherwise, we need to subtract `c` again, which + // in that case cannot underflow. + let l = carry.0.wrapping_sub(1) & c.0; + let (out, _) = out.sbb(&UInt::from_word(l), Limb::ZERO); + out + } +} + +impl AddMod for UInt { + type Output = Self; + + fn add_mod(&self, rhs: &Self, p: &Self) -> Self { + debug_assert!(self < p); + debug_assert!(rhs < p); + self.add_mod(rhs, p) + } +} + +#[cfg(all(test, feature = "rand"))] +mod tests { + use crate::{Limb, NonZero, Random, RandomMod, UInt, U256}; + use rand_core::SeedableRng; + + // TODO(tarcieri): additional tests + proptests + + #[test] + fn add_mod_nist_p256() { + let a = + U256::from_be_hex("44acf6b7e36c1342c2c5897204fe09504e1e2efb1a900377dbc4e7a6a133ec56"); + let b = + U256::from_be_hex("d5777c45019673125ad240f83094d4252d829516fac8601ed01979ec1ec1a251"); + let n = + U256::from_be_hex("ffffffff00000000ffffffffffffffffbce6faada7179e84f3b9cac2fc632551"); + + let actual = a.add_mod(&b, &n); + let expected = + U256::from_be_hex("1a2472fde50286541d97ca6a3592dd75beb9c9646e40c511b82496cfc3926956"); + + assert_eq!(expected, actual); + } + + macro_rules! 
test_add_mod_special { + ($size:expr, $test_name:ident) => { + #[test] + fn $test_name() { + let mut rng = rand_chacha::ChaCha8Rng::seed_from_u64(1); + let moduli = [ + NonZero::::random(&mut rng), + NonZero::::random(&mut rng), + ]; + + for special in &moduli { + let p = &NonZero::new(UInt::ZERO.wrapping_sub(&UInt::from_word(special.0))) + .unwrap(); + + let minus_one = p.wrapping_sub(&UInt::ONE); + + let base_cases = [ + (UInt::ZERO, UInt::ZERO, UInt::ZERO), + (UInt::ONE, UInt::ZERO, UInt::ONE), + (UInt::ZERO, UInt::ONE, UInt::ONE), + (minus_one, UInt::ONE, UInt::ZERO), + (UInt::ONE, minus_one, UInt::ZERO), + ]; + for (a, b, c) in &base_cases { + let x = a.add_mod_special(b, *special.as_ref()); + assert_eq!(*c, x, "{} + {} mod {} = {} != {}", a, b, p, x, c); + } + + for _i in 0..100 { + let a = UInt::<$size>::random_mod(&mut rng, p); + let b = UInt::<$size>::random_mod(&mut rng, p); + + let c = a.add_mod_special(&b, *special.as_ref()); + assert!(c < **p, "not reduced: {} >= {} ", c, p); + + let expected = a.add_mod(&b, p); + assert_eq!(c, expected, "incorrect result"); + } + } + } + }; + } + + test_add_mod_special!(1, add_mod_special_1); + test_add_mod_special!(2, add_mod_special_2); + test_add_mod_special!(3, add_mod_special_3); + test_add_mod_special!(4, add_mod_special_4); + test_add_mod_special!(5, add_mod_special_5); + test_add_mod_special!(6, add_mod_special_6); + test_add_mod_special!(7, add_mod_special_7); + test_add_mod_special!(8, add_mod_special_8); + test_add_mod_special!(9, add_mod_special_9); + test_add_mod_special!(10, add_mod_special_10); + test_add_mod_special!(11, add_mod_special_11); + test_add_mod_special!(12, add_mod_special_12); +} diff --git a/vendor/crypto-bigint/src/uint/array.rs b/vendor/crypto-bigint/src/uint/array.rs new file mode 100644 index 000000000..cba2b3716 --- /dev/null +++ b/vendor/crypto-bigint/src/uint/array.rs @@ -0,0 +1,189 @@ +//! `generic-array` integration with `UInt`. +// TODO(tarcieri): completely phase out `generic-array` when const generics are powerful enough + +use crate::{ArrayDecoding, ArrayEncoding, ByteArray}; +use generic_array::{typenum, GenericArray}; + +macro_rules! impl_uint_array_encoding { + ($(($uint:ident, $bytes:path)),+) => { + $( + #[cfg_attr(docsrs, doc(cfg(feature = "generic-array")))] + impl ArrayEncoding for super::$uint { + type ByteSize = $bytes; + + #[inline] + fn from_be_byte_array(bytes: ByteArray) -> Self { + Self::from_be_slice(&bytes) + } + + #[inline] + fn from_le_byte_array(bytes: ByteArray) -> Self { + Self::from_le_slice(&bytes) + } + + #[inline] + fn to_be_byte_array(&self) -> ByteArray { + let mut result = GenericArray::default(); + self.write_be_bytes(&mut result); + result + } + + #[inline] + fn to_le_byte_array(&self) -> ByteArray { + let mut result = GenericArray::default(); + self.write_le_bytes(&mut result); + result + } + } + + #[cfg_attr(docsrs, doc(cfg(feature = "generic-array")))] + impl ArrayDecoding for GenericArray { + type Output = super::$uint; + + fn into_uint_be(self) -> Self::Output { + Self::Output::from_be_byte_array(self) + } + + fn into_uint_le(self) -> Self::Output { + Self::Output::from_le_byte_array(self) + } + } + )+ + }; +} + +// TODO(tarcieri): use `const_evaluatable_checked` when stable to make generic around bits. +impl_uint_array_encoding! 
{ + (U64, typenum::U8), + (U128, typenum::U16), + (U192, typenum::U24), + (U256, typenum::U32), + (U384, typenum::U48), + (U448, typenum::U56), + (U512, typenum::U64), + (U576, typenum::U72), + (U768, typenum::U96), + (U896, typenum::U112), + (U1024, typenum::U128), + (U1536, typenum::U192), + (U1792, typenum::U224), + (U2048, typenum::U256), + (U3072, typenum::U384), + (U3584, typenum::U448), + (U4096, typenum::U512), + (U6144, typenum::U768), + (U8192, typenum::U1024) +} + +#[cfg(test)] +mod tests { + use crate::{ArrayDecoding, ArrayEncoding, Limb}; + use hex_literal::hex; + + #[cfg(target_pointer_width = "32")] + use crate::U64 as UIntEx; + + #[cfg(target_pointer_width = "64")] + use crate::U128 as UIntEx; + + /// Byte array that corresponds to `UIntEx` + type ByteArray = crate::ByteArray; + + #[test] + #[cfg(target_pointer_width = "32")] + fn from_be_byte_array() { + let n = UIntEx::from_be_byte_array(hex!("0011223344556677").into()); + assert_eq!(n.limbs(), &[Limb(0x44556677), Limb(0x00112233)]); + } + + #[test] + #[cfg(target_pointer_width = "64")] + fn from_be_byte_array() { + let n = UIntEx::from_be_byte_array(hex!("00112233445566778899aabbccddeeff").into()); + assert_eq!( + n.limbs(), + &[Limb(0x8899aabbccddeeff), Limb(0x0011223344556677)] + ); + } + + #[test] + #[cfg(target_pointer_width = "32")] + fn from_le_byte_array() { + let n = UIntEx::from_le_byte_array(hex!("7766554433221100").into()); + assert_eq!(n.limbs(), &[Limb(0x44556677), Limb(0x00112233)]); + } + + #[test] + #[cfg(target_pointer_width = "64")] + fn from_le_byte_array() { + let n = UIntEx::from_le_byte_array(hex!("ffeeddccbbaa99887766554433221100").into()); + assert_eq!( + n.limbs(), + &[Limb(0x8899aabbccddeeff), Limb(0x0011223344556677)] + ); + } + + #[test] + #[cfg(target_pointer_width = "32")] + fn to_be_byte_array() { + let expected_bytes = ByteArray::from(hex!("0011223344556677")); + let actual_bytes = UIntEx::from_be_byte_array(expected_bytes).to_be_byte_array(); + assert_eq!(expected_bytes, actual_bytes); + } + + #[test] + #[cfg(target_pointer_width = "64")] + fn to_be_byte_array() { + let expected_bytes = ByteArray::from(hex!("00112233445566778899aabbccddeeff")); + let actual_bytes = UIntEx::from_be_byte_array(expected_bytes).to_be_byte_array(); + assert_eq!(expected_bytes, actual_bytes); + } + + #[test] + #[cfg(target_pointer_width = "32")] + fn to_le_byte_array() { + let expected_bytes = ByteArray::from(hex!("7766554433221100")); + let actual_bytes = UIntEx::from_le_byte_array(expected_bytes).to_le_byte_array(); + assert_eq!(expected_bytes, actual_bytes); + } + + #[test] + #[cfg(target_pointer_width = "64")] + fn to_le_byte_array() { + let expected_bytes = ByteArray::from(hex!("ffeeddccbbaa99887766554433221100")); + let actual_bytes = UIntEx::from_le_byte_array(expected_bytes).to_le_byte_array(); + assert_eq!(expected_bytes, actual_bytes); + } + + #[test] + #[cfg(target_pointer_width = "32")] + fn into_uint_be() { + let expected_bytes = ByteArray::from(hex!("0011223344556677")); + let actual_bytes = expected_bytes.into_uint_be().to_be_byte_array(); + assert_eq!(expected_bytes, actual_bytes); + } + + #[test] + #[cfg(target_pointer_width = "64")] + fn into_uint_be() { + let expected_bytes = ByteArray::from(hex!("00112233445566778899aabbccddeeff")); + let actual_bytes = expected_bytes.into_uint_be().to_be_byte_array(); + assert_eq!(expected_bytes, actual_bytes); + } + + #[test] + #[cfg(target_pointer_width = "32")] + fn into_uint_le() { + let expected_bytes = ByteArray::from(hex!("7766554433221100")); + let 
actual_bytes = expected_bytes.into_uint_le().to_le_byte_array(); + assert_eq!(expected_bytes, actual_bytes); + } + + #[test] + #[cfg(target_pointer_width = "64")] + fn into_uint_le() { + let expected_bytes = ByteArray::from(hex!("ffeeddccbbaa99887766554433221100")); + let actual_bytes = expected_bytes.into_uint_le().to_le_byte_array(); + assert_eq!(expected_bytes, actual_bytes); + } +} diff --git a/vendor/crypto-bigint/src/uint/bit_and.rs b/vendor/crypto-bigint/src/uint/bit_and.rs new file mode 100644 index 000000000..cab89a429 --- /dev/null +++ b/vendor/crypto-bigint/src/uint/bit_and.rs @@ -0,0 +1,145 @@ +//! [`UInt`] bitwise and operations. + +use super::UInt; +use crate::{Limb, Wrapping}; +use core::ops::{BitAnd, BitAndAssign}; +use subtle::{Choice, CtOption}; + +impl UInt { + /// Computes bitwise `a & b`. + #[inline(always)] + pub const fn bitand(&self, rhs: &Self) -> Self { + let mut limbs = [Limb::ZERO; LIMBS]; + let mut i = 0; + + while i < LIMBS { + limbs[i] = self.limbs[i].bitand(rhs.limbs[i]); + i += 1; + } + + Self { limbs } + } + + /// Perform wrapping bitwise `AND`. + /// + /// There's no way wrapping could ever happen. + /// This function exists so that all operations are accounted for in the wrapping operations + pub const fn wrapping_and(&self, rhs: &Self) -> Self { + self.bitand(rhs) + } + + /// Perform checked bitwise `AND`, returning a [`CtOption`] which `is_some` always + pub fn checked_and(&self, rhs: &Self) -> CtOption { + let result = self.bitand(rhs); + CtOption::new(result, Choice::from(1)) + } +} + +impl BitAnd for UInt { + type Output = Self; + + fn bitand(self, rhs: Self) -> UInt { + self.bitand(&rhs) + } +} + +impl BitAnd<&UInt> for UInt { + type Output = UInt; + + fn bitand(self, rhs: &UInt) -> UInt { + (&self).bitand(rhs) + } +} + +impl BitAnd> for &UInt { + type Output = UInt; + + fn bitand(self, rhs: UInt) -> UInt { + self.bitand(&rhs) + } +} + +impl BitAnd<&UInt> for &UInt { + type Output = UInt; + + fn bitand(self, rhs: &UInt) -> UInt { + self.bitand(rhs) + } +} + +impl BitAndAssign for UInt { + #[allow(clippy::assign_op_pattern)] + fn bitand_assign(&mut self, other: Self) { + *self = *self & other; + } +} + +impl BitAndAssign<&UInt> for UInt { + #[allow(clippy::assign_op_pattern)] + fn bitand_assign(&mut self, other: &Self) { + *self = *self & other; + } +} + +impl BitAnd for Wrapping> { + type Output = Self; + + fn bitand(self, rhs: Self) -> Wrapping> { + Wrapping(self.0.bitand(&rhs.0)) + } +} + +impl BitAnd<&Wrapping>> for Wrapping> { + type Output = Wrapping>; + + fn bitand(self, rhs: &Wrapping>) -> Wrapping> { + Wrapping(self.0.bitand(&rhs.0)) + } +} + +impl BitAnd>> for &Wrapping> { + type Output = Wrapping>; + + fn bitand(self, rhs: Wrapping>) -> Wrapping> { + Wrapping(self.0.bitand(&rhs.0)) + } +} + +impl BitAnd<&Wrapping>> for &Wrapping> { + type Output = Wrapping>; + + fn bitand(self, rhs: &Wrapping>) -> Wrapping> { + Wrapping(self.0.bitand(&rhs.0)) + } +} + +impl BitAndAssign for Wrapping> { + #[allow(clippy::assign_op_pattern)] + fn bitand_assign(&mut self, other: Self) { + *self = *self & other; + } +} + +impl BitAndAssign<&Wrapping>> for Wrapping> { + #[allow(clippy::assign_op_pattern)] + fn bitand_assign(&mut self, other: &Self) { + *self = *self & other; + } +} + +#[cfg(test)] +mod tests { + use crate::U128; + + #[test] + fn checked_and_ok() { + let result = U128::ZERO.checked_and(&U128::ONE); + assert_eq!(result.unwrap(), U128::ZERO); + } + + #[test] + fn overlapping_and_ok() { + let result = U128::MAX.wrapping_and(&U128::ONE); + 
assert_eq!(result, U128::ONE); + } +} diff --git a/vendor/crypto-bigint/src/uint/bit_not.rs b/vendor/crypto-bigint/src/uint/bit_not.rs new file mode 100644 index 000000000..747d3b49e --- /dev/null +++ b/vendor/crypto-bigint/src/uint/bit_not.rs @@ -0,0 +1,48 @@ +//! [`UInt`] bitwise not operations. + +use super::UInt; +use crate::{Limb, Wrapping}; +use core::ops::Not; + +impl UInt { + /// Computes bitwise `!a`. + #[inline(always)] + pub const fn not(&self) -> Self { + let mut limbs = [Limb::ZERO; LIMBS]; + let mut i = 0; + + while i < LIMBS { + limbs[i] = self.limbs[i].not(); + i += 1; + } + + Self { limbs } + } +} + +impl Not for UInt { + type Output = Self; + + fn not(self) -> ::Output { + (&self).not() + } +} + +impl Not for Wrapping> { + type Output = Self; + + fn not(self) -> ::Output { + Wrapping(self.0.not()) + } +} + +#[cfg(test)] +mod tests { + use crate::U128; + + #[test] + fn bitnot_ok() { + assert_eq!(U128::ZERO.not(), U128::MAX); + assert_eq!(U128::MAX.not(), U128::ZERO); + } +} diff --git a/vendor/crypto-bigint/src/uint/bit_or.rs b/vendor/crypto-bigint/src/uint/bit_or.rs new file mode 100644 index 000000000..4a01a8343 --- /dev/null +++ b/vendor/crypto-bigint/src/uint/bit_or.rs @@ -0,0 +1,141 @@ +//! [`UInt`] bitwise or operations. + +use super::UInt; +use crate::{Limb, Wrapping}; +use core::ops::{BitOr, BitOrAssign}; +use subtle::{Choice, CtOption}; + +impl UInt { + /// Computes bitwise `a & b`. + #[inline(always)] + pub const fn bitor(&self, rhs: &Self) -> Self { + let mut limbs = [Limb::ZERO; LIMBS]; + let mut i = 0; + + while i < LIMBS { + limbs[i] = self.limbs[i].bitor(rhs.limbs[i]); + i += 1; + } + + Self { limbs } + } + + /// Perform wrapping bitwise `OR`. + /// + /// There's no way wrapping could ever happen. + /// This function exists so that all operations are accounted for in the wrapping operations + pub const fn wrapping_or(&self, rhs: &Self) -> Self { + self.bitor(rhs) + } + + /// Perform checked bitwise `OR`, returning a [`CtOption`] which `is_some` always + pub fn checked_or(&self, rhs: &Self) -> CtOption { + let result = self.bitor(rhs); + CtOption::new(result, Choice::from(1)) + } +} + +impl BitOr for UInt { + type Output = Self; + + fn bitor(self, rhs: Self) -> UInt { + self.bitor(&rhs) + } +} + +impl BitOr<&UInt> for UInt { + type Output = UInt; + + fn bitor(self, rhs: &UInt) -> UInt { + (&self).bitor(rhs) + } +} + +impl BitOr> for &UInt { + type Output = UInt; + + fn bitor(self, rhs: UInt) -> UInt { + self.bitor(&rhs) + } +} + +impl BitOr<&UInt> for &UInt { + type Output = UInt; + + fn bitor(self, rhs: &UInt) -> UInt { + self.bitor(rhs) + } +} + +impl BitOrAssign for UInt { + fn bitor_assign(&mut self, other: Self) { + *self = *self | other; + } +} + +impl BitOrAssign<&UInt> for UInt { + fn bitor_assign(&mut self, other: &Self) { + *self = *self | other; + } +} + +impl BitOr for Wrapping> { + type Output = Self; + + fn bitor(self, rhs: Self) -> Wrapping> { + Wrapping(self.0.bitor(&rhs.0)) + } +} + +impl BitOr<&Wrapping>> for Wrapping> { + type Output = Wrapping>; + + fn bitor(self, rhs: &Wrapping>) -> Wrapping> { + Wrapping(self.0.bitor(&rhs.0)) + } +} + +impl BitOr>> for &Wrapping> { + type Output = Wrapping>; + + fn bitor(self, rhs: Wrapping>) -> Wrapping> { + Wrapping(self.0.bitor(&rhs.0)) + } +} + +impl BitOr<&Wrapping>> for &Wrapping> { + type Output = Wrapping>; + + fn bitor(self, rhs: &Wrapping>) -> Wrapping> { + Wrapping(self.0.bitor(&rhs.0)) + } +} + +impl BitOrAssign for Wrapping> { + fn bitor_assign(&mut self, other: Self) { + *self = *self | 
other; + } +} + +impl BitOrAssign<&Wrapping>> for Wrapping> { + fn bitor_assign(&mut self, other: &Self) { + *self = *self | other; + } +} + +#[cfg(test)] +mod tests { + use crate::U128; + + #[test] + fn checked_or_ok() { + let result = U128::ZERO.checked_or(&U128::ONE); + assert_eq!(result.unwrap(), U128::ONE); + } + + #[test] + fn overlapping_or_ok() { + let result = U128::MAX.wrapping_or(&U128::ONE); + assert_eq!(result, U128::MAX); + } +} diff --git a/vendor/crypto-bigint/src/uint/bit_xor.rs b/vendor/crypto-bigint/src/uint/bit_xor.rs new file mode 100644 index 000000000..16d78ad3a --- /dev/null +++ b/vendor/crypto-bigint/src/uint/bit_xor.rs @@ -0,0 +1,141 @@ +//! [`UInt`] bitwise xor operations. + +use super::UInt; +use crate::{Limb, Wrapping}; +use core::ops::{BitXor, BitXorAssign}; +use subtle::{Choice, CtOption}; + +impl UInt { + /// Computes bitwise `a ^ b`. + #[inline(always)] + pub const fn bitxor(&self, rhs: &Self) -> Self { + let mut limbs = [Limb::ZERO; LIMBS]; + let mut i = 0; + + while i < LIMBS { + limbs[i] = self.limbs[i].bitxor(rhs.limbs[i]); + i += 1; + } + + Self { limbs } + } + + /// Perform wrapping bitwise `XOR``. + /// + /// There's no way wrapping could ever happen. + /// This function exists so that all operations are accounted for in the wrapping operations + pub const fn wrapping_xor(&self, rhs: &Self) -> Self { + self.bitxor(rhs) + } + + /// Perform checked bitwise `XOR`, returning a [`CtOption`] which `is_some` always + pub fn checked_xor(&self, rhs: &Self) -> CtOption { + let result = self.bitxor(rhs); + CtOption::new(result, Choice::from(1)) + } +} + +impl BitXor for UInt { + type Output = Self; + + fn bitxor(self, rhs: Self) -> UInt { + self.bitxor(&rhs) + } +} + +impl BitXor<&UInt> for UInt { + type Output = UInt; + + fn bitxor(self, rhs: &UInt) -> UInt { + (&self).bitxor(rhs) + } +} + +impl BitXor> for &UInt { + type Output = UInt; + + fn bitxor(self, rhs: UInt) -> UInt { + self.bitxor(&rhs) + } +} + +impl BitXor<&UInt> for &UInt { + type Output = UInt; + + fn bitxor(self, rhs: &UInt) -> UInt { + self.bitxor(rhs) + } +} + +impl BitXorAssign for UInt { + fn bitxor_assign(&mut self, other: Self) { + *self = *self ^ other; + } +} + +impl BitXorAssign<&UInt> for UInt { + fn bitxor_assign(&mut self, other: &Self) { + *self = *self ^ other; + } +} + +impl BitXor for Wrapping> { + type Output = Self; + + fn bitxor(self, rhs: Self) -> Wrapping> { + Wrapping(self.0.bitxor(&rhs.0)) + } +} + +impl BitXor<&Wrapping>> for Wrapping> { + type Output = Wrapping>; + + fn bitxor(self, rhs: &Wrapping>) -> Wrapping> { + Wrapping(self.0.bitxor(&rhs.0)) + } +} + +impl BitXor>> for &Wrapping> { + type Output = Wrapping>; + + fn bitxor(self, rhs: Wrapping>) -> Wrapping> { + Wrapping(self.0.bitxor(&rhs.0)) + } +} + +impl BitXor<&Wrapping>> for &Wrapping> { + type Output = Wrapping>; + + fn bitxor(self, rhs: &Wrapping>) -> Wrapping> { + Wrapping(self.0.bitxor(&rhs.0)) + } +} + +impl BitXorAssign for Wrapping> { + fn bitxor_assign(&mut self, other: Self) { + *self = *self ^ other; + } +} + +impl BitXorAssign<&Wrapping>> for Wrapping> { + fn bitxor_assign(&mut self, other: &Self) { + *self = *self ^ other; + } +} + +#[cfg(test)] +mod tests { + use crate::U128; + + #[test] + fn checked_xor_ok() { + let result = U128::ZERO.checked_xor(&U128::ONE); + assert_eq!(result.unwrap(), U128::ONE); + } + + #[test] + fn overlapping_xor_ok() { + let result = U128::ZERO.wrapping_xor(&U128::ONE); + assert_eq!(result, U128::ONE); + } +} diff --git a/vendor/crypto-bigint/src/uint/bits.rs 
b/vendor/crypto-bigint/src/uint/bits.rs new file mode 100644 index 000000000..b76d89fa5 --- /dev/null +++ b/vendor/crypto-bigint/src/uint/bits.rs @@ -0,0 +1,55 @@ +use crate::{Limb, UInt, Word}; + +impl UInt { + /// Get the value of the bit at position `index`, as a 0- or 1-valued Word. + /// Returns 0 for indices out of range. + #[inline(always)] + pub const fn bit_vartime(self, index: usize) -> Word { + if index >= LIMBS * Limb::BIT_SIZE { + 0 + } else { + (self.limbs[index / Limb::BIT_SIZE].0 >> (index % Limb::BIT_SIZE)) & 1 + } + } + + /// Calculate the number of bits needed to represent this number. + #[deprecated(note = "please use `bits_vartime` instead")] + #[inline(always)] + pub const fn bits(self) -> usize { + self.bits_vartime() + } + + /// Calculate the number of bits needed to represent this number. + #[allow(trivial_numeric_casts)] + pub const fn bits_vartime(self) -> usize { + let mut i = LIMBS - 1; + while i > 0 && self.limbs[i].0 == 0 { + i -= 1; + } + + let limb = self.limbs[i].0; + let bits = (Limb::BIT_SIZE * (i + 1)) as Word - limb.leading_zeros() as Word; + + Limb::ct_select( + Limb(bits), + Limb::ZERO, + !self.limbs[0].is_nonzero() & !Limb(i as Word).is_nonzero(), + ) + .0 as usize + } +} + +#[cfg(test)] +mod tests { + use crate::U128; + + #[test] + fn bit_vartime_ok() { + let u = U128::from_be_hex("f0010000000000000001000000010000"); + assert_eq!(u.bit_vartime(0), 0); + assert_eq!(u.bit_vartime(1), 0); + assert_eq!(u.bit_vartime(16), 1); + assert_eq!(u.bit_vartime(127), 1); + assert_eq!(u.bit_vartime(130), 0); + } +} diff --git a/vendor/crypto-bigint/src/uint/cmp.rs b/vendor/crypto-bigint/src/uint/cmp.rs new file mode 100644 index 000000000..19046df9b --- /dev/null +++ b/vendor/crypto-bigint/src/uint/cmp.rs @@ -0,0 +1,196 @@ +//! [`UInt`] comparisons. +//! +//! By default these are all constant-time and use the `subtle` crate. + +use super::UInt; +use crate::{limb::HI_BIT, Limb, SignedWord, WideSignedWord, Word, Zero}; +use core::cmp::Ordering; +use subtle::{Choice, ConstantTimeEq, ConstantTimeGreater, ConstantTimeLess}; + +impl UInt { + /// Return `a` if `c`==0 or `b` if `c`==`Word::MAX`. + /// + /// Const-friendly: we can't yet use `subtle` in `const fn` contexts. + #[inline] + pub(crate) const fn ct_select(a: UInt, b: UInt, c: Word) -> Self { + let mut limbs = [Limb::ZERO; LIMBS]; + + let mut i = 0; + while i < LIMBS { + limbs[i] = Limb::ct_select(a.limbs[i], b.limbs[i], c); + i += 1; + } + + UInt { limbs } + } + + /// Returns all 1's if `self`!=0 or 0 if `self`==0. + /// + /// Const-friendly: we can't yet use `subtle` in `const fn` contexts. + #[inline] + pub(crate) const fn ct_is_nonzero(&self) -> Word { + let mut b = 0; + let mut i = 0; + while i < LIMBS { + b |= self.limbs[i].0; + i += 1; + } + Limb::is_nonzero(Limb(b)) + } + + /// Returns -1 if self < rhs + /// 0 if self == rhs + /// 1 if self > rhs + /// + /// Const-friendly: we can't yet use `subtle` in `const fn` contexts. + #[inline] + pub(crate) const fn ct_cmp(&self, rhs: &Self) -> SignedWord { + let mut gt = 0; + let mut lt = 0; + let mut i = LIMBS; + + while i > 0 { + let a = self.limbs[i - 1].0 as WideSignedWord; + let b = rhs.limbs[i - 1].0 as WideSignedWord; + gt |= ((b - a) >> Limb::BIT_SIZE) & 1 & !lt; + lt |= ((a - b) >> Limb::BIT_SIZE) & 1 & !gt; + i -= 1; + } + (gt as SignedWord) - (lt as SignedWord) + } + + /// Returns 0 if self == rhs or Word::MAX if self != rhs. + /// Const-friendly: we can't yet use `subtle` in `const fn` contexts. 
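// [Editorial usage sketch - not part of the vendored crypto-bigint sources above.]
// Shows the bit-inspection helpers from uint/bits.rs on a value with a single set
// bit. Assumes the crate is imported as `crypto_bigint`; the function name is
// illustrative only.
fn bit_inspection_example() {
    use crypto_bigint::U128;

    let x = U128::from_be_hex("00000000000000000000000000010000"); // 2^16

    assert_eq!(x.bit_vartime(16), 1);   // the single set bit
    assert_eq!(x.bit_vartime(17), 0);
    assert_eq!(x.bit_vartime(1000), 0); // out-of-range indices read as zero
    assert_eq!(x.bits_vartime(), 17);   // bits needed to represent 2^16
}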
+ #[inline] + pub(crate) const fn ct_not_eq(&self, rhs: &Self) -> Word { + let mut acc = 0; + let mut i = 0; + + while i < LIMBS { + acc |= self.limbs[i].0 ^ rhs.limbs[i].0; + i += 1; + } + let acc = acc as SignedWord; + ((acc | acc.wrapping_neg()) >> HI_BIT) as Word + } +} + +impl ConstantTimeEq for UInt { + #[inline] + fn ct_eq(&self, other: &Self) -> Choice { + Choice::from((!self.ct_not_eq(other) as u8) & 1) + } +} + +impl ConstantTimeGreater for UInt { + #[inline] + fn ct_gt(&self, other: &Self) -> Choice { + let underflow = other.sbb(self, Limb::ZERO).1; + !underflow.is_zero() + } +} + +impl ConstantTimeLess for UInt { + #[inline] + fn ct_lt(&self, other: &Self) -> Choice { + let underflow = self.sbb(other, Limb::ZERO).1; + !underflow.is_zero() + } +} + +impl Eq for UInt {} + +impl Ord for UInt { + fn cmp(&self, other: &Self) -> Ordering { + match self.ct_cmp(other) { + -1 => Ordering::Less, + 1 => Ordering::Greater, + n => { + debug_assert_eq!(n, 0); + debug_assert!(bool::from(self.ct_eq(other))); + Ordering::Equal + } + } + } +} + +impl PartialOrd for UInt { + fn partial_cmp(&self, other: &Self) -> Option { + Some(self.cmp(other)) + } +} + +impl PartialEq for UInt { + fn eq(&self, other: &Self) -> bool { + self.ct_eq(other).into() + } +} + +#[cfg(test)] +mod tests { + use crate::{Integer, Zero, U128}; + use subtle::{ConstantTimeEq, ConstantTimeGreater, ConstantTimeLess}; + + #[test] + fn is_zero() { + assert!(bool::from(U128::ZERO.is_zero())); + assert!(!bool::from(U128::ONE.is_zero())); + assert!(!bool::from(U128::MAX.is_zero())); + } + + #[test] + fn is_odd() { + assert!(!bool::from(U128::ZERO.is_odd())); + assert!(bool::from(U128::ONE.is_odd())); + assert!(bool::from(U128::MAX.is_odd())); + } + + #[test] + fn ct_eq() { + let a = U128::ZERO; + let b = U128::MAX; + + assert!(bool::from(a.ct_eq(&a))); + assert!(!bool::from(a.ct_eq(&b))); + assert!(!bool::from(b.ct_eq(&a))); + assert!(bool::from(b.ct_eq(&b))); + } + + #[test] + fn ct_gt() { + let a = U128::ZERO; + let b = U128::ONE; + let c = U128::MAX; + + assert!(bool::from(b.ct_gt(&a))); + assert!(bool::from(c.ct_gt(&a))); + assert!(bool::from(c.ct_gt(&b))); + + assert!(!bool::from(a.ct_gt(&a))); + assert!(!bool::from(b.ct_gt(&b))); + assert!(!bool::from(c.ct_gt(&c))); + + assert!(!bool::from(a.ct_gt(&b))); + assert!(!bool::from(a.ct_gt(&c))); + assert!(!bool::from(b.ct_gt(&c))); + } + + #[test] + fn ct_lt() { + let a = U128::ZERO; + let b = U128::ONE; + let c = U128::MAX; + + assert!(bool::from(a.ct_lt(&b))); + assert!(bool::from(a.ct_lt(&c))); + assert!(bool::from(b.ct_lt(&c))); + + assert!(!bool::from(a.ct_lt(&a))); + assert!(!bool::from(b.ct_lt(&b))); + assert!(!bool::from(c.ct_lt(&c))); + + assert!(!bool::from(b.ct_lt(&a))); + assert!(!bool::from(c.ct_lt(&a))); + assert!(!bool::from(c.ct_lt(&b))); + } +} diff --git a/vendor/crypto-bigint/src/uint/concat.rs b/vendor/crypto-bigint/src/uint/concat.rs new file mode 100644 index 000000000..e92960da7 --- /dev/null +++ b/vendor/crypto-bigint/src/uint/concat.rs @@ -0,0 +1,60 @@ +// TODO(tarcieri): use `const_evaluatable_checked` when stable to make generic around bits. +macro_rules! impl_concat { + ($(($name:ident, $bits:expr)),+) => { + $( + impl $name { + /// Concatenate the two values, with `self` as most significant and `rhs` + /// as the least significant. 
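// [Editorial usage sketch - not part of the vendored crypto-bigint sources above.]
// Exercises the constant-time comparison traits implemented in uint/cmp.rs via the
// `subtle` crate. Assumes the crate is imported as `crypto_bigint`; the function
// name is illustrative only.
fn constant_time_cmp_example() {
    use crypto_bigint::U128;
    use subtle::{ConstantTimeEq, ConstantTimeGreater, ConstantTimeLess};

    let a = U128::from_u64(7);
    let b = U128::from_u64(9);

    // Each comparison runs in constant time and yields a subtle::Choice.
    assert!(bool::from(a.ct_lt(&b)));
    assert!(bool::from(b.ct_gt(&a)));
    assert!(!bool::from(a.ct_eq(&b)));
}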
+ pub const fn concat(&self, rhs: &Self) -> UInt<{nlimbs!($bits) * 2}> { + let mut limbs = [Limb::ZERO; nlimbs!($bits) * 2]; + let mut i = 0; + let mut j = 0; + + while j < nlimbs!($bits) { + limbs[i] = rhs.limbs[j]; + i += 1; + j += 1; + } + + j = 0; + while j < nlimbs!($bits) { + limbs[i] = self.limbs[j]; + i += 1; + j += 1; + } + + UInt { limbs } + } + } + + impl Concat for $name { + type Output = UInt<{nlimbs!($bits) * 2}>; + + fn concat(&self, rhs: &Self) -> Self::Output { + self.concat(rhs) + } + } + + impl From<($name, $name)> for UInt<{nlimbs!($bits) * 2}> { + fn from(nums: ($name, $name)) -> UInt<{nlimbs!($bits) * 2}> { + nums.0.concat(&nums.1) + } + } + )+ + }; +} + +#[cfg(test)] +mod tests { + use crate::{U128, U64}; + + #[test] + fn concat() { + let hi = U64::from_u64(0x0011223344556677); + let lo = U64::from_u64(0x8899aabbccddeeff); + assert_eq!( + hi.concat(&lo), + U128::from_be_hex("00112233445566778899aabbccddeeff") + ); + } +} diff --git a/vendor/crypto-bigint/src/uint/div.rs b/vendor/crypto-bigint/src/uint/div.rs new file mode 100644 index 000000000..f7d9d6bf3 --- /dev/null +++ b/vendor/crypto-bigint/src/uint/div.rs @@ -0,0 +1,496 @@ +//! [`UInt`] division operations. + +use super::UInt; +use crate::limb::Word; +use crate::{Integer, Limb, NonZero, Wrapping}; +use core::ops::{Div, DivAssign, Rem, RemAssign}; +use subtle::{Choice, CtOption}; + +impl UInt { + /// Computes `self` / `rhs`, returns the quotient (q), remainder (r) + /// and 1 for is_some or 0 for is_none. The results can be wrapped in [`CtOption`]. + /// NOTE: Use only if you need to access const fn. Otherwise use `div_rem` because + /// the value for is_some needs to be checked before using `q` and `r`. + /// + /// This is variable only with respect to `rhs`. + /// + /// When used with a fixed `rhs`, this function is constant-time with respect + /// to `self`. + pub(crate) const fn ct_div_rem(&self, rhs: &Self) -> (Self, Self, u8) { + let mb = rhs.bits_vartime(); + let mut bd = (LIMBS * Limb::BIT_SIZE) - mb; + let mut rem = *self; + let mut quo = Self::ZERO; + let mut c = rhs.shl_vartime(bd); + + loop { + let (mut r, borrow) = rem.sbb(&c, Limb::ZERO); + rem = Self::ct_select(r, rem, borrow.0); + r = quo.bitor(&Self::ONE); + quo = Self::ct_select(r, quo, borrow.0); + if bd == 0 { + break; + } + bd -= 1; + c = c.shr_vartime(1); + quo = quo.shl_vartime(1); + } + + let is_some = Limb(mb as Word).is_nonzero(); + quo = Self::ct_select(Self::ZERO, quo, is_some); + (quo, rem, (is_some & 1) as u8) + } + + /// Computes `self` % `rhs`, returns the remainder and + /// and 1 for is_some or 0 for is_none. The results can be wrapped in [`CtOption`]. + /// NOTE: Use only if you need to access const fn. Otherwise use `reduce` + /// This is variable only with respect to `rhs`. + /// + /// When used with a fixed `rhs`, this function is constant-time with respect + /// to `self`. + pub(crate) const fn ct_reduce(&self, rhs: &Self) -> (Self, u8) { + let mb = rhs.bits_vartime(); + let mut bd = (LIMBS * Limb::BIT_SIZE) - mb; + let mut rem = *self; + let mut c = rhs.shl_vartime(bd); + + loop { + let (r, borrow) = rem.sbb(&c, Limb::ZERO); + rem = Self::ct_select(r, rem, borrow.0); + if bd == 0 { + break; + } + bd -= 1; + c = c.shr_vartime(1); + } + + let is_some = Limb(mb as Word).is_nonzero(); + (rem, (is_some & 1) as u8) + } + + /// Computes `self` % 2^k. Faster than reduce since its a power of 2. + /// Limited to 2^16-1 since UInt doesn't support higher. 
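// [Editorial usage sketch - not part of the vendored crypto-bigint sources above.]
// Illustrates the concatenation defined in uint/concat.rs: `self` supplies the most
// significant half of the doubled-width result. Assumes the crate is imported as
// `crypto_bigint`; the function name is illustrative only.
fn concat_example() {
    use crypto_bigint::{U128, U64};

    let hi = U64::from_u64(1);
    let lo = U64::from_u64(2);

    // The result is hi * 2^64 + lo.
    assert_eq!(hi.concat(&lo), U128::from_u128((1u128 << 64) | 2));
}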
+ pub const fn reduce2k(&self, k: usize) -> Self { + let highest = (LIMBS - 1) as u32; + let index = k as u32 / (Limb::BIT_SIZE as u32); + let res = Limb::ct_cmp(Limb::from_u32(index), Limb::from_u32(highest)) - 1; + let le = Limb::is_nonzero(Limb(res as Word)); + let word = Limb::ct_select(Limb::from_u32(highest), Limb::from_u32(index), le).0 as usize; + + let base = k % Limb::BIT_SIZE; + let mask = (1 << base) - 1; + let mut out = *self; + + let outmask = Limb(out.limbs[word].0 & mask); + + out.limbs[word] = Limb::ct_select(out.limbs[word], outmask, le); + + let mut i = word + 1; + while i < LIMBS { + out.limbs[i] = Limb::ZERO; + i += 1; + } + + out + } + + /// Computes self / rhs, returns the quotient, remainder + /// if rhs != 0 + pub fn div_rem(&self, rhs: &Self) -> CtOption<(Self, Self)> { + let (q, r, c) = self.ct_div_rem(rhs); + CtOption::new((q, r), Choice::from(c)) + } + + /// Computes self % rhs, returns the remainder + /// if rhs != 0 + pub fn reduce(&self, rhs: &Self) -> CtOption { + let (r, c) = self.ct_reduce(rhs); + CtOption::new(r, Choice::from(c)) + } + + /// Wrapped division is just normal division i.e. `self` / `rhs` + /// There’s no way wrapping could ever happen. + /// This function exists, so that all operations are accounted for in the wrapping operations. + pub const fn wrapping_div(&self, rhs: &Self) -> Self { + let (q, _, c) = self.ct_div_rem(rhs); + assert!(c == 1, "divide by zero"); + q + } + + /// Perform checked division, returning a [`CtOption`] which `is_some` + /// only if the rhs != 0 + pub fn checked_div(&self, rhs: &Self) -> CtOption { + let (q, _, c) = self.ct_div_rem(rhs); + CtOption::new(q, Choice::from(c)) + } + + /// Wrapped (modular) remainder calculation is just `self` % `rhs`. + /// There’s no way wrapping could ever happen. + /// This function exists, so that all operations are accounted for in the wrapping operations. 
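// [Editorial usage sketch - not part of the vendored crypto-bigint sources above.]
// Shows the quotient/remainder API from uint/div.rs, including how a zero divisor
// is reported through a CtOption and how reduce2k handles power-of-two moduli by
// masking limbs rather than performing a full reduction. Assumes the crate is
// imported as `crypto_bigint`; the function name is illustrative only.
fn division_example() {
    use crypto_bigint::U256;

    let n = U256::from_u8(100);
    let d = U256::from_u8(7);

    // div_rem is only populated when the divisor is non-zero.
    let (q, r) = n.div_rem(&d).unwrap();
    assert_eq!(q, U256::from_u8(14));
    assert_eq!(r, U256::from_u8(2));

    // Division by zero is signalled through the CtOption rather than by panicking.
    assert!(bool::from(n.checked_div(&U256::ZERO).is_none()));

    // 100 mod 2^3 == 4.
    assert_eq!(n.reduce2k(3), U256::from_u8(4));
}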
+ pub const fn wrapping_rem(&self, rhs: &Self) -> Self { + let (r, c) = self.ct_reduce(rhs); + assert!(c == 1, "modulo zero"); + r + } + + /// Perform checked reduction, returning a [`CtOption`] which `is_some` + /// only if the rhs != 0 + pub fn checked_rem(&self, rhs: &Self) -> CtOption { + let (r, c) = self.ct_reduce(rhs); + CtOption::new(r, Choice::from(c)) + } +} + +impl Div<&NonZero>> for &UInt +where + UInt: Integer, +{ + type Output = UInt; + + fn div(self, rhs: &NonZero>) -> Self::Output { + *self / *rhs + } +} + +impl Div<&NonZero>> for UInt +where + UInt: Integer, +{ + type Output = UInt; + + fn div(self, rhs: &NonZero>) -> Self::Output { + self / *rhs + } +} + +impl Div>> for &UInt +where + UInt: Integer, +{ + type Output = UInt; + + fn div(self, rhs: NonZero>) -> Self::Output { + *self / rhs + } +} + +impl Div>> for UInt +where + UInt: Integer, +{ + type Output = UInt; + + fn div(self, rhs: NonZero>) -> Self::Output { + let (q, _, _) = self.ct_div_rem(&rhs); + q + } +} + +impl DivAssign<&NonZero>> for UInt +where + UInt: Integer, +{ + fn div_assign(&mut self, rhs: &NonZero>) { + let (q, _, _) = self.ct_div_rem(rhs); + *self = q + } +} + +impl DivAssign>> for UInt +where + UInt: Integer, +{ + fn div_assign(&mut self, rhs: NonZero>) { + *self /= &rhs; + } +} + +impl Div>> for Wrapping> { + type Output = Wrapping>; + + fn div(self, rhs: NonZero>) -> Self::Output { + Wrapping(self.0.wrapping_div(rhs.as_ref())) + } +} + +impl Div>> for &Wrapping> { + type Output = Wrapping>; + + fn div(self, rhs: NonZero>) -> Self::Output { + *self / rhs + } +} + +impl Div<&NonZero>> for &Wrapping> { + type Output = Wrapping>; + + fn div(self, rhs: &NonZero>) -> Self::Output { + *self / *rhs + } +} + +impl Div<&NonZero>> for Wrapping> { + type Output = Wrapping>; + + fn div(self, rhs: &NonZero>) -> Self::Output { + self / *rhs + } +} + +impl DivAssign<&NonZero>> for Wrapping> { + fn div_assign(&mut self, rhs: &NonZero>) { + *self = Wrapping(self.0.wrapping_div(rhs.as_ref())) + } +} + +impl DivAssign>> for Wrapping> { + fn div_assign(&mut self, rhs: NonZero>) { + *self /= &rhs; + } +} + +impl Rem<&NonZero>> for &UInt +where + UInt: Integer, +{ + type Output = UInt; + + fn rem(self, rhs: &NonZero>) -> Self::Output { + *self % *rhs + } +} + +impl Rem<&NonZero>> for UInt +where + UInt: Integer, +{ + type Output = UInt; + + fn rem(self, rhs: &NonZero>) -> Self::Output { + self % *rhs + } +} + +impl Rem>> for &UInt +where + UInt: Integer, +{ + type Output = UInt; + + fn rem(self, rhs: NonZero>) -> Self::Output { + *self % rhs + } +} + +impl Rem>> for UInt +where + UInt: Integer, +{ + type Output = UInt; + + fn rem(self, rhs: NonZero>) -> Self::Output { + let (r, _) = self.ct_reduce(&rhs); + r + } +} + +impl RemAssign<&NonZero>> for UInt +where + UInt: Integer, +{ + fn rem_assign(&mut self, rhs: &NonZero>) { + let (r, _) = self.ct_reduce(rhs); + *self = r + } +} + +impl RemAssign>> for UInt +where + UInt: Integer, +{ + fn rem_assign(&mut self, rhs: NonZero>) { + *self %= &rhs; + } +} + +impl Rem>> for Wrapping> { + type Output = Wrapping>; + + fn rem(self, rhs: NonZero>) -> Self::Output { + Wrapping(self.0.wrapping_rem(rhs.as_ref())) + } +} + +impl Rem>> for &Wrapping> { + type Output = Wrapping>; + + fn rem(self, rhs: NonZero>) -> Self::Output { + *self % rhs + } +} + +impl Rem<&NonZero>> for &Wrapping> { + type Output = Wrapping>; + + fn rem(self, rhs: &NonZero>) -> Self::Output { + *self % *rhs + } +} + +impl Rem<&NonZero>> for Wrapping> { + type Output = Wrapping>; + + fn rem(self, rhs: &NonZero>) -> 
Self::Output { + self % *rhs + } +} + +impl RemAssign>> for Wrapping> { + fn rem_assign(&mut self, rhs: NonZero>) { + *self %= &rhs; + } +} + +impl RemAssign<&NonZero>> for Wrapping> { + fn rem_assign(&mut self, rhs: &NonZero>) { + *self = Wrapping(self.0.wrapping_rem(rhs.as_ref())) + } +} + +#[cfg(test)] +mod tests { + use super::*; + use crate::{limb::HI_BIT, Limb, U256}; + + #[cfg(feature = "rand")] + use { + crate::{CheckedMul, Random}, + rand_chacha::ChaChaRng, + rand_core::RngCore, + rand_core::SeedableRng, + }; + + #[test] + fn div_word() { + for (n, d, e, ee) in &[ + (200u64, 2u64, 100u64, 0), + (100u64, 25u64, 4u64, 0), + (100u64, 10u64, 10u64, 0), + (1024u64, 8u64, 128u64, 0), + (27u64, 13u64, 2u64, 1u64), + (26u64, 13u64, 2u64, 0u64), + (14u64, 13u64, 1u64, 1u64), + (13u64, 13u64, 1u64, 0u64), + (12u64, 13u64, 0u64, 12u64), + (1u64, 13u64, 0u64, 1u64), + ] { + let lhs = U256::from(*n); + let rhs = U256::from(*d); + let (q, r, is_some) = lhs.ct_div_rem(&rhs); + assert_eq!(is_some, 1); + assert_eq!(U256::from(*e), q); + assert_eq!(U256::from(*ee), r); + } + } + + #[cfg(feature = "rand")] + #[test] + fn div() { + let mut rng = ChaChaRng::from_seed([7u8; 32]); + for _ in 0..25 { + let num = U256::random(&mut rng).shr_vartime(128); + let den = U256::random(&mut rng).shr_vartime(128); + let n = num.checked_mul(&den); + if n.is_some().unwrap_u8() == 1 { + let (q, _, is_some) = n.unwrap().ct_div_rem(&den); + assert_eq!(is_some, 1); + assert_eq!(q, num); + } + } + } + + #[test] + fn div_max() { + let mut a = U256::ZERO; + let mut b = U256::ZERO; + b.limbs[b.limbs.len() - 1] = Limb(Word::MAX); + let q = a.wrapping_div(&b); + assert_eq!(q, UInt::ZERO); + a.limbs[a.limbs.len() - 1] = Limb(1 << (HI_BIT - 7)); + b.limbs[b.limbs.len() - 1] = Limb(0x82 << (HI_BIT - 7)); + let q = a.wrapping_div(&b); + assert_eq!(q, UInt::ZERO); + } + + #[test] + fn div_zero() { + let (q, r, is_some) = U256::ONE.ct_div_rem(&U256::ZERO); + assert_eq!(is_some, 0); + assert_eq!(q, U256::ZERO); + assert_eq!(r, U256::ONE); + } + + #[test] + fn div_one() { + let (q, r, is_some) = U256::from(10u8).ct_div_rem(&U256::ONE); + assert_eq!(is_some, 1); + assert_eq!(q, U256::from(10u8)); + assert_eq!(r, U256::ZERO); + } + + #[test] + fn reduce_one() { + let (r, is_some) = U256::from(10u8).ct_reduce(&U256::ONE); + assert_eq!(is_some, 1); + assert_eq!(r, U256::ZERO); + } + + #[test] + fn reduce_zero() { + let u = U256::from(10u8); + let (r, is_some) = u.ct_reduce(&U256::ZERO); + assert_eq!(is_some, 0); + assert_eq!(r, u); + } + + #[test] + fn reduce_tests() { + let (r, is_some) = U256::from(10u8).ct_reduce(&U256::from(2u8)); + assert_eq!(is_some, 1); + assert_eq!(r, U256::ZERO); + let (r, is_some) = U256::from(10u8).ct_reduce(&U256::from(3u8)); + assert_eq!(is_some, 1); + assert_eq!(r, U256::ONE); + let (r, is_some) = U256::from(10u8).ct_reduce(&U256::from(7u8)); + assert_eq!(is_some, 1); + assert_eq!(r, U256::from(3u8)); + } + + #[test] + fn reduce_max() { + let mut a = U256::ZERO; + let mut b = U256::ZERO; + b.limbs[b.limbs.len() - 1] = Limb(Word::MAX); + let r = a.wrapping_rem(&b); + assert_eq!(r, UInt::ZERO); + a.limbs[a.limbs.len() - 1] = Limb(1 << (HI_BIT - 7)); + b.limbs[b.limbs.len() - 1] = Limb(0x82 << (HI_BIT - 7)); + let r = a.wrapping_rem(&b); + assert_eq!(r, a); + } + + #[cfg(feature = "rand")] + #[test] + fn reduce2krand() { + let mut rng = ChaChaRng::from_seed([7u8; 32]); + for _ in 0..25 { + let num = U256::random(&mut rng); + let k = (rng.next_u32() % 256) as usize; + let den = U256::ONE.shl_vartime(k); + + let 
a = num.reduce2k(k); + let e = num.wrapping_rem(&den); + assert_eq!(a, e); + } + } +} diff --git a/vendor/crypto-bigint/src/uint/encoding.rs b/vendor/crypto-bigint/src/uint/encoding.rs new file mode 100644 index 000000000..a83976238 --- /dev/null +++ b/vendor/crypto-bigint/src/uint/encoding.rs @@ -0,0 +1,278 @@ +//! Const-friendly decoding operations for [`UInt`] + +#[cfg(all(feature = "der", feature = "generic-array"))] +mod der; + +#[cfg(feature = "rlp")] +mod rlp; + +use super::UInt; +use crate::{Encoding, Limb, Word}; + +impl UInt { + /// Create a new [`UInt`] from the provided big endian bytes. + pub const fn from_be_slice(bytes: &[u8]) -> Self { + assert!( + bytes.len() == Limb::BYTE_SIZE * LIMBS, + "bytes are not the expected size" + ); + + let mut res = [Limb::ZERO; LIMBS]; + let mut buf = [0u8; Limb::BYTE_SIZE]; + let mut i = 0; + + while i < LIMBS { + let mut j = 0; + while j < Limb::BYTE_SIZE { + buf[j] = bytes[i * Limb::BYTE_SIZE + j]; + j += 1; + } + res[LIMBS - i - 1] = Limb(Word::from_be_bytes(buf)); + i += 1; + } + + UInt::new(res) + } + + /// Create a new [`UInt`] from the provided big endian hex string. + pub const fn from_be_hex(hex: &str) -> Self { + let bytes = hex.as_bytes(); + + assert!( + bytes.len() == Limb::BYTE_SIZE * LIMBS * 2, + "hex string is not the expected size" + ); + + let mut res = [Limb::ZERO; LIMBS]; + let mut buf = [0u8; Limb::BYTE_SIZE]; + let mut i = 0; + + while i < LIMBS { + let mut j = 0; + while j < Limb::BYTE_SIZE { + let offset = (i * Limb::BYTE_SIZE + j) * 2; + buf[j] = decode_hex_byte([bytes[offset], bytes[offset + 1]]); + j += 1; + } + res[LIMBS - i - 1] = Limb(Word::from_be_bytes(buf)); + i += 1; + } + + UInt::new(res) + } + + /// Create a new [`UInt`] from the provided little endian bytes. + pub const fn from_le_slice(bytes: &[u8]) -> Self { + assert!( + bytes.len() == Limb::BYTE_SIZE * LIMBS, + "bytes are not the expected size" + ); + + let mut res = [Limb::ZERO; LIMBS]; + let mut buf = [0u8; Limb::BYTE_SIZE]; + let mut i = 0; + + while i < LIMBS { + let mut j = 0; + while j < Limb::BYTE_SIZE { + buf[j] = bytes[i * Limb::BYTE_SIZE + j]; + j += 1; + } + res[i] = Limb(Word::from_le_bytes(buf)); + i += 1; + } + + UInt::new(res) + } + + /// Create a new [`UInt`] from the provided little endian hex string. + pub const fn from_le_hex(hex: &str) -> Self { + let bytes = hex.as_bytes(); + + assert!( + bytes.len() == Limb::BYTE_SIZE * LIMBS * 2, + "bytes are not the expected size" + ); + + let mut res = [Limb::ZERO; LIMBS]; + let mut buf = [0u8; Limb::BYTE_SIZE]; + let mut i = 0; + + while i < LIMBS { + let mut j = 0; + while j < Limb::BYTE_SIZE { + let offset = (i * Limb::BYTE_SIZE + j) * 2; + buf[j] = decode_hex_byte([bytes[offset], bytes[offset + 1]]); + j += 1; + } + res[i] = Limb(Word::from_le_bytes(buf)); + i += 1; + } + + UInt::new(res) + } + + /// Serialize this [`UInt`] as big-endian, writing it into the provided + /// byte slice. + #[inline] + #[cfg_attr(docsrs, doc(cfg(feature = "generic-array")))] + pub(crate) fn write_be_bytes(&self, out: &mut [u8]) { + debug_assert_eq!(out.len(), Limb::BYTE_SIZE * LIMBS); + + for (src, dst) in self + .limbs + .iter() + .rev() + .cloned() + .zip(out.chunks_exact_mut(Limb::BYTE_SIZE)) + { + dst.copy_from_slice(&src.to_be_bytes()); + } + } + + /// Serialize this [`UInt`] as little-endian, writing it into the provided + /// byte slice. 
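// [Editorial usage sketch - not part of the vendored crypto-bigint sources above.]
// Demonstrates the const-friendly decoders from uint/encoding.rs; each expects
// exactly Limb::BYTE_SIZE * LIMBS input bytes (or twice that many hex digits).
// Assumes the crate is imported as `crypto_bigint`; the function name is
// illustrative only.
fn encoding_example() {
    use crypto_bigint::U64;

    let be = U64::from_be_slice(&[0x00, 0x11, 0x22, 0x33, 0x44, 0x55, 0x66, 0x77]);
    let le = U64::from_le_slice(&[0x77, 0x66, 0x55, 0x44, 0x33, 0x22, 0x11, 0x00]);

    // Both decoders recover the same integer from the two byte orders.
    assert_eq!(be, le);
    assert_eq!(be, U64::from_be_hex("0011223344556677"));
}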
+ #[inline] + #[cfg_attr(docsrs, doc(cfg(feature = "generic-array")))] + pub(crate) fn write_le_bytes(&self, out: &mut [u8]) { + debug_assert_eq!(out.len(), Limb::BYTE_SIZE * LIMBS); + + for (src, dst) in self + .limbs + .iter() + .cloned() + .zip(out.chunks_exact_mut(Limb::BYTE_SIZE)) + { + dst.copy_from_slice(&src.to_le_bytes()); + } + } +} + +/// Decode a single byte encoded as two hexadecimal characters. +const fn decode_hex_byte(bytes: [u8; 2]) -> u8 { + let mut i = 0; + let mut result = 0u8; + + while i < 2 { + result <<= 4; + result |= match bytes[i] { + b @ b'0'..=b'9' => b - b'0', + b @ b'a'..=b'f' => 10 + b - b'a', + b @ b'A'..=b'F' => 10 + b - b'A', + b => { + assert!( + matches!(b, b'0'..=b'9' | b'a' ..= b'f' | b'A'..=b'F'), + "invalid hex byte" + ); + 0 + } + }; + + i += 1; + } + + result +} + +#[cfg(test)] +mod tests { + use crate::Limb; + use hex_literal::hex; + + #[cfg(feature = "alloc")] + use {crate::U128, alloc::format}; + + #[cfg(target_pointer_width = "32")] + use crate::U64 as UIntEx; + + #[cfg(target_pointer_width = "64")] + use crate::U128 as UIntEx; + + #[test] + #[cfg(target_pointer_width = "32")] + fn from_be_slice() { + let bytes = hex!("0011223344556677"); + let n = UIntEx::from_be_slice(&bytes); + assert_eq!(n.limbs(), &[Limb(0x44556677), Limb(0x00112233)]); + } + + #[test] + #[cfg(target_pointer_width = "64")] + fn from_be_slice() { + let bytes = hex!("00112233445566778899aabbccddeeff"); + let n = UIntEx::from_be_slice(&bytes); + assert_eq!( + n.limbs(), + &[Limb(0x8899aabbccddeeff), Limb(0x0011223344556677)] + ); + } + + #[test] + #[cfg(target_pointer_width = "32")] + fn from_le_slice() { + let bytes = hex!("7766554433221100"); + let n = UIntEx::from_le_slice(&bytes); + assert_eq!(n.limbs(), &[Limb(0x44556677), Limb(0x00112233)]); + } + + #[test] + #[cfg(target_pointer_width = "64")] + fn from_le_slice() { + let bytes = hex!("ffeeddccbbaa99887766554433221100"); + let n = UIntEx::from_le_slice(&bytes); + assert_eq!( + n.limbs(), + &[Limb(0x8899aabbccddeeff), Limb(0x0011223344556677)] + ); + } + + #[test] + #[cfg(target_pointer_width = "32")] + fn from_be_hex() { + let n = UIntEx::from_be_hex("0011223344556677"); + assert_eq!(n.limbs(), &[Limb(0x44556677), Limb(0x00112233)]); + } + + #[test] + #[cfg(target_pointer_width = "64")] + fn from_be_hex() { + let n = UIntEx::from_be_hex("00112233445566778899aabbccddeeff"); + assert_eq!( + n.limbs(), + &[Limb(0x8899aabbccddeeff), Limb(0x0011223344556677)] + ); + } + + #[test] + #[cfg(target_pointer_width = "32")] + fn from_le_hex() { + let n = UIntEx::from_le_hex("7766554433221100"); + assert_eq!(n.limbs(), &[Limb(0x44556677), Limb(0x00112233)]); + } + + #[test] + #[cfg(target_pointer_width = "64")] + fn from_le_hex() { + let n = UIntEx::from_le_hex("ffeeddccbbaa99887766554433221100"); + assert_eq!( + n.limbs(), + &[Limb(0x8899aabbccddeeff), Limb(0x0011223344556677)] + ); + } + + #[cfg(feature = "alloc")] + #[test] + fn hex_upper() { + let hex = "AAAAAAAABBBBBBBBCCCCCCCCDDDDDDDD"; + let n = U128::from_be_hex(hex); + assert_eq!(hex, format!("{:X}", n)); + } + + #[cfg(feature = "alloc")] + #[test] + fn hex_lower() { + let hex = "aaaaaaaabbbbbbbbccccccccdddddddd"; + let n = U128::from_be_hex(hex); + assert_eq!(hex, format!("{:x}", n)); + } +} diff --git a/vendor/crypto-bigint/src/uint/encoding/der.rs b/vendor/crypto-bigint/src/uint/encoding/der.rs new file mode 100644 index 000000000..cf1b9c31e --- /dev/null +++ b/vendor/crypto-bigint/src/uint/encoding/der.rs @@ -0,0 +1,69 @@ +//! 
Support for decoding/encoding [`UInt`] as an ASN.1 DER `INTEGER`. + +use crate::{generic_array::GenericArray, ArrayEncoding, UInt}; +use ::der::{ + asn1::{AnyRef, UIntRef}, + DecodeValue, EncodeValue, FixedTag, Length, Tag, +}; + +#[cfg_attr(docsrs, doc(cfg(feature = "der")))] +impl<'a, const LIMBS: usize> TryFrom> for UInt +where + UInt: ArrayEncoding, +{ + type Error = der::Error; + + fn try_from(any: AnyRef<'a>) -> der::Result> { + UIntRef::try_from(any)?.try_into() + } +} + +#[cfg_attr(docsrs, doc(cfg(feature = "der")))] +impl<'a, const LIMBS: usize> TryFrom> for UInt +where + UInt: ArrayEncoding, +{ + type Error = der::Error; + + fn try_from(bytes: UIntRef<'a>) -> der::Result> { + let mut array = GenericArray::default(); + let offset = array.len().saturating_sub(bytes.len().try_into()?); + array[offset..].copy_from_slice(bytes.as_bytes()); + Ok(UInt::from_be_byte_array(array)) + } +} + +#[cfg_attr(docsrs, doc(cfg(feature = "der")))] +impl<'a, const LIMBS: usize> DecodeValue<'a> for UInt +where + UInt: ArrayEncoding, +{ + fn decode_value>(reader: &mut R, header: der::Header) -> der::Result { + UIntRef::decode_value(reader, header)?.try_into() + } +} + +#[cfg_attr(docsrs, doc(cfg(feature = "der")))] +impl EncodeValue for UInt +where + UInt: ArrayEncoding, +{ + fn value_len(&self) -> der::Result { + // TODO(tarcieri): more efficient length calculation + let array = self.to_be_byte_array(); + UIntRef::new(&array)?.value_len() + } + + fn encode_value(&self, encoder: &mut dyn der::Writer) -> der::Result<()> { + let array = self.to_be_byte_array(); + UIntRef::new(&array)?.encode_value(encoder) + } +} + +#[cfg_attr(docsrs, doc(cfg(feature = "der")))] +impl FixedTag for UInt +where + UInt: ArrayEncoding, +{ + const TAG: Tag = Tag::Integer; +} diff --git a/vendor/crypto-bigint/src/uint/encoding/rlp.rs b/vendor/crypto-bigint/src/uint/encoding/rlp.rs new file mode 100644 index 000000000..8a10170d5 --- /dev/null +++ b/vendor/crypto-bigint/src/uint/encoding/rlp.rs @@ -0,0 +1,79 @@ +//! Recursive Length Prefix (RLP) encoding support. + +use crate::{Encoding, UInt}; +use rlp::{DecoderError, Rlp, RlpStream}; + +#[cfg_attr(docsrs, doc(cfg(feature = "rlp")))] +impl rlp::Encodable for UInt +where + Self: Encoding, +{ + fn rlp_append(&self, stream: &mut RlpStream) { + let bytes = self.to_be_bytes(); + let mut bytes_stripped = bytes.as_ref(); + + while bytes_stripped.first().cloned() == Some(0) { + bytes_stripped = &bytes_stripped[1..]; + } + + stream.encoder().encode_value(bytes_stripped); + } +} + +#[cfg_attr(docsrs, doc(cfg(feature = "rlp")))] +impl rlp::Decodable for UInt +where + Self: Encoding, + ::Repr: Default, +{ + fn decode(rlp: &Rlp<'_>) -> Result { + rlp.decoder().decode_value(|bytes| { + if bytes.first().cloned() == Some(0) { + Err(rlp::DecoderError::RlpInvalidIndirection) + } else { + let mut repr = ::Repr::default(); + let offset = repr + .as_ref() + .len() + .checked_sub(bytes.len()) + .ok_or(DecoderError::RlpIsTooBig)?; + + repr.as_mut()[offset..].copy_from_slice(bytes); + Ok(Self::from_be_bytes(repr)) + } + }) + } +} + +#[cfg(test)] +mod tests { + use crate::U256; + use hex_literal::hex; + + /// U256 test vectors from the `rlp` crate. 
+ /// + /// + const U256_VECTORS: &[(U256, &[u8])] = &[ + (U256::ZERO, &hex!("80")), + ( + U256::from_be_hex("0000000000000000000000000000000000000000000000000000000001000000"), + &hex!("8401000000"), + ), + ( + U256::from_be_hex("00000000000000000000000000000000000000000000000000000000ffffffff"), + &hex!("84ffffffff"), + ), + ( + U256::from_be_hex("8090a0b0c0d0e0f00910203040506077000000000000000100000000000012f0"), + &hex!("a08090a0b0c0d0e0f00910203040506077000000000000000100000000000012f0"), + ), + ]; + + #[test] + fn round_trip() { + for &(uint, expected_bytes) in U256_VECTORS { + assert_eq!(rlp::encode(&uint), expected_bytes); + assert_eq!(rlp::decode::(expected_bytes).unwrap(), uint); + } + } +} diff --git a/vendor/crypto-bigint/src/uint/from.rs b/vendor/crypto-bigint/src/uint/from.rs new file mode 100644 index 000000000..daa5b7092 --- /dev/null +++ b/vendor/crypto-bigint/src/uint/from.rs @@ -0,0 +1,238 @@ +//! `From`-like conversions for [`UInt`]. + +use crate::{Limb, UInt, WideWord, Word, U128, U64}; + +impl UInt { + /// Create a [`UInt`] from a `u8` (const-friendly) + // TODO(tarcieri): replace with `const impl From` when stable + pub const fn from_u8(n: u8) -> Self { + assert!(LIMBS >= 1, "number of limbs must be greater than zero"); + let mut limbs = [Limb::ZERO; LIMBS]; + limbs[0].0 = n as Word; + Self { limbs } + } + + /// Create a [`UInt`] from a `u16` (const-friendly) + // TODO(tarcieri): replace with `const impl From` when stable + pub const fn from_u16(n: u16) -> Self { + assert!(LIMBS >= 1, "number of limbs must be greater than zero"); + let mut limbs = [Limb::ZERO; LIMBS]; + limbs[0].0 = n as Word; + Self { limbs } + } + + /// Create a [`UInt`] from a `u32` (const-friendly) + // TODO(tarcieri): replace with `const impl From` when stable + #[allow(trivial_numeric_casts)] + pub const fn from_u32(n: u32) -> Self { + assert!(LIMBS >= 1, "number of limbs must be greater than zero"); + let mut limbs = [Limb::ZERO; LIMBS]; + limbs[0].0 = n as Word; + Self { limbs } + } + + /// Create a [`UInt`] from a `u64` (const-friendly) + // TODO(tarcieri): replace with `const impl From` when stable + #[cfg(target_pointer_width = "32")] + pub const fn from_u64(n: u64) -> Self { + assert!(LIMBS >= 2, "number of limbs must be two or greater"); + let mut limbs = [Limb::ZERO; LIMBS]; + limbs[0].0 = (n & 0xFFFFFFFF) as u32; + limbs[1].0 = (n >> 32) as u32; + Self { limbs } + } + + /// Create a [`UInt`] from a `u64` (const-friendly) + // TODO(tarcieri): replace with `const impl From` when stable + #[cfg(target_pointer_width = "64")] + pub const fn from_u64(n: u64) -> Self { + assert!(LIMBS >= 1, "number of limbs must be greater than zero"); + let mut limbs = [Limb::ZERO; LIMBS]; + limbs[0].0 = n; + Self { limbs } + } + + /// Create a [`UInt`] from a `u128` (const-friendly) + // TODO(tarcieri): replace with `const impl From` when stable + pub const fn from_u128(n: u128) -> Self { + assert!( + LIMBS >= (128 / Limb::BIT_SIZE), + "number of limbs must be greater than zero" + ); + + let lo = U64::from_u64((n & 0xffff_ffff_ffff_ffff) as u64); + let hi = U64::from_u64((n >> 64) as u64); + + let mut limbs = [Limb::ZERO; LIMBS]; + + let mut i = 0; + while i < lo.limbs.len() { + limbs[i] = lo.limbs[i]; + i += 1; + } + + let mut j = 0; + while j < hi.limbs.len() { + limbs[i + j] = hi.limbs[j]; + j += 1; + } + + Self { limbs } + } + + /// Create a [`UInt`] from a `Word` (const-friendly) + // TODO(tarcieri): replace with `const impl From` when stable + pub const fn from_word(n: Word) -> Self { + assert!(LIMBS 
>= 1, "number of limbs must be greater than zero"); + let mut limbs = [Limb::ZERO; LIMBS]; + limbs[0].0 = n; + Self { limbs } + } + + /// Create a [`UInt`] from a `WideWord` (const-friendly) + // TODO(tarcieri): replace with `const impl From` when stable + pub const fn from_wide_word(n: WideWord) -> Self { + assert!(LIMBS >= 2, "number of limbs must be two or greater"); + let mut limbs = [Limb::ZERO; LIMBS]; + limbs[0].0 = n as Word; + limbs[1].0 = (n >> Limb::BIT_SIZE) as Word; + Self { limbs } + } +} + +impl From for UInt { + fn from(n: u8) -> Self { + // TODO(tarcieri): const where clause when possible + debug_assert!(LIMBS > 0, "limbs must be non-zero"); + Self::from_u8(n) + } +} + +impl From for UInt { + fn from(n: u16) -> Self { + // TODO(tarcieri): const where clause when possible + debug_assert!(LIMBS > 0, "limbs must be non-zero"); + Self::from_u16(n) + } +} + +impl From for UInt { + fn from(n: u32) -> Self { + // TODO(tarcieri): const where clause when possible + debug_assert!(LIMBS > 0, "limbs must be non-zero"); + Self::from_u32(n) + } +} + +impl From for UInt { + fn from(n: u64) -> Self { + // TODO(tarcieri): const where clause when possible + debug_assert!(LIMBS >= (64 / Limb::BIT_SIZE), "not enough limbs"); + Self::from_u64(n) + } +} + +impl From for UInt { + fn from(n: u128) -> Self { + // TODO(tarcieri): const where clause when possible + debug_assert!(LIMBS >= (128 / Limb::BIT_SIZE), "not enough limbs"); + Self::from_u128(n) + } +} + +#[cfg(target_pointer_width = "32")] +#[cfg_attr(docsrs, doc(cfg(target_pointer_width = "32")))] +impl From for u64 { + fn from(n: U64) -> u64 { + (n.limbs[0].0 as u64) | ((n.limbs[1].0 as u64) << 32) + } +} + +#[cfg(target_pointer_width = "64")] +#[cfg_attr(docsrs, doc(cfg(target_pointer_width = "64")))] +impl From for u64 { + fn from(n: U64) -> u64 { + n.limbs[0].into() + } +} + +impl From for u128 { + fn from(n: U128) -> u128 { + let (hi, lo) = n.split(); + (u64::from(hi) as u128) << 64 | (u64::from(lo) as u128) + } +} + +impl From<[Word; LIMBS]> for UInt { + fn from(arr: [Word; LIMBS]) -> Self { + Self::from_words(arr) + } +} + +impl From> for [Word; LIMBS] { + fn from(n: UInt) -> [Word; LIMBS] { + *n.as_ref() + } +} + +impl From<[Limb; LIMBS]> for UInt { + fn from(limbs: [Limb; LIMBS]) -> Self { + Self { limbs } + } +} + +impl From> for [Limb; LIMBS] { + fn from(n: UInt) -> [Limb; LIMBS] { + n.limbs + } +} + +impl From for UInt { + fn from(limb: Limb) -> Self { + limb.0.into() + } +} + +#[cfg(test)] +mod tests { + use crate::{Limb, Word, U128}; + + #[cfg(target_pointer_width = "32")] + use crate::U64 as UIntEx; + + #[cfg(target_pointer_width = "64")] + use crate::U128 as UIntEx; + + #[test] + fn from_u8() { + let n = UIntEx::from(42u8); + assert_eq!(n.limbs(), &[Limb(42), Limb(0)]); + } + + #[test] + fn from_u16() { + let n = UIntEx::from(42u16); + assert_eq!(n.limbs(), &[Limb(42), Limb(0)]); + } + + #[test] + fn from_u64() { + let n = UIntEx::from(42u64); + assert_eq!(n.limbs(), &[Limb(42), Limb(0)]); + } + + #[test] + fn from_u128() { + let n = U128::from(42u128); + assert_eq!(&n.limbs()[..2], &[Limb(42), Limb(0)]); + assert_eq!(u128::from(n), 42u128); + } + + #[test] + fn array_round_trip() { + let arr1 = [1, 2]; + let n = UIntEx::from(arr1); + let arr2: [Word; 2] = n.into(); + assert_eq!(arr1, arr2); + } +} diff --git a/vendor/crypto-bigint/src/uint/inv_mod.rs b/vendor/crypto-bigint/src/uint/inv_mod.rs new file mode 100644 index 000000000..a11408564 --- /dev/null +++ b/vendor/crypto-bigint/src/uint/inv_mod.rs @@ -0,0 +1,62 @@ +use 
super::UInt; +use crate::Limb; + +impl UInt { + /// Computes 1/`self` mod 2^k as specified in Algorithm 4 from + /// A Secure Algorithm for Inversion Modulo 2k by + /// Sadiel de la Fe and Carles Ferrer. See + /// . + /// + /// Conditions: `self` < 2^k and `self` must be odd + pub const fn inv_mod2k(&self, k: usize) -> Self { + let mut x = Self::ZERO; + let mut b = Self::ONE; + let mut i = 0; + + while i < k { + let mut x_i = Self::ZERO; + let j = b.limbs[0].0 & 1; + x_i.limbs[0] = Limb(j); + x = x.bitor(&x_i.shl_vartime(i)); + + let t = b.wrapping_sub(self); + b = Self::ct_select(b, t, j.wrapping_neg()).shr_vartime(1); + i += 1; + } + x + } +} + +#[cfg(test)] +mod tests { + use crate::U256; + + #[test] + fn inv_mod2k() { + let v = U256::from_be_slice(&[ + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfe, + 0xff, 0xff, 0xfc, 0x2f, + ]); + let e = U256::from_be_slice(&[ + 0x36, 0x42, 0xe6, 0xfa, 0xea, 0xac, 0x7c, 0x66, 0x63, 0xb9, 0x3d, 0x3d, 0x6a, 0x0d, + 0x48, 0x9e, 0x43, 0x4d, 0xdc, 0x01, 0x23, 0xdb, 0x5f, 0xa6, 0x27, 0xc7, 0xf6, 0xe2, + 0x2d, 0xda, 0xca, 0xcf, + ]); + let a = v.inv_mod2k(256); + assert_eq!(e, a); + + let v = U256::from_be_slice(&[ + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xfe, 0xba, 0xae, 0xdc, 0xe6, 0xaf, 0x48, 0xa0, 0x3b, 0xbf, 0xd2, 0x5e, 0x8c, + 0xd0, 0x36, 0x41, 0x41, + ]); + let e = U256::from_be_slice(&[ + 0x26, 0x17, 0x76, 0xf2, 0x9b, 0x6b, 0x10, 0x6c, 0x76, 0x80, 0xcf, 0x3e, 0xd8, 0x30, + 0x54, 0xa1, 0xaf, 0x5a, 0xe5, 0x37, 0xcb, 0x46, 0x13, 0xdb, 0xb4, 0xf2, 0x00, 0x99, + 0xaa, 0x77, 0x4e, 0xc1, + ]); + let a = v.inv_mod2k(256); + assert_eq!(e, a); + } +} diff --git a/vendor/crypto-bigint/src/uint/mul.rs b/vendor/crypto-bigint/src/uint/mul.rs new file mode 100644 index 000000000..ecb32fd10 --- /dev/null +++ b/vendor/crypto-bigint/src/uint/mul.rs @@ -0,0 +1,246 @@ +//! [`UInt`] addition operations. + +use crate::{Checked, CheckedMul, Concat, Limb, UInt, Wrapping, Zero}; +use core::ops::{Mul, MulAssign}; +use subtle::CtOption; + +impl UInt { + /// Compute "wide" multiplication, with a product twice the size of the input. + /// + /// Returns a tuple containing the `(lo, hi)` components of the product. + /// + /// # Ordering note + /// + /// Releases of `crypto-bigint` prior to v0.3 used `(hi, lo)` ordering + /// instead. This has been changed for better consistency with the rest of + /// the APIs in this crate. + /// + /// For more info see: + // TODO(tarcieri): use `concat` to construct a wide output + pub const fn mul_wide(&self, rhs: &Self) -> (Self, Self) { + let mut i = 0; + let mut lo = Self::ZERO; + let mut hi = Self::ZERO; + + // Schoolbook multiplication. + // TODO(tarcieri): use Karatsuba for better performance? + while i < LIMBS { + let mut j = 0; + let mut carry = Limb::ZERO; + + while j < LIMBS { + let k = i + j; + + if k >= LIMBS { + let (n, c) = hi.limbs[k - LIMBS].mac(self.limbs[i], rhs.limbs[j], carry); + hi.limbs[k - LIMBS] = n; + carry = c; + } else { + let (n, c) = lo.limbs[k].mac(self.limbs[i], rhs.limbs[j], carry); + lo.limbs[k] = n; + carry = c; + } + + j += 1; + } + + hi.limbs[i + j - LIMBS] = carry; + i += 1; + } + + (lo, hi) + } + + /// Perform saturating multiplication, returning `MAX` on overflow. 
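// [Editorial usage sketch - not part of the vendored crypto-bigint sources above.]
// Walks through the multiplication flavours defined in uint/mul.rs: mul_wide returns
// the full double-width product as (lo, hi), checked_mul succeeds only when the high
// half is zero, and saturating_mul clamps an overflowing product to MAX. Assumes the
// crate is imported as `crypto_bigint`; the function name is illustrative only.
fn multiplication_example() {
    use crypto_bigint::{CheckedMul, U64};

    let a = U64::from_u32(0xffff_ffff);

    let (lo, hi) = a.mul_wide(&a);
    assert_eq!(lo, U64::from_u64(0xffff_fffe_0000_0001));
    assert_eq!(hi, U64::ZERO);

    // No overflow: the checked product equals the low half.
    assert_eq!(a.checked_mul(&a).unwrap(), lo);

    // Overflow saturates instead of wrapping.
    assert_eq!(U64::MAX.saturating_mul(&U64::MAX), U64::MAX);
}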
+ pub const fn saturating_mul(&self, rhs: &Self) -> Self { + let (res, overflow) = self.mul_wide(rhs); + + let mut i = 0; + let mut accumulator = 0; + + while i < LIMBS { + accumulator |= overflow.limbs[i].0; + i += 1; + } + + if accumulator == 0 { + res + } else { + Self::MAX + } + } + + /// Perform wrapping multiplication, discarding overflow. + pub const fn wrapping_mul(&self, rhs: &Self) -> Self { + self.mul_wide(rhs).0 + } + + /// Square self, returning a "wide" result. + pub fn square(&self) -> ::Output + where + Self: Concat, + { + let (lo, hi) = self.mul_wide(self); + hi.concat(&lo) + } +} + +impl CheckedMul<&UInt> for UInt { + type Output = Self; + + fn checked_mul(&self, rhs: &Self) -> CtOption { + let (lo, hi) = self.mul_wide(rhs); + CtOption::new(lo, hi.is_zero()) + } +} + +impl Mul for Wrapping> { + type Output = Self; + + fn mul(self, rhs: Self) -> Wrapping> { + Wrapping(self.0.wrapping_mul(&rhs.0)) + } +} + +impl Mul<&Wrapping>> for Wrapping> { + type Output = Wrapping>; + + fn mul(self, rhs: &Wrapping>) -> Wrapping> { + Wrapping(self.0.wrapping_mul(&rhs.0)) + } +} + +impl Mul>> for &Wrapping> { + type Output = Wrapping>; + + fn mul(self, rhs: Wrapping>) -> Wrapping> { + Wrapping(self.0.wrapping_mul(&rhs.0)) + } +} + +impl Mul<&Wrapping>> for &Wrapping> { + type Output = Wrapping>; + + fn mul(self, rhs: &Wrapping>) -> Wrapping> { + Wrapping(self.0.wrapping_mul(&rhs.0)) + } +} + +impl MulAssign for Wrapping> { + fn mul_assign(&mut self, other: Self) { + *self = *self * other; + } +} + +impl MulAssign<&Wrapping>> for Wrapping> { + fn mul_assign(&mut self, other: &Self) { + *self = *self * other; + } +} + +impl Mul for Checked> { + type Output = Self; + + fn mul(self, rhs: Self) -> Checked> { + Checked(self.0.and_then(|a| rhs.0.and_then(|b| a.checked_mul(&b)))) + } +} + +impl Mul<&Checked>> for Checked> { + type Output = Checked>; + + fn mul(self, rhs: &Checked>) -> Checked> { + Checked(self.0.and_then(|a| rhs.0.and_then(|b| a.checked_mul(&b)))) + } +} + +impl Mul>> for &Checked> { + type Output = Checked>; + + fn mul(self, rhs: Checked>) -> Checked> { + Checked(self.0.and_then(|a| rhs.0.and_then(|b| a.checked_mul(&b)))) + } +} + +impl Mul<&Checked>> for &Checked> { + type Output = Checked>; + + fn mul(self, rhs: &Checked>) -> Checked> { + Checked(self.0.and_then(|a| rhs.0.and_then(|b| a.checked_mul(&b)))) + } +} + +impl MulAssign for Checked> { + fn mul_assign(&mut self, other: Self) { + *self = *self * other; + } +} + +impl MulAssign<&Checked>> for Checked> { + fn mul_assign(&mut self, other: &Self) { + *self = *self * other; + } +} + +#[cfg(test)] +mod tests { + use crate::{CheckedMul, Zero, U64}; + + #[test] + fn mul_wide_zero_and_one() { + assert_eq!(U64::ZERO.mul_wide(&U64::ZERO), (U64::ZERO, U64::ZERO)); + assert_eq!(U64::ZERO.mul_wide(&U64::ONE), (U64::ZERO, U64::ZERO)); + assert_eq!(U64::ONE.mul_wide(&U64::ZERO), (U64::ZERO, U64::ZERO)); + assert_eq!(U64::ONE.mul_wide(&U64::ONE), (U64::ONE, U64::ZERO)); + } + + #[test] + fn mul_wide_lo_only() { + let primes: &[u32] = &[3, 5, 17, 256, 65537]; + + for &a_int in primes { + for &b_int in primes { + let (lo, hi) = U64::from_u32(a_int).mul_wide(&U64::from_u32(b_int)); + let expected = U64::from_u64(a_int as u64 * b_int as u64); + assert_eq!(lo, expected); + assert!(bool::from(hi.is_zero())); + } + } + } + + #[test] + fn checked_mul_ok() { + let n = U64::from_u32(0xffff_ffff); + assert_eq!( + n.checked_mul(&n).unwrap(), + U64::from_u64(0xffff_fffe_0000_0001) + ); + } + + #[test] + fn checked_mul_overflow() { + let n = 
U64::from_u64(0xffff_ffff_ffff_ffff); + assert!(bool::from(n.checked_mul(&n).is_none())); + } + + #[test] + fn saturating_mul_no_overflow() { + let n = U64::from_u8(8); + assert_eq!(n.saturating_mul(&n), U64::from_u8(64)); + } + + #[test] + fn saturating_mul_overflow() { + let a = U64::from(0xffff_ffff_ffff_ffffu64); + let b = U64::from(2u8); + assert_eq!(a.saturating_mul(&b), U64::MAX); + } + + #[test] + fn square() { + let n = U64::from_u64(0xffff_ffff_ffff_ffff); + let (hi, lo) = n.square().split(); + assert_eq!(lo, U64::from_u64(1)); + assert_eq!(hi, U64::from_u64(0xffff_ffff_ffff_fffe)); + } +} diff --git a/vendor/crypto-bigint/src/uint/mul_mod.rs b/vendor/crypto-bigint/src/uint/mul_mod.rs new file mode 100644 index 000000000..1e9c053ea --- /dev/null +++ b/vendor/crypto-bigint/src/uint/mul_mod.rs @@ -0,0 +1,131 @@ +//! [`UInt`] multiplication modulus operations. + +use crate::{Limb, UInt, WideWord, Word}; + +impl UInt { + /// Computes `self * rhs mod p` in constant time for the special modulus + /// `p = MAX+1-c` where `c` is small enough to fit in a single [`Limb`]. + /// For the modulus reduction, this function implements Algorithm 14.47 from + /// the "Handbook of Applied Cryptography", by A. Menezes, P. van Oorschot, + /// and S. Vanstone, CRC Press, 1996. + pub const fn mul_mod_special(&self, rhs: &Self, c: Limb) -> Self { + // We implicitly assume `LIMBS > 0`, because `UInt<0>` doesn't compile. + // Still the case `LIMBS == 1` needs special handling. + if LIMBS == 1 { + let prod = self.limbs[0].0 as WideWord * rhs.limbs[0].0 as WideWord; + let reduced = prod % Word::MIN.wrapping_sub(c.0) as WideWord; + return Self::from_word(reduced as Word); + } + + let (lo, hi) = self.mul_wide(rhs); + + // Now use Algorithm 14.47 for the reduction + let (lo, carry) = mac_by_limb(lo, hi, c, Limb::ZERO); + + let (lo, carry) = { + let rhs = (carry.0 + 1) as WideWord * c.0 as WideWord; + lo.adc(&Self::from_wide_word(rhs), Limb::ZERO) + }; + + let (lo, _) = { + let rhs = carry.0.wrapping_sub(1) & c.0; + lo.sbb(&Self::from_word(rhs), Limb::ZERO) + }; + + lo + } +} + +/// Computes `a + (b * c) + carry`, returning the result along with the new carry. +const fn mac_by_limb( + mut a: UInt, + b: UInt, + c: Limb, + mut carry: Limb, +) -> (UInt, Limb) { + let mut i = 0; + + while i < LIMBS { + let (n, c) = a.limbs[i].mac(b.limbs[i], c, carry); + a.limbs[i] = n; + carry = c; + i += 1; + } + + (a, carry) +} + +#[cfg(all(test, feature = "rand"))] +mod tests { + use crate::{Limb, NonZero, Random, RandomMod, UInt}; + use rand_core::SeedableRng; + + macro_rules! 
test_mul_mod_special { + ($size:expr, $test_name:ident) => { + #[test] + fn $test_name() { + let mut rng = rand_chacha::ChaCha8Rng::seed_from_u64(1); + let moduli = [ + NonZero::::random(&mut rng), + NonZero::::random(&mut rng), + ]; + + for special in &moduli { + let p = &NonZero::new(UInt::ZERO.wrapping_sub(&UInt::from_word(special.0))) + .unwrap(); + + let minus_one = p.wrapping_sub(&UInt::ONE); + + let base_cases = [ + (UInt::ZERO, UInt::ZERO, UInt::ZERO), + (UInt::ONE, UInt::ZERO, UInt::ZERO), + (UInt::ZERO, UInt::ONE, UInt::ZERO), + (UInt::ONE, UInt::ONE, UInt::ONE), + (minus_one, minus_one, UInt::ONE), + (minus_one, UInt::ONE, minus_one), + (UInt::ONE, minus_one, minus_one), + ]; + for (a, b, c) in &base_cases { + let x = a.mul_mod_special(&b, *special.as_ref()); + assert_eq!(*c, x, "{} * {} mod {} = {} != {}", a, b, p, x, c); + } + + for _i in 0..100 { + let a = UInt::<$size>::random_mod(&mut rng, p); + let b = UInt::<$size>::random_mod(&mut rng, p); + + let c = a.mul_mod_special(&b, *special.as_ref()); + assert!(c < **p, "not reduced: {} >= {} ", c, p); + + let expected = { + let (lo, hi) = a.mul_wide(&b); + let mut prod = UInt::<{ 2 * $size }>::ZERO; + prod.limbs[..$size].clone_from_slice(&lo.limbs); + prod.limbs[$size..].clone_from_slice(&hi.limbs); + let mut modulus = UInt::ZERO; + modulus.limbs[..$size].clone_from_slice(&p.as_ref().limbs); + let reduced = prod.reduce(&modulus).unwrap(); + let mut expected = UInt::ZERO; + expected.limbs[..].clone_from_slice(&reduced.limbs[..$size]); + expected + }; + assert_eq!(c, expected, "incorrect result"); + } + } + } + }; + } + + test_mul_mod_special!(1, mul_mod_special_1); + test_mul_mod_special!(2, mul_mod_special_2); + test_mul_mod_special!(3, mul_mod_special_3); + test_mul_mod_special!(4, mul_mod_special_4); + test_mul_mod_special!(5, mul_mod_special_5); + test_mul_mod_special!(6, mul_mod_special_6); + test_mul_mod_special!(7, mul_mod_special_7); + test_mul_mod_special!(8, mul_mod_special_8); + test_mul_mod_special!(9, mul_mod_special_9); + test_mul_mod_special!(10, mul_mod_special_10); + test_mul_mod_special!(11, mul_mod_special_11); + test_mul_mod_special!(12, mul_mod_special_12); +} diff --git a/vendor/crypto-bigint/src/uint/neg_mod.rs b/vendor/crypto-bigint/src/uint/neg_mod.rs new file mode 100644 index 000000000..0a1dc033a --- /dev/null +++ b/vendor/crypto-bigint/src/uint/neg_mod.rs @@ -0,0 +1,68 @@ +//! [`UInt`] negation modulus operations. + +use crate::{Limb, NegMod, UInt}; + +impl UInt { + /// Computes `-a mod p` in constant time. + /// Assumes `self` is in `[0, p)`. + pub const fn neg_mod(&self, p: &Self) -> Self { + let z = self.ct_is_nonzero(); + let mut ret = p.sbb(self, Limb::ZERO).0; + let mut i = 0; + while i < LIMBS { + // Set ret to 0 if the original value was 0, in which + // case ret would be p. + ret.limbs[i].0 &= z; + i += 1; + } + ret + } + + /// Computes `-a mod p` in constant time for the special modulus + /// `p = MAX+1-c` where `c` is small enough to fit in a single [`Limb`]. 
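A minimal single-limb sketch of negation for the special modulus p = 2^64 - c, assuming the input is already reduced below p; unlike the constant-time code that follows, it branches on the borrow for clarity.

/// Sketch: -a mod p for p = 2^64 - c, with 0 <= a < p.
/// Written as 0 - a with an extra subtraction of c when the subtraction borrows,
/// mirroring the "subtract, then correct by c" structure of the generic code.
/// Note: the branch makes this variable-time; the crate masks instead.
fn neg_mod_special_u64(a: u64, c: u64) -> u64 {
    let (diff, borrow) = 0u64.overflowing_sub(a); // diff = 2^64 - a when a != 0
    if borrow { diff.wrapping_sub(c) } else { diff } // a == 0 takes the no-borrow path
}

fn main() {
    let c = 189u64; // p = 2^64 - 189 (illustrative value, not taken from the crate)
    let p = 0u64.wrapping_sub(c);
    assert_eq!(neg_mod_special_u64(0, c), 0);
    assert_eq!(neg_mod_special_u64(1, c), p - 1);
    assert_eq!(neg_mod_special_u64(p - 1, c), 1);
}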
+ pub const fn neg_mod_special(&self, c: Limb) -> Self { + Self::ZERO.sub_mod_special(self, c) + } +} + +impl NegMod for UInt { + type Output = Self; + + fn neg_mod(&self, p: &Self) -> Self { + debug_assert!(self < p); + self.neg_mod(p) + } +} + +#[cfg(test)] +mod tests { + use crate::U256; + + #[test] + fn neg_mod_random() { + let x = + U256::from_be_hex("8d16e171674b4e6d8529edba4593802bf30b8cb161dd30aa8e550d41380007c2"); + let p = + U256::from_be_hex("928334a4e4be0843ec225a4c9c61df34bdc7a81513e4b6f76f2bfa3148e2e1b5"); + + let actual = x.neg_mod(&p); + let expected = + U256::from_be_hex("056c53337d72b9d666f86c9256ce5f08cabc1b63b207864ce0d6ecf010e2d9f3"); + + assert_eq!(expected, actual); + } + + #[test] + fn neg_mod_zero() { + let x = + U256::from_be_hex("0000000000000000000000000000000000000000000000000000000000000000"); + let p = + U256::from_be_hex("928334a4e4be0843ec225a4c9c61df34bdc7a81513e4b6f76f2bfa3148e2e1b5"); + + let actual = x.neg_mod(&p); + let expected = + U256::from_be_hex("0000000000000000000000000000000000000000000000000000000000000000"); + + assert_eq!(expected, actual); + } +} diff --git a/vendor/crypto-bigint/src/uint/rand.rs b/vendor/crypto-bigint/src/uint/rand.rs new file mode 100644 index 000000000..df551c71b --- /dev/null +++ b/vendor/crypto-bigint/src/uint/rand.rs @@ -0,0 +1,92 @@ +//! Random number generator support + +use super::UInt; +use crate::{Limb, NonZero, Random, RandomMod}; +use rand_core::{CryptoRng, RngCore}; +use subtle::ConstantTimeLess; + +#[cfg_attr(docsrs, doc(cfg(feature = "rand_core")))] +impl Random for UInt { + /// Generate a cryptographically secure random [`UInt`]. + fn random(mut rng: impl CryptoRng + RngCore) -> Self { + let mut limbs = [Limb::ZERO; LIMBS]; + + for limb in &mut limbs { + *limb = Limb::random(&mut rng) + } + + limbs.into() + } +} + +#[cfg_attr(docsrs, doc(cfg(feature = "rand_core")))] +impl RandomMod for UInt { + /// Generate a cryptographically secure random [`UInt`] which is less than + /// a given `modulus`. + /// + /// This function uses rejection sampling, a method which produces an + /// unbiased distribution of in-range values provided the underlying + /// [`CryptoRng`] is unbiased, but runs in variable-time. + /// + /// The variable-time nature of the algorithm should not pose a security + /// issue so long as the underlying random number generator is truly a + /// [`CryptoRng`], where previous outputs are unrelated to subsequent + /// outputs and do not reveal information about the RNG's internal state. + fn random_mod(mut rng: impl CryptoRng + RngCore, modulus: &NonZero) -> Self { + let mut n = Self::ZERO; + + // TODO(tarcieri): use `div_ceil` when available + // See: https://github.com/rust-lang/rust/issues/88581 + let mut n_limbs = modulus.bits_vartime() / Limb::BIT_SIZE; + if n_limbs < LIMBS { + n_limbs += 1; + } + + // Compute the highest limb of `modulus` as a `NonZero`. + // Add one to ensure `Limb::random_mod` returns values inclusive of this limb. 
+ let modulus_hi = + NonZero::new(modulus.limbs[n_limbs.saturating_sub(1)].saturating_add(Limb::ONE)) + .unwrap(); // Always at least one due to `saturating_add` + + loop { + for i in 0..n_limbs { + n.limbs[i] = if (i + 1 == n_limbs) && (*modulus_hi != Limb::MAX) { + // Highest limb + Limb::random_mod(&mut rng, &modulus_hi) + } else { + Limb::random(&mut rng) + } + } + + if n.ct_lt(modulus).into() { + return n; + } + } + } +} + +#[cfg(test)] +mod tests { + use crate::{NonZero, RandomMod, U256}; + use rand_core::SeedableRng; + + #[test] + fn random_mod() { + let mut rng = rand_chacha::ChaCha8Rng::seed_from_u64(1); + + // Ensure `random_mod` runs in a reasonable amount of time + let modulus = NonZero::new(U256::from(42u8)).unwrap(); + let res = U256::random_mod(&mut rng, &modulus); + + // Sanity check that the return value isn't zero + assert_ne!(res, U256::ZERO); + + // Ensure `random_mod` runs in a reasonable amount of time + // when the modulus is larger than 1 limb + let modulus = NonZero::new(U256::from(0x10000000000000001u128)).unwrap(); + let res = U256::random_mod(&mut rng, &modulus); + + // Sanity check that the return value isn't zero + assert_ne!(res, U256::ZERO); + } +} diff --git a/vendor/crypto-bigint/src/uint/resize.rs b/vendor/crypto-bigint/src/uint/resize.rs new file mode 100644 index 000000000..5a5ec7eef --- /dev/null +++ b/vendor/crypto-bigint/src/uint/resize.rs @@ -0,0 +1,37 @@ +use super::UInt; + +impl UInt { + /// Construct a `UInt` from the unsigned integer value, + /// truncating the upper bits if the value is too large to be + /// represented. + #[inline(always)] + pub const fn resize(&self) -> UInt { + let mut res = UInt::ZERO; + let mut i = 0; + let dim = if T < LIMBS { T } else { LIMBS }; + while i < dim { + res.limbs[i] = self.limbs[i]; + i += 1; + } + res + } +} + +#[cfg(test)] +mod tests { + use crate::{U128, U64}; + + #[test] + fn resize_larger() { + let u = U64::from_be_hex("AAAAAAAABBBBBBBB"); + let u2: U128 = u.resize(); + assert_eq!(u2, U128::from_be_hex("0000000000000000AAAAAAAABBBBBBBB")); + } + + #[test] + fn resize_smaller() { + let u = U128::from_be_hex("AAAAAAAABBBBBBBBCCCCCCCCDDDDDDDD"); + let u2: U64 = u.resize(); + assert_eq!(u2, U64::from_be_hex("CCCCCCCCDDDDDDDD")); + } +} diff --git a/vendor/crypto-bigint/src/uint/shl.rs b/vendor/crypto-bigint/src/uint/shl.rs new file mode 100644 index 000000000..9d4669130 --- /dev/null +++ b/vendor/crypto-bigint/src/uint/shl.rs @@ -0,0 +1,134 @@ +//! [`UInt`] bitwise left shift operations. + +use crate::{Limb, UInt, Word}; +use core::ops::{Shl, ShlAssign}; + +impl UInt { + /// Computes `self << shift`. + /// + /// NOTE: this operation is variable time with respect to `n` *ONLY*. + /// + /// When used with a fixed `n`, this function is constant-time with respect + /// to `self`. 
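A fixed-size sketch of the limb-plus-bit decomposition behind this shift, assuming a hypothetical 256-bit value held as four little-endian u64 limbs (not the crate's generic layout).

/// Sketch: left shift of a 256-bit value stored as four little-endian u64 limbs.
/// The shift splits into a whole-limb move and a residual bit shift, with the bits
/// shifted out of each lower limb carried into the next higher one.
fn shl_vartime_256(x: [u64; 4], n: usize) -> [u64; 4] {
    let mut out = [0u64; 4];
    if n >= 256 {
        return out;
    }
    let limb_shift = n / 64;
    let bit_shift = n % 64;
    for i in limb_shift..4 {
        let src = i - limb_shift;
        out[i] = x[src] << bit_shift;
        if bit_shift != 0 && src > 0 {
            out[i] |= x[src - 1] >> (64 - bit_shift);
        }
    }
    out
}

fn main() {
    assert_eq!(shl_vartime_256([1, 0, 0, 0], 1), [2, 0, 0, 0]);
    assert_eq!(shl_vartime_256([1, 0, 0, 0], 64), [0, 1, 0, 0]);
    assert_eq!(shl_vartime_256([u64::MAX, 0, 0, 0], 4), [u64::MAX << 4, 0xF, 0, 0]);
}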
+ #[inline(always)] + pub const fn shl_vartime(&self, n: usize) -> Self { + let mut limbs = [Limb::ZERO; LIMBS]; + + if n >= Limb::BIT_SIZE * LIMBS { + return Self { limbs }; + } + + let shift_num = n / Limb::BIT_SIZE; + let rem = n % Limb::BIT_SIZE; + let nz = Limb(rem as Word).is_nonzero(); + let lshift_rem = rem as Word; + let rshift_rem = Limb::ct_select(Limb::ZERO, Limb((Limb::BIT_SIZE - rem) as Word), nz).0; + + let mut i = LIMBS - 1; + while i > shift_num { + let mut limb = self.limbs[i - shift_num].0 << lshift_rem; + let hi = self.limbs[i - shift_num - 1].0 >> rshift_rem; + limb |= hi & nz; + limbs[i] = Limb(limb); + i -= 1 + } + limbs[shift_num] = Limb(self.limbs[0].0 << lshift_rem); + + Self { limbs } + } +} + +impl Shl for UInt { + type Output = UInt; + + /// NOTE: this operation is variable time with respect to `rhs` *ONLY*. + /// + /// When used with a fixed `rhs`, this function is constant-time with respect + /// to `self`. + fn shl(self, rhs: usize) -> UInt { + self.shl_vartime(rhs) + } +} + +impl Shl for &UInt { + type Output = UInt; + + /// NOTE: this operation is variable time with respect to `rhs` *ONLY*. + /// + /// When used with a fixed `rhs`, this function is constant-time with respect + /// to `self`. + fn shl(self, rhs: usize) -> UInt { + self.shl_vartime(rhs) + } +} + +impl ShlAssign for UInt { + /// NOTE: this operation is variable time with respect to `rhs` *ONLY*. + /// + /// When used with a fixed `rhs`, this function is constant-time with respect + /// to `self`. + fn shl_assign(&mut self, rhs: usize) { + *self = self.shl_vartime(rhs) + } +} + +#[cfg(test)] +mod tests { + use crate::U256; + + const N: U256 = + U256::from_be_hex("FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFEBAAEDCE6AF48A03BBFD25E8CD0364141"); + + const TWO_N: U256 = + U256::from_be_hex("FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFD755DB9CD5E9140777FA4BD19A06C8282"); + + const FOUR_N: U256 = + U256::from_be_hex("FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFAEABB739ABD2280EEFF497A3340D90504"); + + const SIXTY_FIVE: U256 = + U256::from_be_hex("FFFFFFFFFFFFFFFD755DB9CD5E9140777FA4BD19A06C82820000000000000000"); + + const EIGHTY_EIGHT: U256 = + U256::from_be_hex("FFFFFFFFFEBAAEDCE6AF48A03BBFD25E8CD03641410000000000000000000000"); + + const SIXTY_FOUR: U256 = + U256::from_be_hex("FFFFFFFFFFFFFFFEBAAEDCE6AF48A03BBFD25E8CD03641410000000000000000"); + + #[test] + fn shl_simple() { + let mut t = U256::from(1u8); + assert_eq!(t << 1, U256::from(2u8)); + t = U256::from(3u8); + assert_eq!(t << 8, U256::from(0x300u16)); + } + + #[test] + fn shl1() { + assert_eq!(N << 1, TWO_N); + } + + #[test] + fn shl2() { + assert_eq!(N << 2, FOUR_N); + } + + #[test] + fn shl65() { + assert_eq!(N << 65, SIXTY_FIVE); + } + + #[test] + fn shl88() { + assert_eq!(N << 88, EIGHTY_EIGHT); + } + + #[test] + fn shl256() { + assert_eq!(N << 256, U256::default()); + } + + #[test] + fn shl64() { + assert_eq!(N << 64, SIXTY_FOUR); + } +} diff --git a/vendor/crypto-bigint/src/uint/shr.rs b/vendor/crypto-bigint/src/uint/shr.rs new file mode 100644 index 000000000..54375ae72 --- /dev/null +++ b/vendor/crypto-bigint/src/uint/shr.rs @@ -0,0 +1,93 @@ +//! [`UInt`] bitwise right shift operations. + +use super::UInt; +use crate::Limb; +use core::ops::{Shr, ShrAssign}; + +impl UInt { + /// Computes `self >> n`. + /// + /// NOTE: this operation is variable time with respect to `n` *ONLY*. + /// + /// When used with a fixed `n`, this function is constant-time with respect + /// to `self`. 
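The mirror-image sketch for the right shift, on the same assumed [u64; 4] little-endian layout: bits lost from each higher limb are carried down into the limb below.

/// Sketch: right shift of a 256-bit value stored as four little-endian u64 limbs.
fn shr_vartime_256(x: [u64; 4], n: usize) -> [u64; 4] {
    let mut out = [0u64; 4];
    if n >= 256 {
        return out;
    }
    let limb_shift = n / 64;
    let bit_shift = n % 64;
    for i in 0..4 - limb_shift {
        let src = i + limb_shift;
        out[i] = x[src] >> bit_shift;
        if bit_shift != 0 && src + 1 < 4 {
            out[i] |= x[src + 1] << (64 - bit_shift);
        }
    }
    out
}

fn main() {
    assert_eq!(shr_vartime_256([0, 1, 0, 0], 64), [1, 0, 0, 0]);
    assert_eq!(shr_vartime_256([0, 0x10, 0, 0], 68), [1, 0, 0, 0]);
}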
+ #[inline(always)] + pub const fn shr_vartime(&self, shift: usize) -> Self { + let full_shifts = shift / Limb::BIT_SIZE; + let small_shift = shift & (Limb::BIT_SIZE - 1); + let mut limbs = [Limb::ZERO; LIMBS]; + + if shift > Limb::BIT_SIZE * LIMBS { + return Self { limbs }; + } + + let n = LIMBS - full_shifts; + let mut i = 0; + + if small_shift == 0 { + while i < n { + limbs[i] = Limb(self.limbs[i + full_shifts].0); + i += 1; + } + } else { + while i < n { + let mut lo = self.limbs[i + full_shifts].0 >> small_shift; + + if i < (LIMBS - 1) - full_shifts { + lo |= self.limbs[i + full_shifts + 1].0 << (Limb::BIT_SIZE - small_shift); + } + + limbs[i] = Limb(lo); + i += 1; + } + } + + Self { limbs } + } +} + +impl Shr for UInt { + type Output = UInt; + + /// NOTE: this operation is variable time with respect to `rhs` *ONLY*. + /// + /// When used with a fixed `rhs`, this function is constant-time with respect + /// to `self`. + fn shr(self, rhs: usize) -> UInt { + self.shr_vartime(rhs) + } +} + +impl Shr for &UInt { + type Output = UInt; + + /// NOTE: this operation is variable time with respect to `rhs` *ONLY*. + /// + /// When used with a fixed `rhs`, this function is constant-time with respect + /// to `self`. + fn shr(self, rhs: usize) -> UInt { + self.shr_vartime(rhs) + } +} + +impl ShrAssign for UInt { + fn shr_assign(&mut self, rhs: usize) { + *self = self.shr_vartime(rhs); + } +} + +#[cfg(test)] +mod tests { + use crate::U256; + + const N: U256 = + U256::from_be_hex("FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFEBAAEDCE6AF48A03BBFD25E8CD0364141"); + + const N_2: U256 = + U256::from_be_hex("7FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF5D576E7357A4501DDFE92F46681B20A0"); + + #[test] + fn shr1() { + assert_eq!(N >> 1, N_2); + } +} diff --git a/vendor/crypto-bigint/src/uint/split.rs b/vendor/crypto-bigint/src/uint/split.rs new file mode 100644 index 000000000..ecff9d6d8 --- /dev/null +++ b/vendor/crypto-bigint/src/uint/split.rs @@ -0,0 +1,58 @@ +// TODO(tarcieri): use `const_evaluatable_checked` when stable to make generic around bits. +macro_rules! impl_split { + ($(($name:ident, $bits:expr)),+) => { + $( + impl $name { + /// Split this number in half, returning its high and low components + /// respectively. + pub const fn split(&self) -> (UInt<{nlimbs!($bits) / 2}>, UInt<{nlimbs!($bits) / 2}>) { + let mut lo = [Limb::ZERO; nlimbs!($bits) / 2]; + let mut hi = [Limb::ZERO; nlimbs!($bits) / 2]; + let mut i = 0; + let mut j = 0; + + while j < (nlimbs!($bits) / 2) { + lo[j] = self.limbs[i]; + i += 1; + j += 1; + } + + j = 0; + while j < (nlimbs!($bits) / 2) { + hi[j] = self.limbs[i]; + i += 1; + j += 1; + } + + (UInt { limbs: hi }, UInt { limbs: lo }) + } + } + + impl Split for $name { + type Output = UInt<{nlimbs!($bits) / 2}>; + + fn split(&self) -> (Self::Output, Self::Output) { + self.split() + } + } + + impl From<$name> for (UInt<{nlimbs!($bits) / 2}>, UInt<{nlimbs!($bits) / 2}>) { + fn from(num: $name) -> (UInt<{nlimbs!($bits) / 2}>, UInt<{nlimbs!($bits) / 2}>) { + num.split() + } + } + )+ + }; +} + +#[cfg(test)] +mod tests { + use crate::{U128, U64}; + + #[test] + fn split() { + let (hi, lo) = U128::from_be_hex("00112233445566778899aabbccddeeff").split(); + assert_eq!(hi, U64::from_u64(0x0011223344556677)); + assert_eq!(lo, U64::from_u64(0x8899aabbccddeeff)); + } +} diff --git a/vendor/crypto-bigint/src/uint/sqrt.rs b/vendor/crypto-bigint/src/uint/sqrt.rs new file mode 100644 index 000000000..4a9f26a61 --- /dev/null +++ b/vendor/crypto-bigint/src/uint/sqrt.rs @@ -0,0 +1,145 @@ +//! 
[`UInt`] square root operations. + +use super::UInt; +use crate::{Limb, Word}; +use subtle::{ConstantTimeEq, CtOption}; + +impl UInt { + /// Computes √(`self`) + /// Uses Brent & Zimmermann, Modern Computer Arithmetic, v0.5.9, Algorithm 1.13 + /// + /// Callers can check if `self` is a square by squaring the result + pub const fn sqrt(&self) -> Self { + let max_bits = (self.bits_vartime() + 1) >> 1; + let cap = Self::ONE.shl_vartime(max_bits); + let mut guess = cap; // ≥ √(`self`) + let mut xn = { + let q = self.wrapping_div(&guess); + let t = guess.wrapping_add(&q); + t.shr_vartime(1) + }; + + // If guess increased, the initial guess was low. + // Repeat until reverse course. + while guess.ct_cmp(&xn) == -1 { + // Sometimes an increase is too far, especially with large + // powers, and then takes a long time to walk back. The upper + // bound is based on bit size, so saturate on that. + let res = Limb::ct_cmp(Limb(xn.bits_vartime() as Word), Limb(max_bits as Word)) - 1; + let le = Limb::is_nonzero(Limb(res as Word)); + guess = Self::ct_select(cap, xn, le); + xn = { + let q = self.wrapping_div(&guess); + let t = guess.wrapping_add(&q); + t.shr_vartime(1) + }; + } + + // Repeat while guess decreases. + while guess.ct_cmp(&xn) == 1 && xn.ct_is_nonzero() == Word::MAX { + guess = xn; + xn = { + let q = self.wrapping_div(&guess); + let t = guess.wrapping_add(&q); + t.shr_vartime(1) + }; + } + + Self::ct_select(Self::ZERO, guess, self.ct_is_nonzero()) + } + + /// Wrapped sqrt is just normal √(`self`) + /// There’s no way wrapping could ever happen. + /// This function exists, so that all operations are accounted for in the wrapping operations. + pub const fn wrapping_sqrt(&self) -> Self { + self.sqrt() + } + + /// Perform checked sqrt, returning a [`CtOption`] which `is_some` + /// only if the √(`self`)² == self + pub fn checked_sqrt(&self) -> CtOption { + let r = self.sqrt(); + let s = r.wrapping_mul(&r); + CtOption::new(r, self.ct_eq(&s)) + } +} + +#[cfg(test)] +mod tests { + use crate::{Limb, U256}; + + #[cfg(feature = "rand")] + use { + crate::{CheckedMul, Random, U512}, + rand_chacha::ChaChaRng, + rand_core::{RngCore, SeedableRng}, + }; + + #[test] + fn edge() { + assert_eq!(U256::ZERO.sqrt(), U256::ZERO); + assert_eq!(U256::ONE.sqrt(), U256::ONE); + let mut half = U256::ZERO; + for i in 0..half.limbs.len() / 2 { + half.limbs[i] = Limb::MAX; + } + assert_eq!(U256::MAX.sqrt(), half,); + } + + #[test] + fn simple() { + let tests = [ + (4u8, 2u8), + (9, 3), + (16, 4), + (25, 5), + (36, 6), + (49, 7), + (64, 8), + (81, 9), + (100, 10), + (121, 11), + (144, 12), + (169, 13), + ]; + for (a, e) in &tests { + let l = U256::from(*a); + let r = U256::from(*e); + assert_eq!(l.sqrt(), r); + assert_eq!(l.checked_sqrt().is_some().unwrap_u8(), 1u8); + } + } + + #[test] + fn nonsquares() { + assert_eq!(U256::from(2u8).sqrt(), U256::from(1u8)); + assert_eq!(U256::from(2u8).checked_sqrt().is_some().unwrap_u8(), 0); + assert_eq!(U256::from(3u8).sqrt(), U256::from(1u8)); + assert_eq!(U256::from(3u8).checked_sqrt().is_some().unwrap_u8(), 0); + assert_eq!(U256::from(5u8).sqrt(), U256::from(2u8)); + assert_eq!(U256::from(6u8).sqrt(), U256::from(2u8)); + assert_eq!(U256::from(7u8).sqrt(), U256::from(2u8)); + assert_eq!(U256::from(8u8).sqrt(), U256::from(2u8)); + assert_eq!(U256::from(10u8).sqrt(), U256::from(3u8)); + } + + #[cfg(feature = "rand")] + #[test] + fn fuzz() { + let mut rng = ChaChaRng::from_seed([7u8; 32]); + for _ in 0..50 { + let t = rng.next_u32() as u64; + let s = U256::from(t); + let s2 = 
s.checked_mul(&s).unwrap(); + assert_eq!(s2.sqrt(), s); + assert_eq!(s2.checked_sqrt().is_some().unwrap_u8(), 1); + } + + for _ in 0..50 { + let s = U256::random(&mut rng); + let mut s2 = U512::ZERO; + s2.limbs[..s.limbs.len()].copy_from_slice(&s.limbs); + assert_eq!(s.square().sqrt(), s2); + } + } +} diff --git a/vendor/crypto-bigint/src/uint/sub.rs b/vendor/crypto-bigint/src/uint/sub.rs new file mode 100644 index 000000000..102f6b978 --- /dev/null +++ b/vendor/crypto-bigint/src/uint/sub.rs @@ -0,0 +1,192 @@ +//! [`UInt`] addition operations. + +use super::UInt; +use crate::{Checked, CheckedSub, Limb, Wrapping, Zero}; +use core::ops::{Sub, SubAssign}; +use subtle::CtOption; + +impl UInt { + /// Computes `a - (b + borrow)`, returning the result along with the new borrow. + #[inline(always)] + pub const fn sbb(&self, rhs: &Self, mut borrow: Limb) -> (Self, Limb) { + let mut limbs = [Limb::ZERO; LIMBS]; + let mut i = 0; + + while i < LIMBS { + let (w, b) = self.limbs[i].sbb(rhs.limbs[i], borrow); + limbs[i] = w; + borrow = b; + i += 1; + } + + (Self { limbs }, borrow) + } + + /// Perform saturating subtraction, returning `ZERO` on underflow. + pub const fn saturating_sub(&self, rhs: &Self) -> Self { + let (res, underflow) = self.sbb(rhs, Limb::ZERO); + + if underflow.0 == 0 { + res + } else { + Self::ZERO + } + } + + /// Perform wrapping subtraction, discarding underflow and wrapping around + /// the boundary of the type. + pub const fn wrapping_sub(&self, rhs: &Self) -> Self { + self.sbb(rhs, Limb::ZERO).0 + } +} + +impl CheckedSub<&UInt> for UInt { + type Output = Self; + + fn checked_sub(&self, rhs: &Self) -> CtOption { + let (result, underflow) = self.sbb(rhs, Limb::ZERO); + CtOption::new(result, underflow.is_zero()) + } +} + +impl Sub for Wrapping> { + type Output = Self; + + fn sub(self, rhs: Self) -> Wrapping> { + Wrapping(self.0.wrapping_sub(&rhs.0)) + } +} + +impl Sub<&Wrapping>> for Wrapping> { + type Output = Wrapping>; + + fn sub(self, rhs: &Wrapping>) -> Wrapping> { + Wrapping(self.0.wrapping_sub(&rhs.0)) + } +} + +impl Sub>> for &Wrapping> { + type Output = Wrapping>; + + fn sub(self, rhs: Wrapping>) -> Wrapping> { + Wrapping(self.0.wrapping_sub(&rhs.0)) + } +} + +impl Sub<&Wrapping>> for &Wrapping> { + type Output = Wrapping>; + + fn sub(self, rhs: &Wrapping>) -> Wrapping> { + Wrapping(self.0.wrapping_sub(&rhs.0)) + } +} + +impl SubAssign for Wrapping> { + fn sub_assign(&mut self, other: Self) { + *self = *self - other; + } +} + +impl SubAssign<&Wrapping>> for Wrapping> { + fn sub_assign(&mut self, other: &Self) { + *self = *self - other; + } +} + +impl Sub for Checked> { + type Output = Self; + + fn sub(self, rhs: Self) -> Checked> { + Checked( + self.0 + .and_then(|lhs| rhs.0.and_then(|rhs| lhs.checked_sub(&rhs))), + ) + } +} + +impl Sub<&Checked>> for Checked> { + type Output = Checked>; + + fn sub(self, rhs: &Checked>) -> Checked> { + Checked( + self.0 + .and_then(|lhs| rhs.0.and_then(|rhs| lhs.checked_sub(&rhs))), + ) + } +} + +impl Sub>> for &Checked> { + type Output = Checked>; + + fn sub(self, rhs: Checked>) -> Checked> { + Checked( + self.0 + .and_then(|lhs| rhs.0.and_then(|rhs| lhs.checked_sub(&rhs))), + ) + } +} + +impl Sub<&Checked>> for &Checked> { + type Output = Checked>; + + fn sub(self, rhs: &Checked>) -> Checked> { + Checked( + self.0 + .and_then(|lhs| rhs.0.and_then(|rhs| lhs.checked_sub(&rhs))), + ) + } +} + +impl SubAssign for Checked> { + fn sub_assign(&mut self, other: Self) { + *self = *self - other; + } +} + +impl SubAssign<&Checked>> for Checked> { + 
fn sub_assign(&mut self, other: &Self) { + *self = *self - other; + } +} + +#[cfg(test)] +mod tests { + use crate::{CheckedSub, Limb, U128}; + + #[test] + fn sbb_no_borrow() { + let (res, borrow) = U128::ONE.sbb(&U128::ONE, Limb::ZERO); + assert_eq!(res, U128::ZERO); + assert_eq!(borrow, Limb::ZERO); + } + + #[test] + fn sbb_with_borrow() { + let (res, borrow) = U128::ZERO.sbb(&U128::ONE, Limb::ZERO); + + assert_eq!(res, U128::MAX); + assert_eq!(borrow, Limb::MAX); + } + + #[test] + fn wrapping_sub_no_borrow() { + assert_eq!(U128::ONE.wrapping_sub(&U128::ONE), U128::ZERO); + } + + #[test] + fn wrapping_sub_with_borrow() { + assert_eq!(U128::ZERO.wrapping_sub(&U128::ONE), U128::MAX); + } + + #[test] + fn checked_sub_ok() { + let result = U128::ONE.checked_sub(&U128::ONE); + assert_eq!(result.unwrap(), U128::ZERO); + } + + #[test] + fn checked_sub_overflow() { + let result = U128::ZERO.checked_sub(&U128::ONE); + assert!(!bool::from(result.is_some())); + } +} diff --git a/vendor/crypto-bigint/src/uint/sub_mod.rs b/vendor/crypto-bigint/src/uint/sub_mod.rs new file mode 100644 index 000000000..f699f66eb --- /dev/null +++ b/vendor/crypto-bigint/src/uint/sub_mod.rs @@ -0,0 +1,182 @@ +//! [`UInt`] subtraction modulus operations. + +use crate::{Limb, SubMod, UInt}; + +impl UInt { + /// Computes `self - rhs mod p` in constant time. + /// + /// Assumes `self - rhs` as unbounded signed integer is in `[-p, p)`. + pub const fn sub_mod(&self, rhs: &UInt, p: &UInt) -> UInt { + let (mut out, borrow) = self.sbb(rhs, Limb::ZERO); + + // If underflow occurred on the final limb, borrow = 0xfff...fff, otherwise + // borrow = 0x000...000. Thus, we use it as a mask to conditionally add the modulus. + let mut carry = Limb::ZERO; + let mut i = 0; + + while i < LIMBS { + let (l, c) = out.limbs[i].adc(p.limbs[i].bitand(borrow), carry); + out.limbs[i] = l; + carry = c; + i += 1; + } + + out + } + + /// Computes `self - rhs mod p` in constant time for the special modulus + /// `p = MAX+1-c` where `c` is small enough to fit in a single [`Limb`]. + /// + /// Assumes `self - rhs` as unbounded signed integer is in `[-p, p)`. + pub const fn sub_mod_special(&self, rhs: &Self, c: Limb) -> Self { + let (out, borrow) = self.sbb(rhs, Limb::ZERO); + + // If underflow occurred, then we need to subtract `c` to account for + // the underflow. This cannot underflow due to the assumption + // `self - rhs >= -p`. + let l = borrow.0 & c.0; + let (out, _) = out.sbb(&UInt::from_word(l), Limb::ZERO); + out + } +} + +impl SubMod for UInt { + type Output = Self; + + fn sub_mod(&self, rhs: &Self, p: &Self) -> Self { + debug_assert!(self < p); + debug_assert!(rhs < p); + self.sub_mod(rhs, p) + } +} + +#[cfg(all(test, feature = "rand"))] +mod tests { + use crate::{Limb, NonZero, Random, RandomMod, UInt}; + use rand_core::SeedableRng; + + macro_rules! 
test_sub_mod { + ($size:expr, $test_name:ident) => { + #[test] + fn $test_name() { + let mut rng = rand_chacha::ChaCha8Rng::seed_from_u64(1); + let moduli = [ + NonZero::>::random(&mut rng), + NonZero::>::random(&mut rng), + ]; + + for p in &moduli { + let base_cases = [ + (1u64, 0u64, 1u64.into()), + (0, 1, p.wrapping_sub(&1u64.into())), + (0, 0, 0u64.into()), + ]; + for (a, b, c) in &base_cases { + let a: UInt<$size> = (*a).into(); + let b: UInt<$size> = (*b).into(); + + let x = a.sub_mod(&b, p); + assert_eq!(*c, x, "{} - {} mod {} = {} != {}", a, b, p, x, c); + } + + if $size > 1 { + for _i in 0..100 { + let a: UInt<$size> = Limb::random(&mut rng).into(); + let b: UInt<$size> = Limb::random(&mut rng).into(); + let (a, b) = if a < b { (b, a) } else { (a, b) }; + + let c = a.sub_mod(&b, p); + assert!(c < **p, "not reduced"); + assert_eq!(c, a.wrapping_sub(&b), "result incorrect"); + } + } + + for _i in 0..100 { + let a = UInt::<$size>::random_mod(&mut rng, p); + let b = UInt::<$size>::random_mod(&mut rng, p); + + let c = a.sub_mod(&b, p); + assert!(c < **p, "not reduced: {} >= {} ", c, p); + + let x = a.wrapping_sub(&b); + if a >= b && x < **p { + assert_eq!(c, x, "incorrect result"); + } + } + } + } + }; + } + + macro_rules! test_sub_mod_special { + ($size:expr, $test_name:ident) => { + #[test] + fn $test_name() { + let mut rng = rand_chacha::ChaCha8Rng::seed_from_u64(1); + let moduli = [ + NonZero::::random(&mut rng), + NonZero::::random(&mut rng), + ]; + + for special in &moduli { + let p = &NonZero::new(UInt::ZERO.wrapping_sub(&UInt::from_word(special.0))) + .unwrap(); + + let minus_one = p.wrapping_sub(&UInt::ONE); + + let base_cases = [ + (UInt::ZERO, UInt::ZERO, UInt::ZERO), + (UInt::ONE, UInt::ZERO, UInt::ONE), + (UInt::ZERO, UInt::ONE, minus_one), + (minus_one, minus_one, UInt::ZERO), + (UInt::ZERO, minus_one, UInt::ONE), + ]; + for (a, b, c) in &base_cases { + let x = a.sub_mod_special(&b, *special.as_ref()); + assert_eq!(*c, x, "{} - {} mod {} = {} != {}", a, b, p, x, c); + } + + for _i in 0..100 { + let a = UInt::<$size>::random_mod(&mut rng, p); + let b = UInt::<$size>::random_mod(&mut rng, p); + + let c = a.sub_mod_special(&b, *special.as_ref()); + assert!(c < **p, "not reduced: {} >= {} ", c, p); + + let expected = a.sub_mod(&b, p); + assert_eq!(c, expected, "incorrect result"); + } + } + } + }; + } + + // Test requires 1-limb is capable of representing a 64-bit integer + #[cfg(target_pointer_width = "64")] + test_sub_mod!(1, sub1); + + test_sub_mod!(2, sub2); + test_sub_mod!(3, sub3); + test_sub_mod!(4, sub4); + test_sub_mod!(5, sub5); + test_sub_mod!(6, sub6); + test_sub_mod!(7, sub7); + test_sub_mod!(8, sub8); + test_sub_mod!(9, sub9); + test_sub_mod!(10, sub10); + test_sub_mod!(11, sub11); + test_sub_mod!(12, sub12); + + test_sub_mod_special!(1, sub_mod_special_1); + test_sub_mod_special!(2, sub_mod_special_2); + test_sub_mod_special!(3, sub_mod_special_3); + test_sub_mod_special!(4, sub_mod_special_4); + test_sub_mod_special!(5, sub_mod_special_5); + test_sub_mod_special!(6, sub_mod_special_6); + test_sub_mod_special!(7, sub_mod_special_7); + test_sub_mod_special!(8, sub_mod_special_8); + test_sub_mod_special!(9, sub_mod_special_9); + test_sub_mod_special!(10, sub_mod_special_10); + test_sub_mod_special!(11, sub_mod_special_11); + test_sub_mod_special!(12, sub_mod_special_12); +} diff --git a/vendor/crypto-bigint/src/wrapping.rs b/vendor/crypto-bigint/src/wrapping.rs new file mode 100644 index 000000000..c2f6c2b34 --- /dev/null +++ 
b/vendor/crypto-bigint/src/wrapping.rs @@ -0,0 +1,108 @@ +//! Wrapping arithmetic. + +use crate::Zero; +use core::fmt; +use subtle::{Choice, ConditionallySelectable, ConstantTimeEq}; + +#[cfg(feature = "serde")] +use serdect::serde::{Deserialize, Deserializer, Serialize, Serializer}; + +/// Provides intentionally-wrapped arithmetic on `T`. +/// +/// This is analogous to [`core::num::Wrapping`] but allows this crate to +/// define trait impls for this type. +#[derive(Copy, Clone, Debug, Default, Eq, PartialEq, PartialOrd, Ord)] +pub struct Wrapping(pub T); + +impl Zero for Wrapping { + const ZERO: Self = Self(T::ZERO); +} + +impl fmt::Display for Wrapping { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + self.0.fmt(f) + } +} + +impl fmt::Binary for Wrapping { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + self.0.fmt(f) + } +} + +impl fmt::Octal for Wrapping { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + self.0.fmt(f) + } +} + +impl fmt::LowerHex for Wrapping { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + self.0.fmt(f) + } +} + +impl fmt::UpperHex for Wrapping { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + self.0.fmt(f) + } +} + +impl ConditionallySelectable for Wrapping { + fn conditional_select(a: &Self, b: &Self, choice: Choice) -> Self { + Wrapping(T::conditional_select(&a.0, &b.0, choice)) + } +} + +impl ConstantTimeEq for Wrapping { + fn ct_eq(&self, other: &Self) -> Choice { + self.0.ct_eq(&other.0) + } +} + +#[cfg(feature = "serde")] +#[cfg_attr(docsrs, doc(cfg(feature = "serde")))] +impl<'de, T: Deserialize<'de>> Deserialize<'de> for Wrapping { + fn deserialize(deserializer: D) -> Result + where + D: Deserializer<'de>, + { + Ok(Self(T::deserialize(deserializer)?)) + } +} + +#[cfg(feature = "serde")] +#[cfg_attr(docsrs, doc(cfg(feature = "serde")))] +impl<'de, T: Serialize> Serialize for Wrapping { + fn serialize(&self, serializer: S) -> Result + where + S: Serializer, + { + self.0.serialize(serializer) + } +} + +#[cfg(all(test, feature = "serde"))] +mod tests { + use crate::{Wrapping, U64}; + + #[test] + fn serde() { + const TEST: Wrapping = Wrapping(U64::from_u64(0x0011223344556677)); + + let serialized = bincode::serialize(&TEST).unwrap(); + let deserialized: Wrapping = bincode::deserialize(&serialized).unwrap(); + + assert_eq!(TEST, deserialized); + } + + #[test] + fn serde_owned() { + const TEST: Wrapping = Wrapping(U64::from_u64(0x0011223344556677)); + + let serialized = bincode::serialize(&TEST).unwrap(); + let deserialized: Wrapping = bincode::deserialize_from(serialized.as_slice()).unwrap(); + + assert_eq!(TEST, deserialized); + } +} diff --git a/vendor/crypto-bigint/tests/proptests.rs b/vendor/crypto-bigint/tests/proptests.rs new file mode 100644 index 000000000..42a3066c6 --- /dev/null +++ b/vendor/crypto-bigint/tests/proptests.rs @@ -0,0 +1,209 @@ +//! 
Equivalence tests between `num-bigint` and `crypto-bigint` + +use crypto_bigint::{Encoding, U256}; +use num_bigint::BigUint; +use num_traits::identities::Zero; +use proptest::prelude::*; +use std::mem; + +/// Example prime number (NIST P-256 curve order) +const P: U256 = + U256::from_be_hex("ffffffff00000000ffffffffffffffffbce6faada7179e84f3b9cac2fc632551"); + +fn to_biguint(uint: &U256) -> BigUint { + BigUint::from_bytes_le(uint.to_le_bytes().as_ref()) +} + +fn to_uint(big_uint: BigUint) -> U256 { + let mut input = [0u8; U256::BYTE_SIZE]; + let encoded = big_uint.to_bytes_le(); + let l = encoded.len().min(U256::BYTE_SIZE); + input[..l].copy_from_slice(&encoded[..l]); + + U256::from_le_slice(&input) +} + +prop_compose! { + fn uint()(bytes in any::<[u8; 32]>()) -> U256 { + U256::from_le_slice(&bytes) + } +} +prop_compose! { + fn uint_mod_p(p: U256)(a in uint()) -> U256 { + a.wrapping_rem(&p) + } +} + +proptest! { + #[test] + fn roundtrip(a in uint()) { + assert_eq!(a, to_uint(to_biguint(&a))); + } + + #[test] + fn wrapping_add(a in uint(), b in uint()) { + let a_bi = to_biguint(&a); + let b_bi = to_biguint(&b); + + let expected = to_uint(a_bi + b_bi); + let actual = a.wrapping_add(&b); + + assert_eq!(expected, actual); + } + + #[test] + fn add_mod_nist_p256(a in uint_mod_p(P), b in uint_mod_p(P)) { + assert!(a < P); + assert!(b < P); + + let a_bi = to_biguint(&a); + let b_bi = to_biguint(&b); + let p_bi = to_biguint(&P); + + let expected = to_uint((a_bi + b_bi) % p_bi); + let actual = a.add_mod(&b, &P); + + assert!(expected < P); + assert!(actual < P); + + assert_eq!(expected, actual); + } + + #[test] + fn sub_mod_nist_p256(mut a in uint_mod_p(P), mut b in uint_mod_p(P)) { + if b > a { + mem::swap(&mut a, &mut b); + } + + assert!(a < P); + assert!(b < P); + + let a_bi = to_biguint(&a); + let b_bi = to_biguint(&b); + let p_bi = to_biguint(&P); + + let expected = to_uint((a_bi - b_bi) % p_bi); + let actual = a.sub_mod(&b, &P); + + assert!(expected < P); + assert!(actual < P); + + assert_eq!(expected, actual); + } + + #[test] + fn wrapping_sub(mut a in uint(), mut b in uint()) { + if b > a { + mem::swap(&mut a, &mut b); + } + + let a_bi = to_biguint(&a); + let b_bi = to_biguint(&b); + + let expected = to_uint(a_bi - b_bi); + let actual = a.wrapping_sub(&b); + + assert_eq!(expected, actual); + } + + #[test] + fn wrapping_mul(a in uint(), b in uint()) { + let a_bi = to_biguint(&a); + let b_bi = to_biguint(&b); + + let expected = to_uint(a_bi * b_bi); + let actual = a.wrapping_mul(&b); + + assert_eq!(expected, actual); + } + + #[test] + fn wrapping_div(a in uint(), b in uint()) { + let a_bi = to_biguint(&a); + let b_bi = to_biguint(&b); + + if !b_bi.is_zero() { + let expected = to_uint(a_bi / b_bi); + let actual = a.wrapping_div(&b); + + assert_eq!(expected, actual); + } + } + + #[test] + fn wrapping_rem(a in uint(), b in uint()) { + let a_bi = to_biguint(&a); + let b_bi = to_biguint(&b); + + if b_bi.is_zero() { + let expected = to_uint(a_bi % b_bi); + let actual = a.wrapping_rem(&b); + + assert_eq!(expected, actual); + } + } + + #[test] + fn wrapping_sqrt(a in uint()) { + let a_bi = to_biguint(&a); + let expected = to_uint(a_bi.sqrt()); + let actual = a.wrapping_sqrt(); + + assert_eq!(expected, actual); + } + + #[test] + fn wrapping_or(a in uint(), b in uint()) { + let a_bi = to_biguint(&a); + let b_bi = to_biguint(&b); + + if !b_bi.is_zero() { + let expected = to_uint(a_bi | b_bi); + let actual = a.wrapping_or(&b); + + assert_eq!(expected, actual); + } + } + + #[test] + fn wrapping_and(a in 
uint(), b in uint()) { + let a_bi = to_biguint(&a); + let b_bi = to_biguint(&b); + + if !b_bi.is_zero() { + let expected = to_uint(a_bi & b_bi); + let actual = a.wrapping_and(&b); + + assert_eq!(expected, actual); + } + } + + #[test] + fn wrapping_xor(a in uint(), b in uint()) { + let a_bi = to_biguint(&a); + let b_bi = to_biguint(&b); + if !b_bi.is_zero() { + let expected = to_uint(a_bi ^ b_bi); + let actual = a.wrapping_xor(&b); + + assert_eq!(expected, actual); + } + } + + #[test] + fn encoding(a in uint()) { + assert_eq!(a, U256::from_be_bytes(a.to_be_bytes())); + assert_eq!(a, U256::from_le_bytes(a.to_le_bytes())); + } + + #[test] + fn encoding_reverse(a in uint()) { + let mut bytes = a.to_be_bytes(); + bytes.reverse(); + assert_eq!(a, U256::from_le_bytes(bytes)); + + let mut bytes = a.to_le_bytes(); + bytes.reverse(); + assert_eq!(a, U256::from_be_bytes(bytes)); +} +} -- cgit v1.2.3
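A minimal sketch of the floor square root that the sqrt and wrapping_sqrt tests above exercise, using Newton's iteration on plain u128 rather than the crate's limb type: start from a power-of-two bound at least sqrt(n) and iterate x <- (x + n/x)/2 until the iterate stops decreasing.

/// Sketch: floor(sqrt(n)) on u128 via Newton's iteration, variable-time.
fn isqrt_u128(n: u128) -> u128 {
    if n < 2 {
        return n;
    }
    // Initial guess: 2^ceil(bit_length/2) >= sqrt(n).
    let mut x = 1u128 << ((128 - n.leading_zeros() as usize + 1) / 2);
    loop {
        let next = (x + n / x) / 2;
        if next >= x {
            return x; // the iterate stopped decreasing: x = floor(sqrt(n))
        }
        x = next;
    }
}

fn main() {
    assert_eq!(isqrt_u128(0), 0);
    assert_eq!(isqrt_u128(99), 9);
    assert_eq!(isqrt_u128(100), 10);
    assert_eq!(isqrt_u128(u128::from(u64::MAX)), 0xFFFF_FFFF);
    // Perfect squares round-trip exactly, mirroring the checked_sqrt tests earlier in the patch.
    let s = 0xDEAD_BEEFu128;
    assert_eq!(isqrt_u128(s * s), s);
}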